Merge compiler-rt trunk r321017 to contrib/compiler-rt.

dim 2017-12-20 19:12:15 +00:00
commit 8d786676ca
341 changed files with 14648 additions and 4693 deletions

View File

@ -76,6 +76,13 @@ extern "C" {
void (*malloc_hook)(const volatile void *, size_t),
void (*free_hook)(const volatile void *));
/* Drains allocator quarantines (calling thread's and global ones), returns
freed memory back to the OS, and releases other non-essential internal
allocator resources in an attempt to reduce process RSS.
Currently available with ASan only.
*/
void __sanitizer_purge_allocator();
#ifdef __cplusplus
} // extern "C"
#endif
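
A minimal usage sketch, assuming a program built with ASan (the helper name and buffer size below are illustrative):

#include <sanitizer/common_interface_defs.h>
#include <cstdlib>

// Illustrative: after dropping a large allocation, drain the quarantines
// and hand freed pages back to the OS to reduce resident memory.
void ReduceFootprint() {
  void *scratch = malloc(64 << 20);
  // ... use scratch ...
  free(scratch);
  __sanitizer_purge_allocator();
}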

View File

@ -144,6 +144,10 @@ extern "C" {
void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
void **end);
// Performs cleanup before a [[noreturn]] function. Must be called
// before things like _exit and execl to avoid false positives on stack.
void __asan_handle_no_return(void);
#ifdef __cplusplus
} // extern "C"
#endif
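
A sketch of the intended call pattern, assuming an ASan-instrumented program (FatalShutdown is an illustrative name):

#include <sanitizer/asan_interface.h>
#include <unistd.h>

[[noreturn]] void FatalShutdown(int code) {
  // Unpoison this thread's stack (and fake stack) before bypassing normal
  // returns, so the skipped frames leave no stale poisoning behind.
  __asan_handle_no_return();
  _exit(code);
}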

View File

@ -22,8 +22,11 @@ extern "C" {
// Record and dump coverage info.
void __sanitizer_cov_dump();
// Dump collected coverage info. Sorts pcs by module into individual
// .sancov files.
// Clear collected coverage info.
void __sanitizer_cov_reset();
// Dump collected coverage info. Sorts pcs by module into individual .sancov
// files.
void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
#ifdef __cplusplus
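
A usage sketch of the dump/reset split, assuming a coverage-instrumented binary (SnapshotCoverage is an illustrative name):

#include <sanitizer/coverage_interface.h>

// Illustrative: write the pcs collected so far into per-module .sancov
// files, then start a fresh measurement window.
void SnapshotCoverage() {
  __sanitizer_cov_dump();
  __sanitizer_cov_reset();
}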

View File

@ -0,0 +1,33 @@
//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of HWAddressSanitizer.
//
// Public interface header.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_HWASAN_INTERFACE_H
#define SANITIZER_HWASAN_INTERFACE_H
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// This function may be optionally provided by the user and should return
// a string containing HWASan runtime options. See asan_flags.h for details.
const char* __hwasan_default_options();
void __hwasan_enable_allocator_tagging();
void __hwasan_disable_allocator_tagging();
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_HWASAN_INTERFACE_H
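
A minimal sketch of providing the optional hook from user code (the option string is illustrative):

#include <sanitizer/hwasan_interface.h>

// If defined, HWASan calls this during startup to obtain extra options.
extern "C" const char *__hwasan_default_options() {
  return "verbosity=1";  // Illustrative option string.
}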

View File

@ -64,8 +64,14 @@ extern "C" {
// for the program it is linked into (if the return value is non-zero). This
// function must be defined as returning a constant value; any behavior beyond
// that is unsupported.
// To avoid dead stripping, you may need to define this function with
// __attribute__((used)).
int __lsan_is_turned_off();
// This function may be optionally provided by the user and should return
// a string containing LSan runtime options. See lsan_flags.inc for details.
const char *__lsan_default_options();
// This function may be optionally provided by the user and should return
// a string containing LSan suppressions.
const char *__lsan_default_suppressions();
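
A sketch combining these optional hooks in user code (the suppression pattern is illustrative):

#include <sanitizer/lsan_interface.h>

// Keep leak checking enabled (return 1 to opt out); __attribute__((used))
// protects the definition from dead stripping, as noted above.
extern "C" __attribute__((used)) int __lsan_is_turned_off() { return 0; }

extern "C" const char *__lsan_default_suppressions() {
  return "leak:SomeThirdPartyLib\n";  // Illustrative pattern.
}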

View File

@ -0,0 +1,34 @@
//===-- sanitizer/scudo_interface.h -----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// Public Scudo interface header.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_SCUDO_INTERFACE_H_
#define SANITIZER_SCUDO_INTERFACE_H_
#include <sanitizer/common_interface_defs.h>
#ifdef __cplusplus
extern "C" {
#endif
// This function may be optionally provided by a user and should return
// a string containing Scudo runtime options. See scudo_flags.h for details.
const char* __scudo_default_options();
// This function can be used to set the RSS limit at runtime. The limit can
// be either the hard limit (HardLimit=1) or the soft limit (HardLimit=0),
// and can be removed by setting LimitMb to 0. This function's parameters
// should come from a fully trusted source, to avoid security mishaps.
void __scudo_set_rss_limit(unsigned long LimitMb, int HardLimit);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // SANITIZER_SCUDO_INTERFACE_H_
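
A usage sketch, assuming a program linked against Scudo (the option string and limit value are illustrative):

#include <sanitizer/scudo_interface.h>

extern "C" const char *__scudo_default_options() {
  return "DeallocationTypeMismatch=false";  // Illustrative option string.
}

void CapResidentMemory() {
  // Illustrative: impose a 512 MB soft limit; LimitMb=0 would remove it.
  __scudo_set_rss_limit(512, /*HardLimit=*/0);
}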

View File

@ -44,6 +44,11 @@ const unsigned __tsan_mutex_linker_init = 1 << 0;
const unsigned __tsan_mutex_write_reentrant = 1 << 1;
// Mutex is read reentrant.
const unsigned __tsan_mutex_read_reentrant = 1 << 2;
// Mutex does not have static storage duration, and must not be used after
// its destructor runs. The opposite of __tsan_mutex_linker_init.
// If this flag is passed to __tsan_mutex_destroy, then the destruction
// is ignored unless this flag was previously set on the mutex.
const unsigned __tsan_mutex_not_static = 1 << 8;
// Mutex operation flags:
@ -70,6 +75,7 @@ void __tsan_mutex_create(void *addr, unsigned flags);
// Annotate destruction of a mutex.
// Supported flags:
// - __tsan_mutex_linker_init
// - __tsan_mutex_not_static
void __tsan_mutex_destroy(void *addr, unsigned flags);
// Annotate start of lock operation.
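
A sketch of how the new flag pairs up across create/destroy annotations for a dynamically allocated lock (SpinLock is an illustrative wrapper; the lock/unlock annotations are elided):

#include <sanitizer/tsan_interface.h>

class SpinLock {
 public:
  SpinLock() { __tsan_mutex_create(this, __tsan_mutex_not_static); }
  ~SpinLock() {
    // Passing __tsan_mutex_not_static again means this destruction is
    // honored rather than ignored (see the flag's description above).
    __tsan_mutex_destroy(this, __tsan_mutex_not_static);
  }
  // Lock()/Unlock() would add the usual pre/post lock annotations.
};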

View File

@ -106,6 +106,14 @@ extern uintptr_t __xray_function_address(int32_t FuncId);
/// encounter errors (when there are no instrumented functions, etc.).
extern size_t __xray_max_function_id();
/// Initialize the required XRay data structures. This is useful in cases where
/// users want to control precisely when the XRay instrumentation data
/// structures are initialized, for example when the XRay library is built with
/// the XRAY_NO_PREINIT preprocessor definition.
///
/// Calling __xray_init() more than once is safe across multiple threads.
extern void __xray_init();
} // end extern "C"
#endif // XRAY_XRAY_INTERFACE_H
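
A usage sketch for builds made with XRAY_NO_PREINIT (__xray_patch() and __xray_unpatch() are existing interface calls):

#include <xray/xray_interface.h>

int main() {
  // With XRAY_NO_PREINIT the runtime is not set up before main(), so
  // initialize explicitly; repeated calls to __xray_init() are safe.
  __xray_init();
  __xray_patch();
  // ... instrumented work ...
  __xray_unpatch();
}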

View File

@ -128,6 +128,16 @@ enum XRayLogFlushStatus {
XRAY_LOG_FLUSHED = 2,
};
/// This enum indicates the installation state of a logging implementation,
/// when associating a mode with a particular logging implementation through
/// `__xray_log_register_mode(...)` or through `__xray_log_select_mode(...)`.
enum XRayLogRegisterStatus {
XRAY_REGISTRATION_OK = 0,
XRAY_DUPLICATE_MODE = 1,
XRAY_MODE_NOT_FOUND = 2,
XRAY_INCOMPLETE_IMPL = 3,
};
/// A valid XRay logging implementation MUST provide all of the function
/// pointers in XRayLogImpl when being installed through `__xray_set_log_impl`.
/// To be precise, ALL the functions pointers MUST NOT be nullptr.
@ -159,6 +169,9 @@ struct XRayLogImpl {
/// always have a handler for function entry and exit events. In case the
/// implementation wants to support arg1 (or other future extensions to XRay
/// logging) those MUST be installed by the installed 'log_init' handler.
///
/// Because we didn't want to change the ABI of this struct, the arg1 handler
/// may be silently overwritten during initialization as well.
void (*handle_arg0)(int32_t, XRayEntryType);
/// The log implementation provided routine for when __xray_log_flushLog() is
@ -186,6 +199,34 @@ struct XRayLogImpl {
/// called while in any other states.
void __xray_set_log_impl(XRayLogImpl Impl);
/// This function registers a logging implementation against a "mode"
/// identifier. This allows multiple modes to be registered, and chosen at
/// runtime using the same mode identifier through
/// `__xray_log_select_mode(...)`.
///
/// We treat the Mode identifier as a null-terminated byte string; it is the
/// identifier used later when retrieving the log impl.
///
/// Returns:
/// - XRAY_REGISTRATION_OK on success.
/// - XRAY_DUPLICATE_MODE when an implementation is already associated with
/// the provided Mode; does not update the already-registered
/// implementation.
XRayLogRegisterStatus __xray_log_register_mode(const char *Mode,
XRayLogImpl Impl);
/// This function selects the implementation associated with Mode that has been
/// registered through __xray_log_register_mode(...) and installs that
/// implementation (as if through calling __xray_set_log_impl(...)). The same
/// caveats apply to __xray_log_select_mode(...) as with
/// __xray_set_log_impl(...).
///
/// Returns:
/// - XRAY_REGISTRATION_OK on success.
/// - XRAY_MODE_NOT_FOUND if there is no implementation associated with Mode;
/// does not update the currently installed implementation.
XRayLogRegisterStatus __xray_log_select_mode(const char *Mode);
/// This function removes the currently installed implementation. It will also
/// uninstall any handlers that have been previously installed. It does NOT
/// unpatch the instrumentation sleds.
@ -220,12 +261,19 @@ XRayLogFlushStatus __xray_log_flushLog();
namespace __xray {
// Options used by the LLVM XRay FDR implementation.
/// Options used by the LLVM XRay FDR logging implementation.
struct FDRLoggingOptions {
bool ReportErrors = false;
int Fd = -1;
};
/// Options used by the LLVM XRay Basic (Naive) logging implementation.
struct BasicLoggingOptions {
int DurationFilterMicros = 0;
size_t MaxStackDepth = 0;
size_t ThreadBufferSize = 0;
};
} // namespace __xray
#endif // XRAY_XRAY_LOG_INTERFACE_H
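
A sketch of registering and then selecting a mode with a do-nothing implementation (the mode name and handler bodies are illustrative):

#include <xray/xray_log_interface.h>
#include <cstddef>
#include <cstdint>

static XRayLogInitStatus NopInit(size_t, size_t, void *, size_t) {
  return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
}
static XRayLogInitStatus NopFinalize() {
  return XRayLogInitStatus::XRAY_LOG_FINALIZED;
}
static void NopHandleArg0(int32_t, XRayEntryType) {}
static XRayLogFlushStatus NopFlush() {
  return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
}

void InstallNopMode() {
  XRayLogImpl Impl{NopInit, NopFinalize, NopHandleArg0, NopFlush};
  if (__xray_log_register_mode("nop", Impl) ==
      XRayLogRegisterStatus::XRAY_REGISTRATION_OK)
    __xray_log_select_mode("nop");  // Installs as if via __xray_set_log_impl.
}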

View File

@ -17,6 +17,8 @@
#ifndef XRAY_XRAY_RECORDS_H
#define XRAY_XRAY_RECORDS_H
#include <cstdint>
namespace __xray {
enum FileTypes {
@ -65,18 +67,23 @@ static_assert(sizeof(XRayFileHeader) == 32, "XRayFileHeader != 32 bytes");
enum RecordTypes {
NORMAL = 0,
ARG_PAYLOAD = 1,
};
struct alignas(32) XRayRecord {
// This is the type of the record being written. We use 16 bits to allow us to
// treat this as a discriminant, and so that the first 4 bytes get packed
// properly. See RecordTypes for more supported types.
uint16_t RecordType = 0;
uint16_t RecordType = RecordTypes::NORMAL;
// The CPU where the thread is running. We assume number of CPUs <= 256.
uint8_t CPU = 0;
// The type of the event. Usually either ENTER = 0 or EXIT = 1.
// The type of the event. One of the following:
// ENTER = 0
// EXIT = 1
// TAIL_EXIT = 2
// ENTER_ARG = 3
uint8_t Type = 0;
// The function ID for the record.
@ -94,6 +101,32 @@ struct alignas(32) XRayRecord {
static_assert(sizeof(XRayRecord) == 32, "XRayRecord != 32 bytes");
struct alignas(32) XRayArgPayload {
// We use the same 16 bits as a discriminant for the records in the log here
// too, and so that the first 4 bytes are packed properly.
uint16_t RecordType = RecordTypes::ARG_PAYLOAD;
// Add a few bytes to pad.
uint8_t Padding[2] = {};
// The function ID for the record.
int32_t FuncId = 0;
// The thread ID for the currently running thread.
uint32_t TId = 0;
// Add more padding.
uint8_t Padding2[4] = {};
// The argument payload.
uint64_t Arg = 0;
// The rest of this record ought to be left as padding.
uint8_t TailPadding[8] = {};
} __attribute__((packed));
static_assert(sizeof(XRayArgPayload) == 32, "XRayArgPayload != 32 bytes");
} // namespace __xray
#endif // XRAY_XRAY_RECORDS_H
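
A sketch of consuming a raw buffer of these 32-byte records by dispatching on the 16-bit discriminant (the buffer's origin is assumed; the numeric kinds mirror RecordTypes above):

#include <cstddef>
#include <cstdint>
#include <cstring>

void VisitRecords(const char *buf, size_t len) {
  for (size_t off = 0; off + 32 <= len; off += 32) {
    uint16_t record_type;
    memcpy(&record_type, buf + off, sizeof(record_type));
    if (record_type == 0) {
      // NORMAL: an XRayRecord (entry/exit/tail-exit/enter-arg event).
    } else if (record_type == 1) {
      // ARG_PAYLOAD: an XRayArgPayload carrying the logged argument.
    }
  }
}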

View File

@ -27,7 +27,7 @@
#if !defined(BLOCK_EXPORT)
# if defined(__cplusplus)
# define BLOCK_EXPORT extern "C"
# define BLOCK_EXPORT extern "C"
# else
# define BLOCK_EXPORT extern
# endif

View File

@ -27,7 +27,7 @@
#if !defined(BLOCK_EXPORT)
# if defined(__cplusplus)
# define BLOCK_EXPORT extern "C"
# define BLOCK_EXPORT extern "C"
# else
# define BLOCK_EXPORT extern
# endif

View File

@ -16,8 +16,10 @@
#include "asan_allocator.h"
#include "asan_flags.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
namespace __asan {
@ -110,8 +112,9 @@ void AsanDeactivate() {
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
disabled.thread_local_quarantine_size_kb = 0;
disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
disabled.max_redzone = 16;
// Redzone must be at least Max(16, granularity) bytes long.
disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
disabled.max_redzone = disabled.min_redzone;
disabled.alloc_dealloc_mismatch = false;
disabled.may_return_null = true;
ReInitializeAllocator(disabled);

View File

@ -84,7 +84,10 @@ struct ChunkHeader {
// This field is used for small sizes. For large sizes it is equal to
// SizeClassMap::kMaxSize and the actual size is stored in the
// SecondaryAllocator's metadata.
u32 user_requested_size;
u32 user_requested_size : 29;
// align < 8 -> 0
// else -> log2(min(align, 512)) - 2
u32 user_requested_alignment_log : 3;
u32 alloc_context_id;
};
@ -271,9 +274,9 @@ struct Allocator {
atomic_store(&max_redzone, options.max_redzone, memory_order_release);
}
void Initialize(const AllocatorOptions &options) {
void InitLinkerInitialized(const AllocatorOptions &options) {
SetAllocatorMayReturnNull(options.may_return_null);
allocator.Init(options.release_to_os_interval_ms);
allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
SharedInitCode(options);
}
@ -351,6 +354,20 @@ struct Allocator {
return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
}
static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
if (user_requested_alignment < 8)
return 0;
if (user_requested_alignment > 512)
user_requested_alignment = 512;
return Log2(user_requested_alignment) - 2;
}
static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
if (user_requested_alignment_log == 0)
return 0;
return 1LL << (user_requested_alignment_log + 2);
}
// We have an address between two chunks, and we want to report just one.
AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
AsanChunk *right_chunk) {
@ -385,6 +402,8 @@ struct Allocator {
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
const uptr user_requested_alignment_log =
ComputeUserRequestedAlignmentLog(alignment);
if (alignment < min_alignment)
alignment = min_alignment;
if (size == 0) {
@ -472,6 +491,7 @@ struct Allocator {
meta[0] = size;
meta[1] = chunk_beg;
}
m->user_requested_alignment_log = user_requested_alignment_log;
m->alloc_context_id = StackDepotPut(*stack);
@ -573,8 +593,8 @@ struct Allocator {
}
}
void Deallocate(void *ptr, uptr delete_size, BufferedStackTrace *stack,
AllocType alloc_type) {
void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
BufferedStackTrace *stack, AllocType alloc_type) {
uptr p = reinterpret_cast<uptr>(ptr);
if (p == 0) return;
@ -601,11 +621,14 @@ struct Allocator {
ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
(AllocType)alloc_type);
}
}
if (delete_size && flags()->new_delete_type_mismatch &&
delete_size != m->UsedSize()) {
ReportNewDeleteSizeMismatch(p, delete_size, stack);
} else {
if (flags()->new_delete_type_mismatch &&
(alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
((delete_size && delete_size != m->UsedSize()) ||
ComputeUserRequestedAlignmentLog(delete_alignment) !=
m->user_requested_alignment_log)) {
ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
}
}
QuarantineChunk(m, ptr, stack);
@ -631,7 +654,7 @@ struct Allocator {
// If realloc() races with free(), we may start copying freed memory.
// However, we will report racy double-free later anyway.
REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
Deallocate(old_ptr, 0, stack, FROM_MALLOC);
Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
}
return new_ptr;
}
@ -716,6 +739,22 @@ struct Allocator {
return AsanChunkView(m1);
}
void Purge() {
AsanThread *t = GetCurrentThread();
if (t) {
AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
quarantine.DrainAndRecycle(GetQuarantineCache(ms),
QuarantineCallback(GetAllocatorCache(ms)));
}
{
SpinMutexLock l(&fallback_mutex);
quarantine.DrainAndRecycle(&fallback_quarantine_cache,
QuarantineCallback(&fallback_allocator_cache));
}
allocator.ForceReleaseToOS();
}
void PrintStats() {
allocator.PrintStats();
quarantine.PrintStats();
@ -750,6 +789,9 @@ bool AsanChunkView::IsQuarantined() const {
uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
u32 AsanChunkView::UserRequestedAlignment() const {
return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
}
uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
AllocType AsanChunkView::GetAllocType() const {
@ -775,7 +817,7 @@ StackTrace AsanChunkView::GetFreeStack() const {
}
void InitializeAllocator(const AllocatorOptions &options) {
instance.Initialize(options);
instance.InitLinkerInitialized(options);
}
void ReInitializeAllocator(const AllocatorOptions &options) {
@ -802,12 +844,12 @@ void PrintInternalAllocatorStats() {
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
instance.Deallocate(ptr, 0, stack, alloc_type);
instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
instance.Deallocate(ptr, size, stack, alloc_type);
void asan_delete(void *ptr, uptr size, uptr alignment,
BufferedStackTrace *stack, AllocType alloc_type) {
instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
@ -823,7 +865,7 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
if (size == 0) {
if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
instance.Deallocate(p, 0, stack, FROM_MALLOC);
instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
return nullptr;
}
// Allocate a size of 1 if we shouldn't free() on Realloc to 0
@ -839,6 +881,10 @@ void *asan_valloc(uptr size, BufferedStackTrace *stack) {
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
errno = errno_ENOMEM;
return AsanAllocator::FailureHandler::OnBadRequest();
}
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
return SetErrnoOnNull(
@ -1007,6 +1053,10 @@ uptr __sanitizer_get_allocated_size(const void *p) {
return allocated_size;
}
void __sanitizer_purge_allocator() {
instance.Purge();
}
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
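
A standalone sketch of the 3-bit alignment encoding introduced above (the helpers are illustrative re-implementations mirroring ComputeUserRequestedAlignmentLog/ComputeUserAlignment, not the runtime's own code):

#include <cassert>
#include <cstdint>

// Alignments below 8 encode to 0; 8..512 encode to log2(align) - 2, so the
// value fits the 3-bit user_requested_alignment_log field (1..7).
static uint32_t EncodeAlignment(uint64_t align) {
  if (align < 8) return 0;
  if (align > 512) align = 512;
  uint32_t log = 0;
  while ((1ULL << (log + 1)) <= align) ++log;  // floor(log2(align))
  return log - 2;
}

static uint64_t DecodeAlignment(uint32_t log) {
  return log == 0 ? 0 : 1ULL << (log + 2);
}

int main() {
  assert(EncodeAlignment(8) == 1 && DecodeAlignment(1) == 8);
  assert(EncodeAlignment(512) == 7 && DecodeAlignment(7) == 512);
  assert(EncodeAlignment(4096) == 7);  // clamped to 512
}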

View File

@ -58,6 +58,7 @@ class AsanChunkView {
uptr Beg() const; // First byte of user memory.
uptr End() const; // Last byte of user memory.
uptr UsedSize() const; // Size requested by the user.
u32 UserRequestedAlignment() const; // Originally requested alignment.
uptr AllocTid() const;
uptr FreeTid() const;
bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
@ -119,7 +120,11 @@ struct AsanMapUnmapCallback {
};
#if SANITIZER_CAN_USE_ALLOCATOR64
# if defined(__powerpc64__)
# if SANITIZER_FUCHSIA
const uptr kAllocatorSpace = ~(uptr)0;
const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
typedef DefaultSizeClassMap SizeClassMap;
# elif defined(__powerpc64__)
const uptr kAllocatorSpace = 0xa0000000000ULL;
const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
typedef DefaultSizeClassMap SizeClassMap;
@ -193,8 +198,8 @@ struct AsanThreadLocalMallocStorage {
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type);
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
AllocType alloc_type);
void asan_delete(void *ptr, uptr size, uptr alignment,
BufferedStackTrace *stack, AllocType alloc_type);
void *asan_malloc(uptr size, BufferedStackTrace *stack);
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);

View File

@ -122,6 +122,7 @@ static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
}
descr->chunk_begin = chunk.Beg();
descr->chunk_size = chunk.UsedSize();
descr->user_requested_alignment = chunk.UserRequestedAlignment();
descr->alloc_type = chunk.GetAllocType();
}
@ -150,7 +151,7 @@ static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
(void *)descr.chunk_begin,
(void *)(descr.chunk_begin + descr.chunk_size));
str.append("%s", d.EndLocation());
str.append("%s", d.Default());
Printf("%s", str.data());
}
@ -260,7 +261,7 @@ static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
// FIXME: we may want to also print the size of the access here,
// but in case of accesses generated by memset it may be confusing.
str.append("%s <== Memory access at offset %zd %s this variable%s\n",
d.Location(), addr, pos_descr, d.EndLocation());
d.Location(), addr, pos_descr, d.Default());
} else {
str.append("\n");
}
@ -295,7 +296,7 @@ static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
MaybeDemangleGlobalName(g.name));
PrintGlobalLocation(&str, g);
str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
str.append("%s", d.EndLocation());
str.append("%s", d.Default());
PrintGlobalNameIfASCII(&str, g);
Printf("%s", str.data());
}
@ -335,6 +336,26 @@ void GlobalAddressDescription::Print(const char *bug_type) const {
}
}
bool GlobalAddressDescription::PointsInsideTheSameVariable(
const GlobalAddressDescription &other) const {
if (size == 0 || other.size == 0) return false;
for (uptr i = 0; i < size; i++) {
const __asan_global &a = globals[i];
for (uptr j = 0; j < other.size; j++) {
const __asan_global &b = other.globals[j];
if (a.beg == b.beg &&
a.beg <= addr &&
b.beg <= other.addr &&
(addr + access_size) < (a.beg + a.size) &&
(other.addr + other.access_size) < (b.beg + b.size))
return true;
}
}
return false;
}
void StackAddressDescription::Print() const {
Decorator d;
char tname[128];
@ -343,10 +364,10 @@ void StackAddressDescription::Print() const {
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
if (!frame_descr) {
Printf("%s\n", d.EndLocation());
Printf("%s\n", d.Default());
return;
}
Printf(" at offset %zu in frame%s\n", offset, d.EndLocation());
Printf(" at offset %zu in frame%s\n", offset, d.Default());
// Now we print the frame where the alloca has happened.
// We print this frame as a stack trace with one element.
@ -355,7 +376,7 @@ void StackAddressDescription::Print() const {
// previously. That's unfortunate, but I have no better solution,
// especially given that the alloca may be from entirely different place
// (e.g. use-after-scope, or different thread's stack).
Printf("%s", d.EndLocation());
Printf("%s", d.Default());
StackTrace alloca_stack(&frame_pc, 1);
alloca_stack.Print();
@ -405,18 +426,18 @@ void HeapAddressDescription::Print() const {
Printf("%sfreed by thread T%d%s here:%s\n", d.Allocation(),
free_thread->tid,
ThreadNameWithParenthesis(free_thread, tname, sizeof(tname)),
d.EndAllocation());
d.Default());
StackTrace free_stack = GetStackTraceFromId(free_stack_id);
free_stack.Print();
Printf("%spreviously allocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
d.Default());
} else {
Printf("%sallocated by thread T%d%s here:%s\n", d.Allocation(),
alloc_thread->tid,
ThreadNameWithParenthesis(alloc_thread, tname, sizeof(tname)),
d.EndAllocation());
d.Default());
}
alloc_stack.Print();
DescribeThread(GetCurrentThread());

View File

@ -34,11 +34,8 @@ class Decorator : public __sanitizer::SanitizerCommonDecorator {
public:
Decorator() : SanitizerCommonDecorator() {}
const char *Access() { return Blue(); }
const char *EndAccess() { return Default(); }
const char *Location() { return Green(); }
const char *EndLocation() { return Default(); }
const char *Allocation() { return Magenta(); }
const char *EndAllocation() { return Default(); }
const char *ShadowByte(u8 byte) {
switch (byte) {
@ -72,9 +69,6 @@ class Decorator : public __sanitizer::SanitizerCommonDecorator {
return Default();
}
}
const char *EndShadowByte() { return Default(); }
const char *MemoryByte() { return Magenta(); }
const char *EndMemoryByte() { return Default(); }
};
enum ShadowKind : u8 {
@ -108,6 +102,7 @@ struct ChunkAccess {
sptr offset;
uptr chunk_begin;
uptr chunk_size;
u32 user_requested_alignment : 12;
u32 access_type : 2;
u32 alloc_type : 2;
};
@ -151,6 +146,10 @@ struct GlobalAddressDescription {
u8 size;
void Print(const char *bug_type = "") const;
// Returns true when this description points inside the same global variable
// as the other one; the two descriptions may have different addresses
// within the variable.
bool PointsInsideTheSameVariable(const GlobalAddressDescription &other) const;
};
bool GetGlobalAddressInformation(uptr addr, uptr access_size,

View File

@ -13,7 +13,6 @@
//===----------------------------------------------------------------------===//
#include "asan_errors.h"
#include <signal.h>
#include "asan_descriptions.h"
#include "asan_mapping.h"
#include "asan_report.h"
@ -22,82 +21,26 @@
namespace __asan {
void ErrorStackOverflow::Print() {
Decorator d;
Printf("%s", d.Warning());
Report(
"ERROR: AddressSanitizer: %s on address %p"
" (pc %p bp %p sp %p T%d)\n", scariness.GetDescription(),
(void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
Printf("%s", d.EndWarning());
scariness.Print();
BufferedStackTrace stack;
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
common_flags()->fast_unwind_on_fatal);
stack.Print();
ReportErrorSummary(scariness.GetDescription(), &stack);
}
static void MaybeDumpInstructionBytes(uptr pc) {
if (!flags()->dump_instruction_bytes || (pc < GetPageSizeCached())) return;
InternalScopedString str(1024);
str.append("First 16 instruction bytes at pc: ");
if (IsAccessibleMemoryRange(pc, 16)) {
for (int i = 0; i < 16; ++i) {
PrintMemoryByte(&str, "", ((u8 *)pc)[i], /*in_shadow*/ false, " ");
}
str.append("\n");
} else {
str.append("unaccessible\n");
}
Report("%s", str.data());
}
static void MaybeDumpRegisters(void *context) {
if (!flags()->dump_registers) return;
SignalContext::DumpAllRegisters(context);
}
static void MaybeReportNonExecRegion(uptr pc) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX
MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
MemoryMappedSegment segment;
while (proc_maps.Next(&segment)) {
if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())
Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
}
static void OnStackUnwind(const SignalContext &sig,
const void *callback_context,
BufferedStackTrace *stack) {
bool fast = common_flags()->fast_unwind_on_fatal;
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
// On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
// yields the call stack of the signal's handler and not of the code
// that raised the signal (as it does on Linux).
fast = true;
#endif
// Tests and maybe some users expect that scariness is going to be printed
// just before the stack. As only ASan has a scariness score, there is no
// corresponding code in sanitizer_common, so we use this callback to
// print it.
static_cast<const ScarinessScoreBase *>(callback_context)->Print();
GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context, fast);
}
void ErrorDeadlySignal::Print() {
Decorator d;
Printf("%s", d.Warning());
const char *description = __sanitizer::DescribeSignalOrException(signo);
Report(
"ERROR: AddressSanitizer: %s on unknown address %p (pc %p bp %p sp %p "
"T%d)\n",
description, (void *)addr, (void *)pc, (void *)bp, (void *)sp, tid);
Printf("%s", d.EndWarning());
if (pc < GetPageSizeCached()) Report("Hint: pc points to the zero page.\n");
if (is_memory_access) {
const char *access_type =
write_flag == SignalContext::WRITE
? "WRITE"
: (write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
Report("The signal is caused by a %s memory access.\n", access_type);
if (addr < GetPageSizeCached())
Report("Hint: address points to the zero page.\n");
}
MaybeReportNonExecRegion(pc);
scariness.Print();
BufferedStackTrace stack;
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, context,
common_flags()->fast_unwind_on_fatal);
stack.Print();
MaybeDumpInstructionBytes(pc);
MaybeDumpRegisters(context);
Printf("AddressSanitizer can not provide additional info.\n");
ReportErrorSummary(description, &stack);
ReportDeadlySignal(signal, tid, &OnStackUnwind, &scariness);
}
void ErrorDoubleFree::Print() {
@ -109,7 +52,7 @@ void ErrorDoubleFree::Print() {
"thread T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
scariness.Print();
GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
second_free_stack->top_frame_bp);
@ -118,7 +61,7 @@ void ErrorDoubleFree::Print() {
ReportErrorSummary(scariness.GetDescription(), &stack);
}
void ErrorNewDeleteSizeMismatch::Print() {
void ErrorNewDeleteTypeMismatch::Print() {
Decorator d;
Printf("%s", d.Warning());
char tname[128];
@ -127,11 +70,29 @@ void ErrorNewDeleteSizeMismatch::Print() {
"T%d%s:\n",
scariness.GetDescription(), addr_description.addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s object passed to delete has wrong type:\n", d.EndWarning());
Printf(
" size of the allocated type: %zd bytes;\n"
" size of the deallocated type: %zd bytes.\n",
addr_description.chunk_access.chunk_size, delete_size);
Printf("%s object passed to delete has wrong type:\n", d.Default());
if (delete_size != 0) {
Printf(
" size of the allocated type: %zd bytes;\n"
" size of the deallocated type: %zd bytes.\n",
addr_description.chunk_access.chunk_size, delete_size);
}
const uptr user_alignment =
addr_description.chunk_access.user_requested_alignment;
if (delete_alignment != user_alignment) {
char user_alignment_str[32];
char delete_alignment_str[32];
internal_snprintf(user_alignment_str, sizeof(user_alignment_str),
"%zd bytes", user_alignment);
internal_snprintf(delete_alignment_str, sizeof(delete_alignment_str),
"%zd bytes", delete_alignment);
static const char *kDefaultAlignment = "default-aligned";
Printf(
" alignment of the allocated type: %s;\n"
" alignment of the deallocated type: %s.\n",
user_alignment > 0 ? user_alignment_str : kDefaultAlignment,
delete_alignment > 0 ? delete_alignment_str : kDefaultAlignment);
}
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
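
For context, a sketch of user code that would now trigger both the size and the alignment lines of this report (requires C++17 aligned new; Widget is illustrative):

struct alignas(64) Widget { int x; };

int main() {
  Widget *w = new Widget;  // uses the 64-byte-aligned operator new
  // Deleting through the wrong type selects the default-aligned (and
  // wrongly sized) operator delete: new-delete-type-mismatch.
  delete reinterpret_cast<char *>(w);
}
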
@ -152,7 +113,7 @@ void ErrorFreeNotMalloced::Print() {
"which was not malloc()-ed: %p in thread T%d%s\n",
addr_description.Address(), tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)));
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
CHECK_GT(free_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
@ -173,7 +134,7 @@ void ErrorAllocTypeMismatch::Print() {
scariness.GetDescription(),
alloc_names[alloc_type], dealloc_names[dealloc_type],
addr_description.addr);
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
CHECK_GT(dealloc_stack->size, 0);
scariness.Print();
GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
@ -192,7 +153,7 @@ void ErrorMallocUsableSizeNotOwned::Print() {
"ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
"pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
@ -205,7 +166,7 @@ void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
"ERROR: AddressSanitizer: attempting to call "
"__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
addr_description.Address());
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
stack->Print();
addr_description.Print();
ReportErrorSummary(scariness.GetDescription(), stack);
@ -222,7 +183,7 @@ void ErrorStringFunctionMemoryRangesOverlap::Print() {
bug_type, addr1_description.Address(),
addr1_description.Address() + length1, addr2_description.Address(),
addr2_description.Address() + length2);
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
scariness.Print();
stack->Print();
addr1_description.Print();
@ -235,7 +196,7 @@ void ErrorStringFunctionSizeOverflow::Print() {
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
scariness.GetDescription(), size);
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
scariness.Print();
stack->Print();
addr_description.Print();
@ -263,7 +224,7 @@ void ErrorODRViolation::Print() {
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
global1.beg);
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
InternalScopedString g1_loc(256), g2_loc(256);
PrintGlobalLocation(&g1_loc, global1);
PrintGlobalLocation(&g2_loc, global2);
@ -292,7 +253,7 @@ void ErrorInvalidPointerPair::Print() {
Printf("%s", d.Warning());
Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
addr1_description.Address(), addr2_description.Address());
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
GET_STACK_TRACE_FATAL(pc, bp);
stack.Print();
addr1_description.Print();
@ -477,9 +438,14 @@ static void PrintShadowMemoryForAddress(uptr addr) {
InternalScopedString str(4096 * 8);
str.append("Shadow bytes around the buggy address:\n");
for (int i = -5; i <= 5; i++) {
uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
// Skip rows that would be outside the shadow range. This can happen when
// the user address is near the bottom, top, or shadow gap of the address
// space.
if (!AddrIsInShadow(row_shadow_addr)) continue;
const char *prefix = (i == 0) ? "=>" : " ";
PrintShadowBytes(&str, prefix, (u8 *)(aligned_shadow + i * n_bytes_per_row),
(u8 *)shadow_addr, n_bytes_per_row);
PrintShadowBytes(&str, prefix, (u8 *)row_shadow_addr, (u8 *)shadow_addr,
n_bytes_per_row);
}
if (flags()->print_legend) PrintLegend(&str);
Printf("%s", str.data());
@ -491,13 +457,13 @@ void ErrorGeneric::Print() {
uptr addr = addr_description.Address();
Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
bug_descr, (void *)addr, pc, bp, sp);
Printf("%s", d.EndWarning());
Printf("%s", d.Default());
char tname[128];
Printf("%s%s of size %zu at %p thread T%d%s%s\n", d.Access(),
access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
(void *)addr, tid,
ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.EndAccess());
ThreadNameWithParenthesis(tid, tname, sizeof(tname)), d.Default());
scariness.Print();
GET_STACK_TRACE_FATAL(pc, bp);

View File

@ -27,61 +27,28 @@ struct ErrorBase {
u32 tid;
};
struct ErrorStackOverflow : ErrorBase {
uptr addr, pc, bp, sp;
// ErrorStackOverflow never owns the context.
void *context;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorStackOverflow() = default;
ErrorStackOverflow(u32 tid, const SignalContext &sig)
: ErrorBase(tid),
addr(sig.addr),
pc(sig.pc),
bp(sig.bp),
sp(sig.sp),
context(sig.context) {
scariness.Clear();
scariness.Scare(10, "stack-overflow");
}
void Print();
};
struct ErrorDeadlySignal : ErrorBase {
uptr addr, pc, bp, sp;
// ErrorDeadlySignal never owns the context.
void *context;
int signo;
SignalContext::WriteFlag write_flag;
bool is_memory_access;
SignalContext signal;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorDeadlySignal() = default;
ErrorDeadlySignal(u32 tid, const SignalContext &sig, int signo_)
: ErrorBase(tid),
addr(sig.addr),
pc(sig.pc),
bp(sig.bp),
sp(sig.sp),
context(sig.context),
signo(signo_),
write_flag(sig.write_flag),
is_memory_access(sig.is_memory_access) {
ErrorDeadlySignal(u32 tid, const SignalContext &sig)
: ErrorBase(tid), signal(sig) {
scariness.Clear();
if (is_memory_access) {
if (addr < GetPageSizeCached()) {
scariness.Scare(10, "null-deref");
} else if (addr == pc) {
scariness.Scare(60, "wild-jump");
} else if (write_flag == SignalContext::WRITE) {
scariness.Scare(30, "wild-addr-write");
} else if (write_flag == SignalContext::READ) {
scariness.Scare(20, "wild-addr-read");
} else {
scariness.Scare(25, "wild-addr");
}
} else {
if (signal.IsStackOverflow()) {
scariness.Scare(10, "stack-overflow");
} else if (!signal.is_memory_access) {
scariness.Scare(10, "signal");
} else if (signal.addr < GetPageSizeCached()) {
scariness.Scare(10, "null-deref");
} else if (signal.addr == signal.pc) {
scariness.Scare(60, "wild-jump");
} else if (signal.write_flag == SignalContext::WRITE) {
scariness.Scare(30, "wild-addr-write");
} else if (signal.write_flag == SignalContext::READ) {
scariness.Scare(20, "wild-addr-read");
} else {
scariness.Scare(25, "wild-addr");
}
}
void Print();
@ -104,17 +71,19 @@ struct ErrorDoubleFree : ErrorBase {
void Print();
};
struct ErrorNewDeleteSizeMismatch : ErrorBase {
// ErrorNewDeleteSizeMismatch doesn't own the stack trace.
struct ErrorNewDeleteTypeMismatch : ErrorBase {
// ErrorNewDeleteTypeMismatch doesn't own the stack trace.
const BufferedStackTrace *free_stack;
HeapAddressDescription addr_description;
uptr delete_size;
uptr delete_alignment;
// VS2013 doesn't implement unrestricted unions, so we need a trivial default
// constructor
ErrorNewDeleteSizeMismatch() = default;
ErrorNewDeleteSizeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
uptr delete_size_)
: ErrorBase(tid), free_stack(stack), delete_size(delete_size_) {
ErrorNewDeleteTypeMismatch() = default;
ErrorNewDeleteTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
uptr delete_size_, uptr delete_alignment_)
: ErrorBase(tid), free_stack(stack), delete_size(delete_size_),
delete_alignment(delete_alignment_) {
GetHeapAddressInformation(addr, 1, &addr_description);
scariness.Clear();
scariness.Scare(10, "new-delete-type-mismatch");
@ -324,10 +293,9 @@ struct ErrorGeneric : ErrorBase {
// clang-format off
#define ASAN_FOR_EACH_ERROR_KIND(macro) \
macro(StackOverflow) \
macro(DeadlySignal) \
macro(DoubleFree) \
macro(NewDeleteSizeMismatch) \
macro(NewDeleteTypeMismatch) \
macro(FreeNotMalloced) \
macro(AllocTypeMismatch) \
macro(MallocUsableSizeNotOwned) \

View File

@ -28,9 +28,9 @@ static const u64 kAllocaRedzoneMask = 31UL;
// For small size classes inline PoisonShadow for better performance.
ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
CHECK_EQ(SHADOW_SCALE, 3); // This code expects SHADOW_SCALE=3.
u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
if (class_id <= 6) {
if (SHADOW_SCALE == 3 && class_id <= 6) {
// This code expects SHADOW_SCALE=3.
for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
shadow[i] = magic;
// Make sure this does not become memset.
@ -171,7 +171,7 @@ void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
}
}
#if SANITIZER_LINUX && !SANITIZER_ANDROID
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static THREADLOCAL FakeStack *fake_stack_tls;
FakeStack *GetTLSFakeStack() {
@ -183,7 +183,7 @@ void SetTLSFakeStack(FakeStack *fs) {
#else
FakeStack *GetTLSFakeStack() { return 0; }
void SetTLSFakeStack(FakeStack *fs) { }
#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
static FakeStack *GetFakeStack() {
AsanThread *t = GetCurrentThread();

View File

@ -118,6 +118,10 @@ void InitializeFlags() {
const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
ubsan_parser.ParseString(ubsan_default_options);
#endif
#if CAN_SANITIZE_LEAKS
const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
lsan_parser.ParseString(lsan_default_options);
#endif
// Override from command line.
asan_parser.ParseString(GetEnv("ASAN_OPTIONS"));
@ -144,6 +148,9 @@ void InitializeFlags() {
SanitizerToolName);
Die();
}
// Ensure that redzone is at least SHADOW_GRANULARITY.
if (f->redzone < (int)SHADOW_GRANULARITY)
f->redzone = SHADOW_GRANULARITY;
// Make "strict_init_order" imply "check_initialization_order".
// TODO(samsonov): Use a single runtime flag for an init-order checker.
if (f->strict_init_order) {

View File

@ -79,6 +79,10 @@ ASAN_FLAG(
"Number of seconds to sleep between printing an error report and "
"terminating the program. Useful for debugging purposes (e.g. when one "
"needs to attach gdb).")
ASAN_FLAG(
int, sleep_after_init, 0,
"Number of seconds to sleep after AddressSanitizer is initialized. "
"Useful for debugging purposes (e.g. when one needs to attach gdb).")
ASAN_FLAG(bool, check_malloc_usable_size, true,
"Allows the users to work around the bug in Nvidia drivers prior to "
"295.*.")
@ -143,11 +147,6 @@ ASAN_FLAG(int, detect_odr_violation, 2,
"If >=2, detect violation of One-Definition-Rule (ODR); "
"If ==1, detect ODR-violation only if the two variables "
"have different sizes")
ASAN_FLAG(bool, dump_instruction_bytes, false,
"If true, dump 16 bytes starting at the instruction that caused SEGV")
ASAN_FLAG(bool, dump_registers, true,
"If true, dump values of CPU registers when SEGV happens. Only "
"available on OS X for now.")
ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
ASAN_FLAG(bool, halt_on_error, true,
"Crash the program after printing the first error report "

View File

@ -0,0 +1,218 @@
//===-- asan_fuchsia.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Fuchsia-specific details.
//===---------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include <limits.h>
#include <zircon/sanitizer.h>
#include <zircon/syscalls.h>
#include <zircon/threads.h>
namespace __asan {
// The system already set up the shadow memory for us.
// __sanitizer::GetMaxUserVirtualAddress has already been called by
// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cc).
// Just do some additional sanity checks here.
void InitializeShadowMemory() {
if (Verbosity()) PrintAddressSpaceLayout();
// Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address.
__asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
DCHECK(kLowShadowBeg != kDefaultShadowSentinel);
__asan_shadow_memory_dynamic_address = kLowShadowBeg;
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
CHECK_EQ(kHighMemEnd, __sanitizer::ShadowBounds.memory_limit - 1);
CHECK_EQ(kHighMemBeg, __sanitizer::ShadowBounds.shadow_limit);
CHECK_EQ(kHighShadowBeg, __sanitizer::ShadowBounds.shadow_base);
CHECK_EQ(kShadowGapEnd, __sanitizer::ShadowBounds.shadow_base - 1);
CHECK_EQ(kLowShadowEnd, 0);
CHECK_EQ(kLowShadowBeg, 0);
}
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
UNIMPLEMENTED();
}
void AsanCheckDynamicRTPrereqs() {}
void AsanCheckIncompatibleRT() {}
void InitializeAsanInterceptors() {}
void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
void InitializePlatformExceptionHandlers() {}
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
UNIMPLEMENTED();
}
// We can use a plain thread_local variable for TSD.
static thread_local void *per_thread;
void *AsanTSDGet() { return per_thread; }
void AsanTSDSet(void *tsd) { per_thread = tsd; }
// There's no initialization needed, and the passed-in destructor
// will never be called. Instead, our own thread destruction hook
// (below) will call AsanThread::TSDDtor directly.
void AsanTSDInit(void (*destructor)(void *tsd)) {
DCHECK(destructor == &PlatformTSDDtor);
}
void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
static inline size_t AsanThreadMmapSize() {
return RoundUpTo(sizeof(AsanThread), PAGE_SIZE);
}
struct AsanThread::InitOptions {
uptr stack_bottom, stack_size;
};
// Shared setup between thread creation and startup for the initial thread.
static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
uptr user_id, bool detached,
const char *name, uptr stack_bottom,
uptr stack_size) {
// In lieu of AsanThread::Create.
AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__);
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
u32 tid =
asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
asanThreadRegistry().SetThreadName(tid, name);
// On other systems, AsanThread::Init() is called from the new
// thread itself. But on Fuchsia we already know the stack address
// range beforehand, so we can do most of the setup right now.
const AsanThread::InitOptions options = {stack_bottom, stack_size};
thread->Init(&options);
return thread;
}
// This gets the same arguments passed to Init by CreateAsanThread, above.
// We're in the creator thread before the new thread is actually started,
// but its stack address range is already known. We don't bother tracking
// the static TLS address range because the system itself already uses an
// ASan-aware allocator for that.
void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
DCHECK_NE(GetCurrentThread(), this);
DCHECK_NE(GetCurrentThread(), nullptr);
CHECK_NE(options->stack_bottom, 0);
CHECK_NE(options->stack_size, 0);
stack_bottom_ = options->stack_bottom;
stack_top_ = options->stack_bottom + options->stack_size;
}
// Called by __asan::AsanInitInternal (asan_rtl.cc).
AsanThread *CreateMainThread() {
thrd_t self = thrd_current();
char name[ZX_MAX_NAME_LEN];
CHECK_NE(__sanitizer::MainThreadStackBase, 0);
CHECK_GT(__sanitizer::MainThreadStackSize, 0);
AsanThread *t = CreateAsanThread(
nullptr, 0, reinterpret_cast<uptr>(self), true,
_zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name,
sizeof(name)) == ZX_OK
? name
: nullptr,
__sanitizer::MainThreadStackBase, __sanitizer::MainThreadStackSize);
SetCurrentThread(t);
return t;
}
// This is called before each thread creation is attempted. So, in
// its first call, the calling thread is the initial and sole thread.
static void *BeforeThreadCreateHook(uptr user_id, bool detached,
const char *name, uptr stack_bottom,
uptr stack_size) {
EnsureMainThreadIDIsCorrect();
// Strict init-order checking is thread-hostile.
if (flags()->strict_init_order) StopInitOrderChecking();
GET_STACK_TRACE_THREAD;
u32 parent_tid = GetCurrentTidOrInvalid();
return CreateAsanThread(&stack, parent_tid, user_id, detached, name,
stack_bottom, stack_size);
}
// This is called after creating a new thread (in the creating thread),
// with the pointer returned by BeforeThreadCreateHook (above).
static void ThreadCreateHook(void *hook, bool aborted) {
AsanThread *thread = static_cast<AsanThread *>(hook);
if (!aborted) {
// The thread was created successfully.
// ThreadStartHook is already running in the new thread.
} else {
// The thread wasn't created after all.
// Clean up everything we set up in BeforeThreadCreateHook.
asanThreadRegistry().FinishThread(thread->tid());
UnmapOrDie(thread, AsanThreadMmapSize());
}
}
// This is called in the newly-created thread before it runs anything else,
// with the pointer returned by BeforeThreadCreateHook (above).
// cf. asan_interceptors.cc:asan_thread_start
static void ThreadStartHook(void *hook, uptr os_id) {
AsanThread *thread = static_cast<AsanThread *>(hook);
SetCurrentThread(thread);
// In lieu of AsanThread::ThreadStart.
asanThreadRegistry().StartThread(thread->tid(), os_id, /*workerthread*/ false,
nullptr);
}
// Each thread runs this just before it exits,
// with the pointer returned by BeforeThreadCreateHook (above).
// All per-thread destructors have already been called.
static void ThreadExitHook(void *hook, uptr os_id) {
AsanThread::TSDDtor(per_thread);
}
} // namespace __asan
// These are declared (in extern "C") by <zircon/sanitizer.h>.
// The system runtime will call our definitions directly.
void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
const char *name, void *stack_base,
size_t stack_size) {
return __asan::BeforeThreadCreateHook(
reinterpret_cast<uptr>(thread), detached, name,
reinterpret_cast<uptr>(stack_base), stack_size);
}
void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
__asan::ThreadCreateHook(hook, error != thrd_success);
}
void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
__asan::ThreadStartHook(hook, reinterpret_cast<uptr>(self));
}
void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
__asan::ThreadExitHook(hook, reinterpret_cast<uptr>(self));
}
#endif // SANITIZER_FUCHSIA

View File

@ -384,6 +384,10 @@ void __asan_register_globals(__asan_global *globals, uptr n) {
}
RegisterGlobal(&globals[i]);
}
// Poison the metadata. It should not be accessible to user code.
PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global),
kAsanGlobalRedzoneMagic);
}
// Unregister an array of globals.
@ -399,6 +403,9 @@ void __asan_unregister_globals(__asan_global *globals, uptr n) {
}
UnregisterGlobal(&globals[i]);
}
// Unpoison the metadata.
PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global), 0);
}
// This method runs immediately prior to dynamic initialization in each TU,

View File

@ -15,6 +15,8 @@
#ifndef ASAN_INIT_VERSION_H
#define ASAN_INIT_VERSION_H
#include "sanitizer_common/sanitizer_platform.h"
extern "C" {
// Every time the ASan ABI changes we also change the version number in the
// __asan_init function name. Objects built with incompatible ASan ABI
@ -32,7 +34,12 @@ extern "C" {
// v6=>v7: added 'odr_indicator' to __asan_global
// v7=>v8: added '__asan_(un)register_image_globals' functions for dead
// stripping support on Mach-O platforms
#if SANITIZER_WORDSIZE == 32 && SANITIZER_ANDROID
// v8=>v9: 32-bit Android switched to dynamic shadow
#define __asan_version_mismatch_check __asan_version_mismatch_check_v9
#else
#define __asan_version_mismatch_check __asan_version_mismatch_check_v8
#endif
}
#endif // ASAN_INIT_VERSION_H

View File

@ -24,6 +24,11 @@
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_libc.h"
// There is no general interception at all on Fuchsia.
// Only the functions in asan_interceptors_memintrinsics.cc are
// really defined to replace libc functions.
#if !SANITIZER_FUCHSIA
#if SANITIZER_POSIX
#include "sanitizer_common/sanitizer_posix.h"
#endif
@ -36,108 +41,6 @@
namespace __asan {
// Return true if we can quickly decide that the region is unpoisoned.
// We assume that a redzone is at least 16 bytes.
static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
if (size == 0) return true;
if (size <= 32)
return !AddressIsPoisoned(beg) &&
!AddressIsPoisoned(beg + size - 1) &&
!AddressIsPoisoned(beg + size / 2);
if (size <= 64)
return !AddressIsPoisoned(beg) &&
!AddressIsPoisoned(beg + size / 4) &&
!AddressIsPoisoned(beg + size - 1) &&
!AddressIsPoisoned(beg + 3 * size / 4) &&
!AddressIsPoisoned(beg + size / 2);
return false;
}
struct AsanInterceptorContext {
const char *interceptor_name;
};
// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so
// that no extra frames are created, and stack trace contains
// relevant information only.
// We check all shadow bytes.
#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
if (__offset > __offset + __size) { \
GET_STACK_TRACE_FATAL_HERE; \
ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
} \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
bool suppressed = false; \
if (_ctx) { \
suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
if (!suppressed && HaveStackTraceBasedSuppressions()) { \
GET_STACK_TRACE_FATAL_HERE; \
suppressed = IsStackTraceSuppressed(&stack); \
} \
} \
if (!suppressed) { \
GET_CURRENT_PC_BP_SP; \
ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
} \
} \
} while (0)
// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
if (asan_init_is_running) { \
return REAL(memcpy)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
if (to != from) { \
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return REAL(memcpy)(to, from, size); \
} while (0)
// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
if (asan_init_is_running) { \
return REAL(memset)(block, c, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_WRITE_RANGE(ctx, block, size); \
} \
return REAL(memset)(block, c, size); \
} while (0)
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return internal_memmove(to, from, size); \
} while (0)
#define ASAN_READ_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, true)
#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \
ASAN_READ_RANGE((ctx), (s), \
common_flags()->strict_string_checks ? (len) + 1 : (n))
@ -145,23 +48,6 @@ struct AsanInterceptorContext {
#define ASAN_READ_STRING(ctx, s, n) \
ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report an error in this case.
// A macro is used to avoid creating new frames.
static inline bool RangesOverlap(const char *offset1, uptr length1,
const char *offset2, uptr length2) {
return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
}
#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
const char *offset1 = (const char*)_offset1; \
const char *offset2 = (const char*)_offset2; \
if (RangesOverlap(offset1, length1, offset2, length2)) { \
GET_STACK_TRACE_FATAL_HERE; \
ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
offset2, length2, &stack); \
} \
} while (0)
static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
#if SANITIZER_INTERCEPT_STRNLEN
if (REAL(strnlen)) {
@ -275,6 +161,7 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
} while (false)
#include "sanitizer_common/sanitizer_common_interceptors.inc"
#include "sanitizer_common/sanitizer_signal_interceptors.inc"
// Syscall interceptors don't have contexts, so we don't support suppressions
// for them.
@ -356,42 +243,6 @@ INTERCEPTOR(int, pthread_join, void *t, void **arg) {
DEFINE_REAL_PTHREAD_FUNCTIONS
#endif // ASAN_INTERCEPT_PTHREAD_CREATE
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if SANITIZER_ANDROID
INTERCEPTOR(void*, bsd_signal, int signum, void *handler) {
if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
return REAL(bsd_signal)(signum, handler);
return 0;
}
#endif
INTERCEPTOR(void*, signal, int signum, void *handler) {
if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
return REAL(signal)(signum, handler);
return nullptr;
}
INTERCEPTOR(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact) {
if (GetHandleSignalMode(signum) != kHandleSignalExclusive)
return REAL(sigaction)(signum, act, oldact);
return 0;
}
namespace __sanitizer {
int real_sigaction(int signum, const void *act, void *oldact) {
return REAL(sigaction)(signum, (const struct sigaction *)act,
(struct sigaction *)oldact);
}
} // namespace __sanitizer
#elif SANITIZER_POSIX
// We need to have defined REAL(sigaction) on posix systems.
DEFINE_REAL(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact)
#endif // ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
#if ASAN_INTERCEPT_SWAPCONTEXT
static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
// Align to page size.
@ -428,6 +279,11 @@ INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
}
#endif // ASAN_INTERCEPT_SWAPCONTEXT
#if SANITIZER_NETBSD
#define longjmp __longjmp14
#define siglongjmp __siglongjmp14
#endif
INTERCEPTOR(void, longjmp, void *env, int val) {
__asan_handle_no_return();
REAL(longjmp)(env, val);
@ -462,18 +318,6 @@ INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
}
#endif
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
void *__asan_memset(void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
void *__asan_memmove(void *to, const void *from, uptr size) {
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
#if ASAN_INTERCEPT_INDEX
# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
INTERCEPTOR(char*, index, const char *string, int c)
@ -711,6 +555,7 @@ void InitializeAsanInterceptors() {
CHECK(!was_called_once);
was_called_once = true;
InitializeCommonInterceptors();
InitializeSignalInterceptors();
// Intercept str* functions.
ASAN_INTERCEPT_FUNC(strcat); // NOLINT
@ -733,15 +578,9 @@ void InitializeAsanInterceptors() {
ASAN_INTERCEPT_FUNC(strtoll);
#endif
  // Intercept signal- and jump-related functions.
  // Intercept jump-related functions.
ASAN_INTERCEPT_FUNC(longjmp);
#if ASAN_INTERCEPT_SIGNAL_AND_SIGACTION
ASAN_INTERCEPT_FUNC(sigaction);
#if SANITIZER_ANDROID
ASAN_INTERCEPT_FUNC(bsd_signal);
#endif
ASAN_INTERCEPT_FUNC(signal);
#endif
#if ASAN_INTERCEPT_SWAPCONTEXT
ASAN_INTERCEPT_FUNC(swapcontext);
#endif
@ -785,3 +624,5 @@ void InitializeAsanInterceptors() {
}
} // namespace __asan
#endif // !SANITIZER_FUCHSIA

View File

@ -15,9 +15,30 @@
#define ASAN_INTERCEPTORS_H
#include "asan_internal.h"
#include "asan_interceptors_memintrinsics.h"
#include "interception/interception.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
namespace __asan {
void InitializeAsanInterceptors();
void InitializePlatformInterceptors();
#define ENSURE_ASAN_INITED() \
do { \
CHECK(!asan_init_is_running); \
if (UNLIKELY(!asan_inited)) { \
AsanInitFromRtl(); \
} \
} while (0)
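To make the macro's contract concrete, here is a self-contained sketch (stubbed flags and a stubbed init routine, not the real runtime) showing that the first call initializes, later calls are no-ops, and calling it while initialization is in flight would trip the CHECK:

#include <cassert>
// Hypothetical stand-ins for the real runtime state, for illustration only.
static bool asan_inited = false;
static bool asan_init_is_running = false;
static void AsanInitFromRtl() { asan_inited = true; }
#define ENSURE_ASAN_INITED()              \
  do {                                    \
    assert(!asan_init_is_running);        \
    if (!asan_inited) AsanInitFromRtl();  \
  } while (0)
int main() {
  ENSURE_ASAN_INITED();  // first use triggers initialization
  assert(asan_inited);
  ENSURE_ASAN_INITED();  // subsequent uses are cheap no-ops
}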
} // namespace __asan
// There is no general interception at all on Fuchsia.
// Only the functions in asan_interceptors_memintrinsics.h are
// really defined to replace libc functions.
#if !SANITIZER_FUCHSIA
// Use macro to describe if specific function should be
// intercepted on a given platform.
#if !SANITIZER_WINDOWS
@ -34,24 +55,19 @@
# define ASAN_INTERCEPT_FORK 0
#endif
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
#else
# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
#endif
#if SANITIZER_LINUX && !SANITIZER_ANDROID
#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS
# define ASAN_INTERCEPT_SWAPCONTEXT 1
#else
# define ASAN_INTERCEPT_SWAPCONTEXT 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 1
#else
# define ASAN_INTERCEPT_SIGNAL_AND_SIGACTION 0
#endif
#if !SANITIZER_WINDOWS
# define ASAN_INTERCEPT_SIGLONGJMP 1
#else
@ -66,7 +82,8 @@
// Android bug: https://code.google.com/p/android/issues/detail?id=61799
#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && \
!(SANITIZER_ANDROID && defined(__i386))
!(SANITIZER_ANDROID && defined(__i386)) && \
!SANITIZER_SOLARIS
# define ASAN_INTERCEPT___CXA_THROW 1
#else
# define ASAN_INTERCEPT___CXA_THROW 0
@ -85,16 +102,11 @@
#endif
DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
DECLARE_REAL(char*, strchr, const char *str, int c)
DECLARE_REAL(SIZE_T, strlen, const char *s)
DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
struct sigaction;
DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
struct sigaction *oldact)
#if !SANITIZER_MAC
#define ASAN_INTERCEPT_FUNC(name) \
@ -113,18 +125,6 @@ DECLARE_REAL(int, sigaction, int signum, const struct sigaction *act,
#define ASAN_INTERCEPT_FUNC(name)
#endif // SANITIZER_MAC
namespace __asan {
void InitializeAsanInterceptors();
void InitializePlatformInterceptors();
#define ENSURE_ASAN_INITED() do { \
CHECK(!asan_init_is_running); \
if (UNLIKELY(!asan_inited)) { \
AsanInitFromRtl(); \
} \
} while (0)
} // namespace __asan
#endif // !SANITIZER_FUCHSIA
#endif // ASAN_INTERCEPTORS_H

View File

@ -0,0 +1,44 @@
//===-- asan_interceptors_memintrinsics.cc --------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan versions of memcpy, memmove, and memset.
//===---------------------------------------------------------------------===//
#include "asan_interceptors_memintrinsics.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_suppressions.h"
using namespace __asan; // NOLINT
void *__asan_memcpy(void *to, const void *from, uptr size) {
ASAN_MEMCPY_IMPL(nullptr, to, from, size);
}
void *__asan_memset(void *block, int c, uptr size) {
ASAN_MEMSET_IMPL(nullptr, block, c, size);
}
void *__asan_memmove(void *to, const void *from, uptr size) {
ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
}
#if SANITIZER_FUCHSIA
// Fuchsia doesn't use sanitizer_common_interceptors.inc, but the only
// things there it wants are these three. Just define them as aliases
// here rather than repeating the contents.
decltype(memcpy) memcpy [[gnu::alias("__asan_memcpy")]];
decltype(memmove) memmove [[gnu::alias("__asan_memmove")]];
decltype(memset) memset [[gnu::alias("__asan_memset")]];
#endif // SANITIZER_FUCHSIA

View File

@ -0,0 +1,148 @@
//===-- asan_interceptors_memintrinsics.h -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// ASan-private header for asan_interceptors_memintrinsics.cc
//===---------------------------------------------------------------------===//
#ifndef ASAN_MEMINTRIN_H
#define ASAN_MEMINTRIN_H
#include "asan_interface_internal.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "interception/interception.h"
DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
DECLARE_REAL(void*, memset, void *block, int c, uptr size)
namespace __asan {
// Return true if we can quickly decide that the region is unpoisoned.
// We assume that a redzone is at least 16 bytes.
static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
if (size == 0) return true;
if (size <= 32)
return !AddressIsPoisoned(beg) &&
!AddressIsPoisoned(beg + size - 1) &&
!AddressIsPoisoned(beg + size / 2);
if (size <= 64)
return !AddressIsPoisoned(beg) &&
!AddressIsPoisoned(beg + size / 4) &&
!AddressIsPoisoned(beg + size - 1) &&
!AddressIsPoisoned(beg + 3 * size / 4) &&
!AddressIsPoisoned(beg + size / 2);
return false;
}
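The probe pattern above deserves a worked example. A minimal standalone model (with a fake poison predicate standing in for real shadow reads; addresses are made up) shows why a few probes suffice when redzones are at least 16 bytes: a 16-byte poisoned run cannot fit between the probes of a <=32-byte region without touching one of them.

#include <cstdint>
#include <cstdio>
using uptr = uintptr_t;
// Fake shadow: pretend exactly the 16 bytes [100, 116) are a redzone.
static bool AddressIsPoisoned(uptr a) { return a >= 100 && a < 116; }
static bool QuickCheck(uptr beg, uptr size) {  // mirrors the <=32-byte branch
  if (size == 0) return true;
  return !AddressIsPoisoned(beg) && !AddressIsPoisoned(beg + size - 1) &&
         !AddressIsPoisoned(beg + size / 2);
}
int main() {
  std::printf("%d\n", QuickCheck(80, 32));  // 0: the probe at 111 hits the redzone
  std::printf("%d\n", QuickCheck(40, 32));  // 1: all probes clean
}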
struct AsanInterceptorContext {
const char *interceptor_name;
};
// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
// and ASAN_WRITE_RANGE as macro instead of function so
// that no extra frames are created, and stack trace contains
// relevant information only.
// We check all shadow bytes.
#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
uptr __offset = (uptr)(offset); \
uptr __size = (uptr)(size); \
uptr __bad = 0; \
if (__offset > __offset + __size) { \
GET_STACK_TRACE_FATAL_HERE; \
ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
} \
if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
(__bad = __asan_region_is_poisoned(__offset, __size))) { \
AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
bool suppressed = false; \
if (_ctx) { \
suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
if (!suppressed && HaveStackTraceBasedSuppressions()) { \
GET_STACK_TRACE_FATAL_HERE; \
suppressed = IsStackTraceSuppressed(&stack); \
} \
} \
if (!suppressed) { \
GET_CURRENT_PC_BP_SP; \
ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
} \
} \
} while (0)
// memcpy is called during __asan_init() from the internals of printf(...).
// We do not treat memcpy with to==from as a bug.
// See http://llvm.org/bugs/show_bug.cgi?id=11763.
#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
if (asan_init_is_running) { \
return REAL(memcpy)(to, from, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
if (to != from) { \
CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
} \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return REAL(memcpy)(to, from, size); \
} while (0)
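As a hedged sketch of how a context reaches this macro (the real interceptors are generated via sanitizer_common_interceptors.inc; this exact definition is illustrative only), a memcpy interceptor would stack-allocate an AsanInterceptorContext so that ACCESS_MEMORY_RANGE can match suppressions by interceptor name:

INTERCEPTOR(void*, memcpy, void *to, const void *from, uptr size) {
  AsanInterceptorContext _ctx = {"memcpy"};  // interceptor_name used by suppressions
  ASAN_MEMCPY_IMPL(&_ctx, to, from, size);   // overlap check, then read/write range checks
}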
// memset is called inside Printf.
#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
if (asan_init_is_running) { \
return REAL(memset)(block, c, size); \
} \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_WRITE_RANGE(ctx, block, size); \
} \
return REAL(memset)(block, c, size); \
} while (0)
#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
do { \
if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
ENSURE_ASAN_INITED(); \
if (flags()->replace_intrin) { \
ASAN_READ_RANGE(ctx, from, size); \
ASAN_WRITE_RANGE(ctx, to, size); \
} \
return internal_memmove(to, from, size); \
} while (0)
#define ASAN_READ_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, false)
#define ASAN_WRITE_RANGE(ctx, offset, size) \
ACCESS_MEMORY_RANGE(ctx, offset, size, true)
// Behavior of functions like "memcpy" or "strcpy" is undefined
// if memory intervals overlap. We report an error in this case.
// Macro is used to avoid creation of new frames.
static inline bool RangesOverlap(const char *offset1, uptr length1,
const char *offset2, uptr length2) {
return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
}
#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) do { \
const char *offset1 = (const char*)_offset1; \
const char *offset2 = (const char*)_offset2; \
if (RangesOverlap(offset1, length1, offset2, length2)) { \
GET_STACK_TRACE_FATAL_HERE; \
ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
offset2, length2, &stack); \
} \
} while (0)
} // namespace __asan
#endif // ASAN_MEMINTRIN_H
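A standalone illustration of the overlap predicate above (made-up buffer, not from this file): strictly adjacent ranges are fine, while sharing even one byte is reported.

#include <cstdio>
static bool RangesOverlap(const char *offset1, unsigned long length1,
                          const char *offset2, unsigned long length2) {
  return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
}
int main() {
  char buf[16];
  std::printf("%d\n", RangesOverlap(buf, 8, buf + 8, 8));  // 0: adjacent is legal
  std::printf("%d\n", RangesOverlap(buf, 9, buf + 8, 8));  // 1: byte buf[8] is shared
}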

View File

@ -69,8 +69,12 @@ void InitializePlatformExceptionHandlers();
bool IsSystemHeapAddress(uptr addr);
// asan_rtl.cc
void PrintAddressSpaceLayout();
void NORETURN ShowStatsAndAbort();
// asan_shadow_setup.cc
void InitializeShadowMemory();
// asan_malloc_linux.cc / asan_malloc_mac.cc
void ReplaceSystemMalloc();
@ -80,6 +84,9 @@ void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();
// asan_thread.cc
AsanThread *CreateMainThread();
// Support function for __asan_(un)register_image_globals. Searches for the
// loaded image containing `needle' and then enumerates all global metadata
// structures declared in that image, applying `op' (e.g.,

View File

@ -13,10 +13,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
SANITIZER_SOLARIS
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_premap_shadow.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_freebsd.h"
@ -39,9 +41,17 @@
#include <sys/link_elf.h>
#endif
#if SANITIZER_ANDROID || SANITIZER_FREEBSD
#if SANITIZER_SOLARIS
#include <link.h>
#endif
#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
#include <ucontext.h>
extern "C" void* _DYNAMIC;
#elif SANITIZER_NETBSD
#include <link_elf.h>
#include <ucontext.h>
extern Elf_Dyn _DYNAMIC;
#else
#include <sys/ucontext.h>
#include <link.h>
@ -77,9 +87,51 @@ void *AsanDoesNotSupportStaticLinkage() {
return &_DYNAMIC; // defined in link.h
}
static void UnmapFromTo(uptr from, uptr to) {
CHECK(to >= from);
if (to == from) return;
uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
if (UNLIKELY(internal_iserror(res))) {
Report(
"ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
"%p\n",
to - from, to - from, from);
CHECK("unable to unmap" && 0);
}
}
#if ASAN_PREMAP_SHADOW
uptr FindPremappedShadowStart() {
uptr granularity = GetMmapGranularity();
uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
uptr premap_shadow_size = PremapShadowSize();
uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
// We may have mapped too much. Release extra memory.
UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
return shadow_start;
}
#endif
uptr FindDynamicShadowStart() {
UNREACHABLE("FindDynamicShadowStart is not available");
return 0;
#if ASAN_PREMAP_SHADOW
if (!PremapShadowFailed())
return FindPremappedShadowStart();
#endif
uptr granularity = GetMmapGranularity();
uptr alignment = granularity * 8;
uptr left_padding = granularity;
uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
uptr map_size = shadow_size + left_padding + alignment;
uptr map_start = (uptr)MmapNoAccess(map_size);
CHECK_NE(map_start, ~(uptr)0);
uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
UnmapFromTo(map_start, shadow_start - left_padding);
UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
return shadow_start;
}
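The over-allocate-then-trim arithmetic above can be checked in isolation. A sketch with assumed numbers (4K granularity, hypothetical addresses; the real function gets them from the OS):

#include <cstdint>
#include <cstdio>
using uptr = uintptr_t;
static uptr RoundUpTo(uptr x, uptr a) { return (x + a - 1) & ~(a - 1); }
int main() {
  const uptr granularity = 0x1000;         // assumed 4K mmap granularity
  const uptr alignment = granularity * 8;  // 8 pages, as in the code above
  const uptr left_padding = granularity;
  const uptr shadow_size = 0x40000;        // hypothetical
  const uptr map_size = shadow_size + left_padding + alignment;
  const uptr map_start = 0x7f0000001000;   // pretend MmapNoAccess returned this
  const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
  std::printf("trim head [%#llx, %#llx)\n", (unsigned long long)map_start,
              (unsigned long long)(shadow_start - left_padding));
  std::printf("shadow    [%#llx, %#llx)\n", (unsigned long long)shadow_start,
              (unsigned long long)(shadow_start + shadow_size));
  std::printf("trim tail [%#llx, %#llx)\n",
              (unsigned long long)(shadow_start + shadow_size),
              (unsigned long long)(map_start + map_size));
}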
void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
@ -93,6 +145,9 @@ void AsanCheckIncompatibleRT() {}
#else
static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
void *data) {
VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n",
info->dlpi_name, info->dlpi_addr);
// Continue until the first dynamic library is found
if (!info->dlpi_name || info->dlpi_name[0] == 0)
return 0;
@ -101,6 +156,21 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
return 0;
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
// Ignore first entry (the main program)
char **p = (char **)data;
if (!(*p)) {
*p = (char *)-1;
return 0;
}
#endif
#if SANITIZER_SOLARIS
// Ignore executable on Solaris
if (info->dlpi_addr == 0)
return 0;
#endif
*(const char **)data = info->dlpi_name;
return 1;
}
@ -179,4 +249,5 @@ void *AsanDlSymNext(const char *sym) {
} // namespace __asan
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
// SANITIZER_SOLARIS

View File

@ -15,7 +15,8 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_FREEBSD || SANITIZER_LINUX
#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
SANITIZER_NETBSD || SANITIZER_SOLARIS
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "asan_allocator.h"
@ -30,9 +31,9 @@ static uptr allocated_for_dlsym;
static const uptr kDlsymAllocPoolSize = 1024;
static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
static bool IsInDlsymAllocPool(const void *ptr) {
static INLINE bool IsInDlsymAllocPool(const void *ptr) {
uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
return off < sizeof(alloc_memory_for_dlsym);
return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
}
static void *AllocateFromLocalPool(uptr size_in_bytes) {
@ -43,6 +44,26 @@ static void *AllocateFromLocalPool(uptr size_in_bytes) {
return mem;
}
static INLINE bool MaybeInDlsym() {
// Fuchsia doesn't use dlsym-based interceptors.
return !SANITIZER_FUCHSIA && asan_init_is_running;
}
static void *ReallocFromLocalPool(void *ptr, uptr size) {
const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr;
if (UNLIKELY(MaybeInDlsym())) {
new_ptr = AllocateFromLocalPool(size);
} else {
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
new_ptr = asan_malloc(size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
INTERCEPTOR(void, free, void *ptr) {
GET_STACK_TRACE_FREE;
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
@ -60,7 +81,7 @@ INTERCEPTOR(void, cfree, void *ptr) {
#endif // SANITIZER_INTERCEPT_CFREE
INTERCEPTOR(void*, malloc, uptr size) {
if (UNLIKELY(asan_init_is_running))
if (UNLIKELY(MaybeInDlsym()))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
ENSURE_ASAN_INITED();
@ -69,7 +90,7 @@ INTERCEPTOR(void*, malloc, uptr size) {
}
INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
if (UNLIKELY(asan_init_is_running))
if (UNLIKELY(MaybeInDlsym()))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
return AllocateFromLocalPool(nmemb * size);
ENSURE_ASAN_INITED();
@ -78,21 +99,9 @@ INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
}
INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
const uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
void *new_ptr;
if (UNLIKELY(asan_init_is_running)) {
new_ptr = AllocateFromLocalPool(size);
} else {
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
new_ptr = asan_malloc(size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
if (UNLIKELY(asan_init_is_running))
if (UNLIKELY(IsInDlsymAllocPool(ptr)))
return ReallocFromLocalPool(ptr, size);
if (UNLIKELY(MaybeInDlsym()))
return AllocateFromLocalPool(size);
ENSURE_ASAN_INITED();
GET_STACK_TRACE_MALLOC;
@ -226,4 +235,5 @@ void ReplaceSystemMalloc() {
} // namespace __asan
#endif // SANITIZER_ANDROID
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX ||
// SANITIZER_NETBSD || SANITIZER_SOLARIS

View File

@ -115,6 +115,13 @@
// || `[0x40000000, 0x47ffffff]` || LowShadow ||
// || `[0x00000000, 0x3fffffff]` || LowMem ||
//
// Shadow mapping on NetBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
// || `[0x4feffffffe01, 0x7f7ffffff000]` || HighMem ||
// || `[0x49fdffffffc0, 0x4feffffffe00]` || HighShadow ||
// || `[0x480000000000, 0x49fdffffffbf]` || ShadowGap ||
// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
//
// Default Windows/i386 mapping:
// (the exact location of HighShadow/HighMem may vary depending
// on WoW64, /LARGEADDRESSAWARE, etc).
@ -124,11 +131,16 @@
// || `[0x30000000, 0x35ffffff]` || LowShadow ||
// || `[0x00000000, 0x2fffffff]` || LowMem ||
#if defined(ASAN_SHADOW_SCALE)
static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
#else
static const u64 kDefaultShadowScale = 3;
#endif
static const u64 kDefaultShadowSentinel = ~(uptr)0;
static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kDefaultShort64bitShadowOffset =
0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
static const u64 kIosShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kIosShadowOffset64 = 0x120200000;
static const u64 kIosSimShadowOffset32 = 1ULL << 30;
@ -136,18 +148,20 @@ static const u64 kIosSimShadowOffset64 = kDefaultShadowOffset64;
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
#define SHADOW_SCALE kDefaultShadowScale
#if SANITIZER_WORDSIZE == 32
#if SANITIZER_FUCHSIA
# define SHADOW_OFFSET (0)
#elif SANITIZER_WORDSIZE == 32
# if SANITIZER_ANDROID
# define SHADOW_OFFSET (0)
# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
# elif defined(__mips__)
# define SHADOW_OFFSET kMIPS32_ShadowOffset32
# elif SANITIZER_FREEBSD
@ -178,6 +192,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
# define SHADOW_OFFSET kSystemZ_ShadowOffset64
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
# elif SANITIZER_NETBSD
# define SHADOW_OFFSET kNetBSD_ShadowOffset64
# elif SANITIZER_MAC
# define SHADOW_OFFSET kDefaultShadowOffset64
# elif defined(__mips64)
@ -189,6 +205,12 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
# endif
#endif
#if SANITIZER_ANDROID && defined(__arm__)
# define ASAN_PREMAP_SHADOW 1
#else
# define ASAN_PREMAP_SHADOW 0
#endif
#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
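A worked check of the constants and macros above with the default scale of 3: ~0xFFFULL << 3 ends in ...FFF8000, masking with 0x7FFFFFFF yields the 0x7FFF8000 short offset, and every application address maps to shadow at (addr >> 3) + offset.

#include <cstdint>
#include <cstdio>
int main() {
  const uint64_t scale = 3;                                      // kDefaultShadowScale
  const uint64_t offset = 0x7FFFFFFFULL & (~0xFFFULL << scale);  // 0x7fff8000
  const uint64_t addr = 0x10000000ULL;                           // hypothetical app address
  std::printf("offset = %#llx\n", (unsigned long long)offset);
  std::printf("shadow = %#llx\n",
              (unsigned long long)((addr >> scale) + offset));   // 0x81ff8000
}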

View File

@ -107,6 +107,9 @@ static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
__lsan::ForEachChunk(ChunkCallback, &hp);
uptr *Arg = reinterpret_cast<uptr*>(argument);
hp.Print(Arg[0], Arg[1]);
if (Verbosity())
__asan_print_accumulated_stats();
}
} // namespace __asan

View File

@ -125,77 +125,69 @@ INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
}
#endif
#endif // !SANITIZER_MAC
#define OPERATOR_DELETE_BODY(type) \
GET_STACK_TRACE_FREE;\
asan_free(ptr, &stack, type);
asan_delete(ptr, 0, 0, &stack, type);
#define OPERATOR_DELETE_BODY_SIZE(type) \
GET_STACK_TRACE_FREE;\
asan_delete(ptr, size, 0, &stack, type);
#define OPERATOR_DELETE_BODY_ALIGN(type) \
GET_STACK_TRACE_FREE;\
asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type);
#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
GET_STACK_TRACE_FREE;\
asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);
#if !SANITIZER_MAC
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW);
}
void operator delete(void *ptr) NOEXCEPT
{ OPERATOR_DELETE_BODY(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
void operator delete[](void *ptr) NOEXCEPT
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
void operator delete(void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
void operator delete[](void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW);
}
void operator delete(void *ptr, size_t size) NOEXCEPT
{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
void operator delete[](void *ptr, size_t size) NOEXCEPT
{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW);
}
void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t) NOEXCEPT {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
CXX_OPERATOR_ATTRIBUTE
void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW);
}
void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); }
CXX_OPERATOR_ATTRIBUTE
void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT {
GET_STACK_TRACE_FREE;
asan_sized_free(ptr, size, &stack, FROM_NEW_BR);
}
void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); }
#else // SANITIZER_MAC
INTERCEPTOR(void, _ZdlPv, void *ptr) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR(void, _ZdaPv, void *ptr) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW);
}
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&) {
OPERATOR_DELETE_BODY(FROM_NEW_BR);
}
#endif
INTERCEPTOR(void, _ZdlPv, void *ptr)
{ OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR(void, _ZdaPv, void *ptr)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW); }
INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
#endif // !SANITIZER_MAC
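For context on why the full matrix of overloads exists, this generic C++17 snippet (not from the diff; clang may need -fsized-deallocation for the sized forms) shows which overload the compiler dispatches to:

#include <new>
struct Plain { char c[24]; };
struct alignas(64) Wide { char c[64]; };  // over-aligned: selects align_val_t forms
int main() {
  Plain *p = new Plain;
  delete p;  // operator delete(void*, size_t) when sized deallocation is enabled
  Wide *w = new Wide;
  delete w;  // operator delete(void*, size_t, std::align_val_t) under C++17
}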

View File

@ -217,7 +217,7 @@ uptr __asan_region_is_poisoned(uptr beg, uptr size) {
uptr __bad = __asan_region_is_poisoned(__p, __size); \
__asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
} \
} while (false); \
} while (false)
extern "C" SANITIZER_INTERFACE_ATTRIBUTE

View File

@ -46,8 +46,11 @@ ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
// for mapping shadow and zeroing out pages doesn't "just work", so we should
// probably provide higher-level interface for these operations.
// For now, just memset on Windows.
if (value ||
SANITIZER_WINDOWS == 1 ||
if (value || SANITIZER_WINDOWS == 1 ||
// TODO(mcgrathr): Fuchsia doesn't allow the shadow mapping to be
// changed at all. It doesn't currently have an efficient means
// to zero a bunch of pages, but maybe we should add one.
SANITIZER_FUCHSIA == 1 ||
shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
} else {

View File

@ -25,7 +25,6 @@
#include "sanitizer_common/sanitizer_procmaps.h"
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
@ -34,58 +33,9 @@
namespace __asan {
void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
ScopedDeadlySignal signal_scope(GetCurrentThread());
int code = (int)((siginfo_t*)siginfo)->si_code;
// Write the first message using fd=2, just in case.
// It may actually fail to write in case stderr is closed.
internal_write(2, "ASAN:DEADLYSIGNAL\n", 18);
SignalContext sig = SignalContext::Create(siginfo, context);
// Access at a reasonable offset above SP, or slightly below it (to account
// for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
// probably a stack overflow.
#ifdef __s390__
// On s390, the fault address in siginfo points to start of the page, not
// to the precise word that was accessed. Mask off the low bits of sp to
// take it into account.
bool IsStackAccess = sig.addr >= (sig.sp & ~0xFFF) &&
sig.addr < sig.sp + 0xFFFF;
#else
bool IsStackAccess = sig.addr + 512 > sig.sp && sig.addr < sig.sp + 0xFFFF;
#endif
#if __powerpc__
// Large stack frames can be allocated with e.g.
// lis r0,-10000
// stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
// If the store faults then sp will not have been updated, so test above
// will not work, because the fault address will be more than just "slightly"
// below sp.
if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
u32 inst = *(unsigned *)sig.pc;
u32 ra = (inst >> 16) & 0x1F;
u32 opcd = inst >> 26;
u32 xo = (inst >> 1) & 0x3FF;
// Check for store-with-update to sp. The instructions we accept are:
// stbu rs,d(ra) stbux rs,ra,rb
// sthu rs,d(ra) sthux rs,ra,rb
// stwu rs,d(ra) stwux rs,ra,rb
// stdu rs,ds(ra) stdux rs,ra,rb
// where ra is r1 (the stack pointer).
if (ra == 1 &&
(opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
(opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
IsStackAccess = true;
}
#endif // __powerpc__
  // We also check si_code to filter out SEGV caused by something other
  // than hitting the guard page or unmapped memory, for example,
  // unaligned memory access.
if (IsStackAccess && (code == si_SEGV_MAPERR || code == si_SEGV_ACCERR))
ReportStackOverflow(sig);
else
ReportDeadlySignal(signo, sig);
StartReportDeadlySignal();
SignalContext sig(siginfo, context);
ReportDeadlySignal(sig);
}
// ---------------------- TSD ---------------- {{{1

View File

@ -0,0 +1,79 @@
//===-- asan_premap_shadow.cc ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Reserve shadow memory with an ifunc resolver.
//===----------------------------------------------------------------------===//
#include "asan_mapping.h"
#if ASAN_PREMAP_SHADOW
#include "asan_premap_shadow.h"
#include "sanitizer_common/sanitizer_posix.h"
namespace __asan {
// The code in this file needs to run in an unrelocated binary. It may not
// access any external symbol, including its own non-hidden globals.
// Conservative upper limit.
uptr PremapShadowSize() {
uptr granularity = GetMmapGranularity();
return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity);
}
// Returns an address aligned to 8 pages, such that one page on the left and
// PremapShadowSize() bytes on the right of it are mapped r/o.
uptr PremapShadow() {
uptr granularity = GetMmapGranularity();
uptr alignment = granularity * 8;
uptr left_padding = granularity;
uptr shadow_size = PremapShadowSize();
uptr map_size = shadow_size + left_padding + alignment;
uptr map_start = (uptr)MmapNoAccess(map_size);
CHECK_NE(map_start, ~(uptr)0);
uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
uptr shadow_end = shadow_start + shadow_size;
internal_munmap(reinterpret_cast<void *>(map_start),
shadow_start - left_padding - map_start);
internal_munmap(reinterpret_cast<void *>(shadow_end),
map_start + map_size - shadow_end);
return shadow_start;
}
bool PremapShadowFailed() {
uptr shadow = reinterpret_cast<uptr>(&__asan_shadow);
uptr resolver = reinterpret_cast<uptr>(&__asan_premap_shadow);
// shadow == resolver is how Android KitKat and older handles ifunc.
// shadow == 0 just in case.
if (shadow == 0 || shadow == resolver)
return true;
return false;
}
} // namespace __asan
extern "C" {
decltype(__asan_shadow)* __asan_premap_shadow() {
// The resolver may be called multiple times. Map the shadow just once.
static uptr premapped_shadow = 0;
if (!premapped_shadow) premapped_shadow = __asan::PremapShadow();
return reinterpret_cast<decltype(__asan_shadow)*>(premapped_shadow);
}
// __asan_shadow is a "function" that has the same address as the first byte of
// the shadow mapping.
INTERFACE_ATTRIBUTE __attribute__((ifunc("__asan_premap_shadow"))) void
__asan_shadow();
}
#endif // ASAN_PREMAP_SHADOW
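To show the ifunc mechanism in isolation, here is a minimal standalone demo (GNU toolchains only; all names are made up and unrelated to ASan). The resolver runs during relocation and its return value becomes the symbol's address, which is exactly how __asan_premap_shadow binds __asan_shadow above:

#include <cstdio>
extern "C" int impl() { return 42; }
// Resolver: invoked once by the dynamic loader, possibly before relocations
// are applied, so it must stay self-contained.
extern "C" int (*resolve_answer())() { return &impl; }
extern "C" int answer() __attribute__((ifunc("resolve_answer")));
int main() { std::printf("%d\n", answer()); }  // prints 42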

View File

@ -0,0 +1,30 @@
//===-- asan_premap_shadow.h ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Premap shadow range with an ifunc resolver.
//===----------------------------------------------------------------------===//
#ifndef ASAN_PREMAP_SHADOW_H
#define ASAN_PREMAP_SHADOW_H
#if ASAN_PREMAP_SHADOW
namespace __asan {
// Conservative upper limit.
uptr PremapShadowSize();
bool PremapShadowFailed();
}
#endif
extern "C" INTERFACE_ATTRIBUTE void __asan_shadow();
extern "C" decltype(__asan_shadow)* __asan_premap_shadow();
#endif // ASAN_PREMAP_SHADOW_H

View File

@ -60,9 +60,8 @@ void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
bool in_shadow, const char *after) {
Decorator d;
str->append("%s%s%x%x%s%s", before,
in_shadow ? d.ShadowByte(byte) : d.MemoryByte(),
byte >> 4, byte & 15,
in_shadow ? d.EndShadowByte() : d.EndMemoryByte(), after);
in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
byte & 15, d.Default(), after);
}
static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
@ -123,53 +122,15 @@ bool ParseFrameDescription(const char *frame_descr,
// immediately after printing error report.
class ScopedInErrorReport {
public:
explicit ScopedInErrorReport(bool fatal = false) {
halt_on_error_ = fatal || flags()->halt_on_error;
if (lock_.TryLock()) {
StartReporting();
return;
}
// ASan found two bugs in different threads simultaneously.
u32 current_tid = GetCurrentTidOrInvalid();
if (reporting_thread_tid_ == current_tid ||
reporting_thread_tid_ == kInvalidTid) {
      // This is either an async signal or a nested error during error reporting.
      // Fail simply to avoid deadlocks in Report().
// Can't use Report() here because of potential deadlocks
// in nested signal handlers.
const char msg[] = "AddressSanitizer: nested bug in the same thread, "
"aborting.\n";
WriteToFile(kStderrFd, msg, sizeof(msg));
internal__exit(common_flags()->exitcode);
}
if (halt_on_error_) {
// Do not print more than one report, otherwise they will mix up.
      // Error reporting functions shouldn't return in this situation, as
      // they are effectively no-returns.
Report("AddressSanitizer: while reporting a bug found another one. "
"Ignoring.\n");
// Sleep long enough to make sure that the thread which started
// to print an error report will finish doing it.
SleepForSeconds(Max(100, flags()->sleep_before_dying + 1));
// If we're still not dead for some reason, use raw _exit() instead of
// Die() to bypass any additional checks.
internal__exit(common_flags()->exitcode);
} else {
// The other thread will eventually finish reporting
// so it's safe to wait
lock_.Lock();
}
StartReporting();
explicit ScopedInErrorReport(bool fatal = false)
: halt_on_error_(fatal || flags()->halt_on_error) {
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
// We can lock them only here to avoid self-deadlock in case of
// recursive reports.
asanThreadRegistry().Lock();
Printf(
"=================================================================\n");
}
~ScopedInErrorReport() {
@ -217,9 +178,6 @@ class ScopedInErrorReport {
if (!halt_on_error_)
internal_memset(&current_error_, 0, sizeof(current_error_));
CommonSanitizerReportMutex.Unlock();
reporting_thread_tid_ = kInvalidTid;
lock_.Unlock();
if (halt_on_error_) {
Report("ABORTING\n");
Die();
@ -237,39 +195,18 @@ class ScopedInErrorReport {
}
private:
void StartReporting() {
// Make sure the registry and sanitizer report mutexes are locked while
// we're printing an error report.
// We can lock them only here to avoid self-deadlock in case of
// recursive reports.
asanThreadRegistry().Lock();
CommonSanitizerReportMutex.Lock();
reporting_thread_tid_ = GetCurrentTidOrInvalid();
Printf("===================================================="
"=============\n");
}
static StaticSpinMutex lock_;
static u32 reporting_thread_tid_;
ScopedErrorReportLock error_report_lock_;
// Error currently being reported. This enables the destructor to interact
// with the debugger and point it to an error description.
static ErrorDescription current_error_;
bool halt_on_error_;
};
StaticSpinMutex ScopedInErrorReport::lock_;
u32 ScopedInErrorReport::reporting_thread_tid_ = kInvalidTid;
ErrorDescription ScopedInErrorReport::current_error_;
void ReportStackOverflow(const SignalContext &sig) {
void ReportDeadlySignal(const SignalContext &sig) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorStackOverflow error(GetCurrentTidOrInvalid(), sig);
in_report.ReportError(error);
}
void ReportDeadlySignal(int signo, const SignalContext &sig) {
ScopedInErrorReport in_report(/*fatal*/ true);
ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig, signo);
ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
in_report.ReportError(error);
}
@ -279,11 +216,12 @@ void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
in_report.ReportError(error);
}
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
uptr delete_alignment,
BufferedStackTrace *free_stack) {
ScopedInErrorReport in_report;
ErrorNewDeleteSizeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
delete_size);
ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
delete_size, delete_alignment);
in_report.ReportError(error);
}
@ -360,17 +298,58 @@ static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
in_report.ReportError(error);
}
static bool IsInvalidPointerPair(uptr a1, uptr a2) {
if (a1 == a2)
return false;
// 256B in shadow memory can be iterated quite fast
static const uptr kMaxOffset = 2048;
uptr left = a1 < a2 ? a1 : a2;
uptr right = a1 < a2 ? a2 : a1;
uptr offset = right - left;
if (offset <= kMaxOffset)
return __asan_region_is_poisoned(left, offset);
AsanThread *t = GetCurrentThread();
// check whether left is a stack memory pointer
if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
}
// check whether left is a heap memory address
HeapAddressDescription hdesc1, hdesc2;
if (GetHeapAddressInformation(left, 0, &hdesc1) &&
hdesc1.chunk_access.access_type == kAccessTypeInside)
return !GetHeapAddressInformation(right, 0, &hdesc2) ||
hdesc2.chunk_access.access_type != kAccessTypeInside ||
hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;
// check whether left is an address of a global variable
GlobalAddressDescription gdesc1, gdesc2;
if (GetGlobalAddressInformation(left, 0, &gdesc1))
return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
!gdesc1.PointsInsideTheSameVariable(gdesc2);
if (t->GetStackVariableShadowStart(right) ||
GetHeapAddressInformation(right, 0, &hdesc2) ||
GetGlobalAddressInformation(right - 1, 0, &gdesc2))
return true;
  // At this point we know nothing about either address.
return false;
}
static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
if (!flags()->detect_invalid_pointer_pairs) return;
uptr a1 = reinterpret_cast<uptr>(p1);
uptr a2 = reinterpret_cast<uptr>(p2);
AsanChunkView chunk1 = FindHeapChunkByAddress(a1);
AsanChunkView chunk2 = FindHeapChunkByAddress(a2);
bool valid1 = chunk1.IsAllocated();
bool valid2 = chunk2.IsAllocated();
if (!valid1 || !valid2 || !chunk1.Eq(chunk2)) {
if (IsInvalidPointerPair(a1, a2)) {
GET_CALLER_PC_BP_SP;
return ReportInvalidPointerPair(pc, bp, sp, a1, a2);
ReportInvalidPointerPair(pc, bp, sp, a1, a2);
}
}
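As a hedged user-level illustration (hypothetical program; catching it requires compiler support for pointer-comparison instrumentation plus the detect_invalid_pointer_pairs runtime flag used above), this is the kind of comparison the logic is designed to flag:

#include <cstdlib>
int main() {
  char *a = static_cast<char *>(std::malloc(8));
  char *b = static_cast<char *>(std::malloc(8));
  bool bad = a < b;  // ordering pointers into unrelated heap chunks: flagged
  std::free(a);
  std::free(b);
  return bad ? 0 : 1;
}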
// ----------------------- Mac-specific reports ----------------- {{{1

View File

@ -46,9 +46,9 @@ bool ParseFrameDescription(const char *frame_descr,
// Different kinds of error reports.
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal);
void ReportStackOverflow(const SignalContext &sig);
void ReportDeadlySignal(int signo, const SignalContext &sig);
void ReportNewDeleteSizeMismatch(uptr addr, uptr delete_size,
void ReportDeadlySignal(const SignalContext &sig);
void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
uptr delete_alignment,
BufferedStackTrace *free_stack);
void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);

View File

@ -84,26 +84,6 @@ void ShowStatsAndAbort() {
Die();
}
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ((beg % GetMmapGranularity()), 0);
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size, name);
if (res != (void*)beg) {
Report("ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n", size);
Abort();
}
if (common_flags()->no_huge_pages_for_shadow)
NoHugePagesInRegion(beg, size);
if (common_flags()->use_madv_dontdump)
DontDumpShadowMemory(beg, size);
}
// --------------- LowLevelAllocateCallbac ---------- {{{1
static void OnLowLevelAllocate(uptr ptr, uptr size) {
PoisonShadow(ptr, size, kAsanInternalHeapMagic);
@ -327,7 +307,7 @@ static void asan_atexit() {
static void InitializeHighMemEnd() {
#if !ASAN_FIXED_MAPPING
kHighMemEnd = GetMaxVirtualAddress();
kHighMemEnd = GetMaxUserVirtualAddress();
// Increase kHighMemEnd to make sure it's properly
// aligned together with kHighMemBeg:
kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
@ -335,46 +315,7 @@ static void InitializeHighMemEnd() {
CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
}
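A worked example of the |= rounding above, with assumed values (SHADOW_GRANULARITY = 8, 4K mmap granularity): OR-ing with the mask pushes the end address to the last byte of an aligned 32K block.

#include <cstdint>
#include <cstdio>
int main() {
  uint64_t high_mem_end = 0x7ffffffde000ULL;  // hypothetical max user address
  high_mem_end |= 8ULL * 4096 - 1;            // SHADOW_GRANULARITY * granularity - 1
  std::printf("%#llx\n", (unsigned long long)high_mem_end);  // 0x7ffffffdffff
}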
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
// is actually using this memory. Which means it needs a shadow...
uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
uptr GapShadowEnd =
RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
if (Verbosity())
Printf("protect_shadow_gap=0:"
" not protecting shadow gap, allocating gap's shadow\n"
"|| `[%p, %p]` || ShadowGap's shadow ||\n", GapShadowBeg,
GapShadowEnd);
ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
"unprotected gap shadow");
return;
}
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
uptr step = GetMmapGranularity();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res)
return;
}
}
Report("ERROR: Failed to protect the shadow gap. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
}
static void PrintAddressSpaceLayout() {
void PrintAddressSpaceLayout() {
Printf("|| `[%p, %p]` || HighMem ||\n",
(void*)kHighMemBeg, (void*)kHighMemEnd);
Printf("|| `[%p, %p]` || HighShadow ||\n",
@ -426,71 +367,6 @@ static void PrintAddressSpaceLayout() {
kHighShadowBeg > kMidMemEnd);
}
static void InitializeShadowMemory() {
// Set the shadow memory address to uninitialized.
__asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
uptr shadow_start = kLowShadowBeg;
  // Detect if a dynamic shadow address must be used and find an available
  // location when necessary. When a dynamic address is used, the macro |kLowShadowBeg|
// expands to |__asan_shadow_memory_dynamic_address| which is
// |kDefaultShadowSentinel|.
if (shadow_start == kDefaultShadowSentinel) {
__asan_shadow_memory_dynamic_address = 0;
CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart();
}
// Update the shadow memory address (potentially) used by instrumentation.
__asan_shadow_memory_dynamic_address = shadow_start;
if (kLowShadowBeg)
shadow_start -= GetMmapGranularity();
bool full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
!ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#endif
if (Verbosity()) PrintAddressSpaceLayout();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report("Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
DumpProcessMap();
Die();
}
}
static void AsanInitInternal() {
if (LIKELY(asan_inited)) return;
SanitizerToolName = "AddressSanitizer";
@ -531,6 +407,7 @@ static void AsanInitInternal() {
MaybeReexec();
// Setup internal allocator callback.
SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
SetLowLevelAllocateCallback(OnLowLevelAllocate);
InitializeAsanInterceptors();
@ -575,20 +452,18 @@ static void AsanInitInternal() {
InitTlsSize();
// Create main thread.
AsanThread *main_thread = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
/* stack */ nullptr, /* detached */ true);
AsanThread *main_thread = CreateMainThread();
CHECK_EQ(0, main_thread->tid());
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid(),
/* signal_thread_is_registered */ nullptr);
force_interface_symbols(); // no-op.
SanitizerInitializeUnwinder();
if (CAN_SANITIZE_LEAKS) {
__lsan::InitCommonLsan();
if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
Atexit(__lsan::DoLeakCheck);
if (flags()->halt_on_error)
Atexit(__lsan::DoLeakCheck);
else
Atexit(__lsan::DoRecoverableLeakCheckVoid);
}
}
@ -608,6 +483,11 @@ static void AsanInitInternal() {
}
VReport(1, "AddressSanitizer Init done\n");
if (flags()->sleep_after_init) {
Report("Sleeping for %d second(s)\n", flags()->sleep_after_init);
SleepForSeconds(flags()->sleep_after_init);
}
}
// Initialize as requested from some part of ASan runtime library (interceptors,
@ -647,6 +527,7 @@ void NOINLINE __asan_handle_no_return() {
top = curr_thread->stack_top();
bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
} else {
CHECK(!SANITIZER_FUCHSIA);
// If we haven't seen this thread, try asking the OS for stack bounds.
uptr tls_addr, tls_size, stack_size;
GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,

View File

@ -47,7 +47,7 @@ struct ScarinessScoreBase {
};
int GetScore() const { return score; }
const char *GetDescription() const { return descr; }
void Print() {
void Print() const {
if (score && flags()->print_scariness)
Printf("SCARINESS: %d (%s)\n", score, descr);
}

View File

@ -0,0 +1,165 @@
//===-- asan_shadow_setup.cc ----------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Set up the shadow memory.
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_platform.h"
// asan_fuchsia.cc has its own InitializeShadowMemory implementation.
#if !SANITIZER_FUCHSIA
#include "asan_internal.h"
#include "asan_mapping.h"
namespace __asan {
// ---------------------- mmap -------------------- {{{1
// Reserve memory range [beg, end].
// We need to use inclusive range because end+1 may not be representable.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
CHECK_EQ((beg % GetMmapGranularity()), 0);
CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
uptr size = end - beg + 1;
DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
void *res = MmapFixedNoReserve(beg, size, name);
if (res != (void *)beg) {
Report(
"ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
"Perhaps you're using ulimit -v\n",
size);
Abort();
}
if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size);
if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
}
static void ProtectGap(uptr addr, uptr size) {
if (!flags()->protect_shadow_gap) {
// The shadow gap is unprotected, so there is a chance that someone
// is actually using this memory. Which means it needs a shadow...
uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
uptr GapShadowEnd =
RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
if (Verbosity())
Printf(
"protect_shadow_gap=0:"
" not protecting shadow gap, allocating gap's shadow\n"
"|| `[%p, %p]` || ShadowGap's shadow ||\n",
GapShadowBeg, GapShadowEnd);
ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
"unprotected gap shadow");
return;
}
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res) return;
// A few pages at the start of the address space can not be protected.
// But we really want to protect as much as possible, to prevent this memory
// being returned as a result of a non-FIXED mmap().
if (addr == kZeroBaseShadowStart) {
uptr step = GetMmapGranularity();
while (size > step && addr < kZeroBaseMaxShadowStart) {
addr += step;
size -= step;
void *res = MmapFixedNoAccess(addr, size, "shadow gap");
if (addr == (uptr)res) return;
}
}
Report(
"ERROR: Failed to protect the shadow gap. "
"ASan cannot proceed correctly. ABORTING.\n");
DumpProcessMap();
Die();
}
static void MaybeReportLinuxPIEBug() {
#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__))
Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n");
Report(
"See https://github.com/google/sanitizers/issues/856 for possible "
"workarounds.\n");
#endif
}
void InitializeShadowMemory() {
// Set the shadow memory address to uninitialized.
__asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
uptr shadow_start = kLowShadowBeg;
  // Detect if a dynamic shadow address must be used and find an available
  // location when necessary. When a dynamic address is used, the macro |kLowShadowBeg|
// expands to |__asan_shadow_memory_dynamic_address| which is
// |kDefaultShadowSentinel|.
bool full_shadow_is_available = false;
if (shadow_start == kDefaultShadowSentinel) {
__asan_shadow_memory_dynamic_address = 0;
CHECK_EQ(0, kLowShadowBeg);
shadow_start = FindDynamicShadowStart();
if (SANITIZER_LINUX) full_shadow_is_available = true;
}
// Update the shadow memory address (potentially) used by instrumentation.
__asan_shadow_memory_dynamic_address = shadow_start;
if (kLowShadowBeg) shadow_start -= GetMmapGranularity();
if (!full_shadow_is_available)
full_shadow_is_available =
MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
!ASAN_FIXED_MAPPING
if (!full_shadow_is_available) {
kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
}
#endif
if (Verbosity()) PrintAddressSpaceLayout();
if (full_shadow_is_available) {
// mmap the low shadow plus at least one page at the left.
if (kLowShadowBeg)
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gap.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
} else if (kMidMemBeg &&
MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
CHECK(kLowShadowBeg != kLowShadowEnd);
// mmap the low shadow plus at least one page at the left.
ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
// mmap the mid shadow.
ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
// mmap the high shadow.
ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
// protect the gaps.
ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
} else {
Report(
"Shadow memory range interleaves with an existing memory mapping. "
"ASan cannot proceed correctly. ABORTING.\n");
Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
shadow_start, kHighShadowEnd);
MaybeReportLinuxPIEBug();
DumpProcessMap();
Die();
}
}
} // namespace __asan
#endif // !SANITIZER_FUCHSIA

View File

@ -31,9 +31,8 @@ u32 GetMallocContextSize();
// The pc will be in the position 0 of the resulting stack trace.
// The bp may refer to the current frame or to the caller's frame.
ALWAYS_INLINE
void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
uptr pc, uptr bp, void *context,
bool fast) {
void GetStackTrace(BufferedStackTrace *stack, uptr max_depth, uptr pc, uptr bp,
void *context, bool fast) {
#if SANITIZER_WINDOWS
stack->Unwind(max_depth, pc, bp, context, 0, 0, fast);
#else
@ -41,10 +40,6 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
stack->size = 0;
if (LIKELY(asan_inited)) {
if ((t = GetCurrentThread()) && !t->isUnwinding()) {
// On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
// yields the call stack of the signal's handler and not of the code
// that raised the signal (as it does on Linux).
if (SANITIZER_FREEBSD && t->isInDeadlySignal()) fast = true;
uptr stack_top = t->stack_top();
uptr stack_bottom = t->stack_bottom();
ScopedUnwinding unwind_scope(t);
@ -66,32 +61,29 @@ void GetStackTraceWithPcBpAndContext(BufferedStackTrace *stack, uptr max_depth,
// as early as possible (in functions exposed to the user), as we generally
// don't want stack trace to contain functions from ASan internals.
#define GET_STACK_TRACE(max_size, fast) \
BufferedStackTrace stack; \
if (max_size <= 2) { \
stack.size = max_size; \
if (max_size > 0) { \
stack.top_frame_bp = GET_CURRENT_FRAME(); \
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
if (max_size > 1) \
stack.trace_buffer[1] = GET_CALLER_PC(); \
} \
} else { \
GetStackTraceWithPcBpAndContext(&stack, max_size, \
StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), 0, fast); \
#define GET_STACK_TRACE(max_size, fast) \
BufferedStackTrace stack; \
if (max_size <= 2) { \
stack.size = max_size; \
if (max_size > 0) { \
stack.top_frame_bp = GET_CURRENT_FRAME(); \
stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \
} \
} else { \
GetStackTrace(&stack, max_size, StackTrace::GetCurrentPc(), \
GET_CURRENT_FRAME(), 0, fast); \
}
#define GET_STACK_TRACE_FATAL(pc, bp) \
BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL(pc, bp) \
BufferedStackTrace stack; \
GetStackTrace(&stack, kStackTraceMax, pc, bp, 0, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(sig) \
BufferedStackTrace stack; \
GetStackTraceWithPcBpAndContext(&stack, kStackTraceMax, \
(sig).pc, (sig).bp, (sig).context, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_SIGNAL(sig) \
BufferedStackTrace stack; \
GetStackTrace(&stack, kStackTraceMax, (sig).pc, (sig).bp, (sig).context, \
common_flags()->fast_unwind_on_fatal)
#define GET_STACK_TRACE_FATAL_HERE \
GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
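To make the intended call pattern concrete, here is a hedged sketch; the entry-point name is hypothetical, and GET_STACK_TRACE_FATAL_HERE expands to a BufferedStackTrace local named 'stack' per the definitions above:

// Hypothetical user-facing entry point (illustration only, not from this diff):
void ReportSomethingUserVisible() {
  GET_STACK_TRACE_FATAL_HERE;  // declares and fills the local 'stack'
  stack.Print();               // report with the trace captured at the boundary
}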

View File

@ -27,11 +27,6 @@ namespace __asan {
// AsanThreadContext implementation.
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};
void AsanThreadContext::OnCreated(void *arg) {
CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
if (args->stack)
@ -88,7 +83,7 @@ AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
thread->start_routine_ = start_routine;
thread->arg_ = arg;
CreateThreadContextArgs args = { thread, stack };
AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
parent_tid, &args);
@ -223,12 +218,12 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
return nullptr;
}
void AsanThread::Init() {
void AsanThread::Init(const InitOptions *options) {
next_stack_top_ = next_stack_bottom_ = 0;
atomic_store(&stack_switching_, false, memory_order_release);
fake_stack_ = nullptr; // Will be initialized lazily if needed.
CHECK_EQ(this->stack_size(), 0U);
SetThreadStackAndTls();
SetThreadStackAndTls(options);
CHECK_GT(this->stack_size(), 0U);
CHECK(AddrIsInMem(stack_bottom_));
CHECK(AddrIsInMem(stack_top_ - 1));
@ -239,6 +234,10 @@ void AsanThread::Init() {
&local);
}
// Fuchsia doesn't use ThreadStart.
// asan_fuchsia.c defines CreateMainThread and SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA
thread_return_t AsanThread::ThreadStart(
tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
Init();
@ -270,7 +269,21 @@ thread_return_t AsanThread::ThreadStart(
return res;
}
void AsanThread::SetThreadStackAndTls() {
AsanThread *CreateMainThread() {
AsanThread *main_thread = AsanThread::Create(
/* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
/* stack */ nullptr, /* detached */ true);
SetCurrentThread(main_thread);
main_thread->ThreadStart(internal_getpid(),
/* signal_thread_is_registered */ nullptr);
return main_thread;
}
// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (which see, above). It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
DCHECK_EQ(options, nullptr);
uptr tls_size = 0;
uptr stack_size = 0;
GetThreadStackAndTls(tid() == 0, const_cast<uptr *>(&stack_bottom_),
@ -283,6 +296,8 @@ void AsanThread::SetThreadStackAndTls() {
CHECK(AddrIsInStack((uptr)&local));
}
#endif // !SANITIZER_FUCHSIA
void AsanThread::ClearShadowForThreadStackAndTLS() {
PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
if (tls_begin_ != tls_end_)
@ -302,7 +317,7 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
access->frame_descr = (const char *)((uptr*)bottom)[1];
return true;
}
uptr aligned_addr = addr & ~(SANITIZER_WORDSIZE/8 - 1); // align addr.
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
@ -331,6 +346,29 @@ bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
return true;
}
uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
uptr bottom = 0;
if (AddrIsInStack(addr)) {
bottom = stack_bottom();
} else if (has_fake_stack()) {
bottom = fake_stack()->AddrIsInFakeStack(addr);
CHECK(bottom);
} else
return 0;
uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
u8 *shadow_bottom = (u8*)MemToShadow(bottom);
while (shadow_ptr >= shadow_bottom &&
(*shadow_ptr != kAsanStackLeftRedzoneMagic &&
*shadow_ptr != kAsanStackMidRedzoneMagic &&
*shadow_ptr != kAsanStackRightRedzoneMagic))
shadow_ptr--;
return (uptr)shadow_ptr + 1;
}
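The downward scan above relies on ASan's direct shadow mapping: each 8 bytes of application memory correspond to one shadow byte at (addr >> 3) + offset. A minimal sketch of the arithmetic, assuming the default scale and the Linux x86-64 offset (both values are assumptions for illustration):

#include <stdint.h>
static const unsigned  kShadowScaleSketch  = 3;            /* log2(8) */
static const uintptr_t kShadowOffsetSketch = 0x7fff8000u;  /* Linux x86-64 default */
static inline uint8_t *mem_to_shadow_sketch(uintptr_t addr) {
  return (uint8_t *)((addr >> kShadowScaleSketch) + kShadowOffsetSketch);
}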
bool AsanThread::AddrIsInStack(uptr addr) {
const auto bounds = GetStackBounds();
return addr >= bounds.bottom && addr < bounds.top;

View File

@ -49,6 +49,11 @@ class AsanThreadContext : public ThreadContextBase {
void OnCreated(void *arg) override;
void OnFinished() override;
struct CreateThreadContextArgs {
AsanThread *thread;
StackTrace *stack;
};
};
// AsanThreadContext objects are never freed, so we need many of them.
@ -62,7 +67,9 @@ class AsanThread {
static void TSDDtor(void *tsd);
void Destroy();
void Init(); // Should be called from the thread itself.
struct InitOptions;
void Init(const InitOptions *options = nullptr);
thread_return_t ThreadStart(tid_t os_id,
atomic_uintptr_t *signal_thread_is_registered);
@ -83,6 +90,9 @@ class AsanThread {
};
bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
// Returns a pointer to the start of the stack variable's shadow memory.
uptr GetStackVariableShadowStart(uptr addr);
bool AddrIsInStack(uptr addr);
void DeleteFakeStack(int tid) {
@ -118,17 +128,15 @@ class AsanThread {
bool isUnwinding() const { return unwinding_; }
void setUnwinding(bool b) { unwinding_ = b; }
// True if we are in a deadly signal handler.
bool isInDeadlySignal() const { return in_deadly_signal_; }
void setInDeadlySignal(bool b) { in_deadly_signal_ = b; }
AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
AsanStats &stats() { return stats_; }
private:
// NOTE: There is no AsanThread constructor. It is allocated
// via mmap() and *must* be valid in zero-initialized state.
void SetThreadStackAndTls();
void SetThreadStackAndTls(const InitOptions *options);
void ClearShadowForThreadStackAndTLS();
FakeStack *AsyncSignalSafeLazyInitFakeStack();
@ -158,7 +166,6 @@ class AsanThread {
AsanThreadLocalMallocStorage malloc_storage_;
AsanStats stats_;
bool unwinding_;
bool in_deadly_signal_;
};
// ScopedUnwinding is a scope for stacktracing member of a context
@ -173,20 +180,6 @@ class ScopedUnwinding {
AsanThread *thread;
};
// ScopedDeadlySignal is a scope for handling deadly signals.
class ScopedDeadlySignal {
public:
explicit ScopedDeadlySignal(AsanThread *t) : thread(t) {
if (thread) thread->setInDeadlySignal(true);
}
~ScopedDeadlySignal() {
if (thread) thread->setInDeadlySignal(false);
}
private:
AsanThread *thread;
};
// Returns a single instance of registry.
ThreadRegistry &asanThreadRegistry();

View File

@ -57,8 +57,8 @@ long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
// FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
SignalContext sig = SignalContext::Create(exception_record, context);
ReportDeadlySignal(exception_record->ExceptionCode, sig);
SignalContext sig(exception_record, context);
ReportDeadlySignal(sig);
UNREACHABLE("returned from reporting deadly signal");
}

View File

@ -20,8 +20,11 @@ COMPILER_RT_ABI double __adddf3(double a, double b){
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_dadd(double a, double b) {
return __adddf3(a, b);
}
#else
AEABI_RTABI double __aeabi_dadd(double a, double b) COMPILER_RT_ALIAS(__adddf3);
#endif
#endif
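This hunk, and many like it below, replaces an out-of-line thunk with COMPILER_RT_ALIAS on soft-float configurations. As a hedged sketch, the macro amounts to a GNU symbol alias, so __aeabi_dadd becomes a second name for __adddf3's code instead of an extra call frame; the _demo names below are mine, not from the source:

/* Sketch of the alias technique (assumes an ELF target with GNU attributes). */
#define RT_ALIAS_SKETCH(name) __attribute__((__alias__(#name)))
double adddf3_demo(double a, double b) { return a + b; }  /* stand-in body */
double aeabi_dadd_demo(double a, double b) RT_ALIAS_SKETCH(adddf3_demo);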

View File

@ -20,8 +20,11 @@ COMPILER_RT_ABI float __addsf3(float a, float b) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_fadd(float a, float b) {
return __addsf3(a, b);
}
#else
AEABI_RTABI float __aeabi_fadd(float a, float b) COMPILER_RT_ALIAS(__addsf3);
#endif
#endif

View File

@ -30,7 +30,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
push {r0-r3, lr}
bl __aeabi_cdcmpeq_check_nan
cmp r0, #1
#if __ARM_ARCH_ISA_THUMB == 1
#if defined(USE_THUMB_1)
beq 1f
// NaN has been ruled out, so __aeabi_cdcmple can't trap
mov r0, sp
@ -46,9 +46,12 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cdcmple can't trap
// Use "it ne" + unconditional branch to guarantee a supported relocation if
// __aeabi_cdcmple is in a different section for some builds.
IT(ne)
bne __aeabi_cdcmple
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
#if defined(USE_THUMB_2)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
@ -78,7 +81,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
bl __aeabi_dcmplt
cmp r0, #1
#if __ARM_ARCH_ISA_THUMB == 1
#if defined(USE_THUMB_1)
bne 1f
// Z = 0, C = 0
movs r0, #1

View File

@ -30,7 +30,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
push {r0-r3, lr}
bl __aeabi_cfcmpeq_check_nan
cmp r0, #1
#if __ARM_ARCH_ISA_THUMB == 1
#if defined(USE_THUMB_1)
beq 1f
// NaN has been ruled out, so __aeabi_cfcmple can't trap
mov r0, sp
@ -46,9 +46,12 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
pop {r0-r3, lr}
// NaN has been ruled out, so __aeabi_cfcmple can't trap
// Use "it ne" + unconditional branch to guarantee a supported relocation if
// __aeabi_cfcmple is in a different section for some builds.
IT(ne)
bne __aeabi_cfcmple
#if defined(__ARM_ARCH_7M__) || defined(__ARM_ARCH_7EM__)
#if defined(USE_THUMB_2)
mov ip, #APSR_C
msr APSR_nzcvq, ip
#else
@ -78,7 +81,7 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
bl __aeabi_fcmplt
cmp r0, #1
#if __ARM_ARCH_ISA_THUMB == 1
#if defined(USE_THUMB_1)
bne 1f
// Z = 0, C = 0
movs r0, #1

View File

@ -19,18 +19,11 @@
#define __aeabi_idivmod __rt_sdiv
#endif
.syntax unified
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__aeabi_idivmod)
#else
DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
#endif
#if defined(USE_THUMB_1)
push {r0, r1, lr}
bl SYMBOL_NAME(__divsi3)

View File

@ -14,7 +14,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcmp)
#ifdef USE_THUMB_1
push {r7, lr}
bl memcmp
pop {r7, pc}
#else
b memcmp
#endif
END_COMPILERRT_FUNCTION(__aeabi_memcmp)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp4, __aeabi_memcmp)

View File

@ -14,7 +14,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memcpy)
#ifdef USE_THUMB_1
push {r7, lr}
bl memcpy
pop {r7, pc}
#else
b memcpy
#endif
END_COMPILERRT_FUNCTION(__aeabi_memcpy)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy4, __aeabi_memcpy)

View File

@ -13,7 +13,13 @@
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memmove)
#ifdef USE_THUMB_1
push {r7, lr}
bl memmove
pop {r7, pc}
#else
b memmove
#endif
END_COMPILERRT_FUNCTION(__aeabi_memmove)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove4, __aeabi_memmove)

View File

@ -18,16 +18,29 @@ DEFINE_COMPILERRT_FUNCTION(__aeabi_memset)
mov r3, r1
mov r1, r2
mov r2, r3
#ifdef USE_THUMB_1
push {r7, lr}
bl memset
pop {r7, pc}
#else
b memset
#endif
END_COMPILERRT_FUNCTION(__aeabi_memset)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset4, __aeabi_memset)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset8, __aeabi_memset)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__aeabi_memclr)
mov r2, r1
movs r1, #0
#ifdef USE_THUMB_1
push {r7, lr}
bl memset
pop {r7, pc}
#else
b memset
#endif
END_COMPILERRT_FUNCTION(__aeabi_memclr)
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr4, __aeabi_memclr)

View File

@ -22,16 +22,10 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__aeabi_uidivmod)
#else
DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
#endif
#if __ARM_ARCH_ISA_THUMB == 1
#if defined(USE_THUMB_1)
cmp r0, r1
bcc LOCAL_LABEL(case_denom_larger)
push {r0, r1, lr}
@ -44,7 +38,7 @@ LOCAL_LABEL(case_denom_larger):
movs r1, r0
movs r0, #0
JMP (lr)
#else
#else // defined(USE_THUMB_1)
push { lr }
sub sp, sp, #4
mov r2, sp

View File

@ -11,9 +11,7 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
//
// extern uint64_t __bswapdi2(uint64_t);
@ -21,11 +19,7 @@
// Reverse all the bytes in a 64-bit integer.
//
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__bswapdi2)
#else
DEFINE_COMPILERRT_FUNCTION(__bswapdi2)
#endif
#if __ARM_ARCH < 6
	// Architectures before ARMv6 do not have the "rev" instruction.
// r2 = rev(r0)

View File

@ -11,9 +11,7 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
//
// extern uint32_t __bswapsi2(uint32_t);
@ -21,11 +19,7 @@
// Reverse all the bytes in a 32-bit integer.
//
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__bswapsi2)
#else
DEFINE_COMPILERRT_FUNCTION(__bswapsi2)
#endif
#if __ARM_ARCH < 6
	// Architectures before ARMv6 do not have the "rev" instruction.
eor r1, r0, r0, ror #16

View File

@ -15,17 +15,10 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__clzdi2)
#else
DEFINE_COMPILERRT_FUNCTION(__clzdi2)
#endif
#ifdef __ARM_FEATURE_CLZ
#ifdef __ARMEB__
cmp r0, 0

View File

@ -15,16 +15,10 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__clzsi2)
#else
DEFINE_COMPILERRT_FUNCTION(__clzsi2)
#endif
#ifdef __ARM_FEATURE_CLZ
clz r0, r0
JMP(lr)

View File

@ -40,25 +40,19 @@
#include "../assembly.h"
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
@ int __eqsf2(float a, float b)
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__eqsf2)
#else
DEFINE_COMPILERRT_FUNCTION(__eqsf2)
#endif
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
// Make copies of a and b with the sign bit shifted off the top. These will
// be used to detect zeros and NaNs.
#if __ARM_ARCH_ISA_THUMB == 1
#if defined(USE_THUMB_1)
push {r6, lr}
lsls r2, r0, #1
lsls r3, r1, #1
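The "shift the sign bit off the top" trick makes zero and NaN detection a single unsigned compare. A worked C sketch, assuming IEEE-754 binary32:

#include <stdint.h>
#include <string.h>
/* With the sign shifted out, the exponent occupies the top 8 bits. */
static int is_nan32(float f) {
  uint32_t u; memcpy(&u, &f, sizeof u);
  return (u << 1) > 0xFF000000u;  /* exponent all ones, mantissa nonzero */
}
static int is_zero32(float f) {
  uint32_t u; memcpy(&u, &f, sizeof u);
  return (u << 1) == 0;           /* +0.0f or -0.0f */
}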
@ -150,7 +144,7 @@ DEFINE_COMPILERRT_FUNCTION(__eqsf2)
// If a == b, then the Z flag is set, so we can get the correct final value
// into r0 by simply or'ing with 1 if Z is clear.
// For Thumb-1, r0 contains -1 if a < b, 0 if a > b and 0 if a == b.
#if defined(USE_THUMB_1)
#if !defined(USE_THUMB_1)
it ne
orrne r0, r0, #1
#endif
@ -185,11 +179,7 @@ DEFINE_COMPILERRT_FUNCTION_ALIAS(__nesf2, __eqsf2)
@ int __gtsf2(float a, float b)
.p2align 2
#if defined(USE_THUMB)
DEFINE_COMPILERRT_THUMB_FUNCTION(__gtsf2)
#else
DEFINE_COMPILERRT_FUNCTION(__gtsf2)
#endif
// Identical to the preceding, except that we return -1 for NaN values.
// Given that the two paths share so much code, one might be tempted to
// unify them; however, the extra code needed to do so makes the code size
@ -263,11 +253,7 @@ DEFINE_COMPILERRT_FUNCTION_ALIAS(__gesf2, __gtsf2)
@ int __unordsf2(float a, float b)
.p2align 2
#if defined(USE_THUMB)
DEFINE_COMPILERRT_THUMB_FUNCTION(__unordsf2)
#else
DEFINE_COMPILERRT_FUNCTION(__unordsf2)
#endif
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0

View File

@ -23,20 +23,14 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
@ int __divmodsi4(int dividend, int divisor, int *remainder)
@ Calculate the quotient and remainder of the (signed) division. The return
@ value is the quotient, the remainder is placed in the variable.
.p2align 3
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__divmodsi4)
#else
DEFINE_COMPILERRT_FUNCTION(__divmodsi4)
#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divzero)

View File

@ -22,9 +22,7 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
.p2align 3
// Ok, APCS and AAPCS agree on 32 bit args, so it's safe to use the same routine.
@ -33,11 +31,7 @@ DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_idiv, __divsi3)
@ int __divsi3(int dividend, int divisor)
@ Calculate and return the quotient of the (signed) division.
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__divsi3)
#else
DEFINE_COMPILERRT_FUNCTION(__divsi3)
#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1,r1
beq LOCAL_LABEL(divzero)
@ -49,14 +43,14 @@ LOCAL_LABEL(divzero):
#else
ESTABLISH_FRAME
// Set aside the sign of the quotient.
# if __ARM_ARCH_ISA_THUMB == 1
# if defined(USE_THUMB_1)
movs r4, r0
eors r4, r1
# else
eor r4, r0, r1
# endif
// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
# if __ARM_ARCH_ISA_THUMB == 1
# if defined(USE_THUMB_1)
asrs r2, r0, #31
asrs r3, r1, #31
eors r0, r2
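A worked example of the absolute-value identity used here: with m = x >> 31 (arithmetic shift, so 0 for non-negative x and -1 for negative x), (x ^ m) - m leaves x unchanged when m == 0 and yields ~x + 1 == -x when m == -1. In C (undefined for INT_MIN, as in the assembly):

static int branchless_abs(int x) {
  int m = x >> 31;     /* 0 if x >= 0, -1 (all ones) if x < 0 */
  return (x ^ m) - m;  /* x unchanged, or ~x + 1 == -x */
}
/* branchless_abs(7) == 7, branchless_abs(-5) == 5 */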

View File

@ -22,19 +22,13 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
@ int __modsi3(int dividend, int divisor)
@ Calculate and return the remainder of the (signed) division.
.p2align 3
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__modsi3)
#else
DEFINE_COMPILERRT_FUNCTION(__modsi3)
#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divzero)

View File

@ -16,9 +16,7 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
@ unsigned int __udivmodsi4(unsigned int dividend, unsigned int divisor,
@ unsigned int *remainder)
@ -26,11 +24,7 @@
@ value is the quotient, the remainder is placed in the variable.
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__udivmodsi4)
#else
DEFINE_COMPILERRT_FUNCTION(__udivmodsi4)
#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divby0)

View File

@ -16,9 +16,8 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
.p2align 2
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_uidiv, __udivsi3)
@ -26,11 +25,7 @@ DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_uidiv, __udivsi3)
@ unsigned int __udivsi3(unsigned int dividend, unsigned int divisor)
@ Calculate and return the quotient of the (unsigned) division.
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__udivsi3)
#else
DEFINE_COMPILERRT_FUNCTION(__udivsi3)
#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divby0)

View File

@ -16,19 +16,13 @@
.syntax unified
.text
#if defined(USE_THUMB_PROLOGUE)
.thumb
#endif
DEFINE_CODE_STATE
@ unsigned int __umodsi3(unsigned int dividend, unsigned int divisor)
@ Calculate and return the remainder of the (unsigned) division.
.p2align 2
#if defined(USE_THUMB_PROLOGUE)
DEFINE_COMPILERRT_THUMB_FUNCTION(__umodsi3)
#else
DEFINE_COMPILERRT_FUNCTION(__umodsi3)
#endif
#if __ARM_ARCH_EXT_IDIV__
tst r1, r1
beq LOCAL_LABEL(divby0)

View File

@ -41,8 +41,5 @@ __ashldi3(di_int a, si_int b)
}
#if defined(__ARM_EABI__)
AEABI_RTABI di_int __aeabi_llsl(di_int a, si_int b) {
return __ashldi3(a, b);
}
AEABI_RTABI di_int __aeabi_llsl(di_int a, si_int b) COMPILER_RT_ALIAS(__ashldi3);
#endif

View File

@ -42,8 +42,5 @@ __ashrdi3(di_int a, si_int b)
}
#if defined(__ARM_EABI__)
AEABI_RTABI di_int __aeabi_lasr(di_int a, si_int b) {
return __ashrdi3(a, b);
}
AEABI_RTABI di_int __aeabi_lasr(di_int a, si_int b) COMPILER_RT_ALIAS(__ashrdi3);
#endif

View File

@ -68,10 +68,42 @@
#endif
#if defined(__arm__)
/*
* Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
* - for '-mthumb -march=armv6' compiler defines '__thumb__'
* - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
*/
#if defined(__thumb2__) || defined(__thumb__)
#define DEFINE_CODE_STATE .thumb SEPARATOR
#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
#if defined(__thumb2__)
#define USE_THUMB_2
#define IT(cond) it cond
#define ITT(cond) itt cond
#define ITE(cond) ite cond
#else
#define USE_THUMB_1
#define IT(cond)
#define ITT(cond)
#define ITE(cond)
#endif // defined(__thumb2__)
#else // !defined(__thumb2__) && !defined(__thumb__)
#define DEFINE_CODE_STATE .arm SEPARATOR
#define DECLARE_FUNC_ENCODING
#define IT(cond)
#define ITT(cond)
#define ITE(cond)
#endif
#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
#endif
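The predefines named in the comment can be checked directly. A small compile-time sketch that reports the targeted ISA, using the #pragma message extension supported by GCC and Clang:

#if defined(__thumb2__)
# pragma message("targeting Thumb-2")
#elif defined(__thumb__)
# pragma message("targeting Thumb-1")
#elif defined(__arm__)
# pragma message("targeting ARM")
#endif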
#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
#define ARM_HAS_BX
#endif
#if !defined(__ARM_FEATURE_CLZ) && __ARM_ARCH_ISA_THUMB != 1 && \
#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
(__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
#define __ARM_FEATURE_CLZ
#endif
@ -93,37 +125,15 @@
JMP(ip)
#endif
/*
* Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
* - for '-mthumb -march=armv6' compiler defines '__thumb__'
* - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
*/
#if defined(__thumb2__)
#define USE_THUMB_2 1
#elif defined(__thumb__)
#define USE_THUMB_1 1
#endif
#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
#endif
#if defined(USE_THUMB_1) || defined(USE_THUMB_2)
#define USE_THUMB_PROLOGUE 1
#endif
#if defined(USE_THUMB_2)
#define IT(cond) it cond
#define ITT(cond) itt cond
#define ITE(cond) ite cond
#define WIDE(op) op.w
#else
#define IT(cond)
#define ITT(cond)
#define ITE(cond)
#define WIDE(op) op
#endif
#endif /* defined(__arm__) */
#else // !defined(__arm__)
#define DECLARE_FUNC_ENCODING
#define DEFINE_CODE_STATE
#endif
#define GLUE2(a, b) a##b
#define GLUE(a, b) GLUE2(a, b)
@ -137,13 +147,16 @@
#endif
#define DEFINE_COMPILERRT_FUNCTION(name) \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_SYMBOL_VISIBILITY(name) \
DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
@ -152,16 +165,20 @@
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
DEFINE_CODE_STATE \
FILE_LEVEL_DIRECTIVE SEPARATOR \
.globl SYMBOL_NAME(name) SEPARATOR \
SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
DECLARE_FUNC_ENCODING \
SYMBOL_NAME(name):
#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
DEFINE_CODE_STATE \
.globl name SEPARATOR \
SYMBOL_IS_FUNC(name) SEPARATOR \
HIDDEN(name) SEPARATOR \
DECLARE_FUNC_ENCODING \
name:
#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \

View File

@ -9,6 +9,7 @@
*/
#include "int_lib.h"
#include <assert.h>
#include <stddef.h>
#if __APPLE__
@ -23,7 +24,7 @@ uint32_t FlushInstructionCache(uintptr_t hProcess, void *lpBaseAddress,
uintptr_t GetCurrentProcess(void);
#endif
#if (defined(__FreeBSD__) || defined(__Bitrig__)) && defined(__arm__)
#if defined(__FreeBSD__) && defined(__arm__)
#include <sys/types.h>
#include <machine/sysarch.h>
#endif
@ -32,7 +33,7 @@ uintptr_t GetCurrentProcess(void);
#include <machine/sysarch.h>
#endif
#if defined(__mips__) && !defined(__FreeBSD__)
#if defined(__linux__) && defined(__mips__)
#include <sys/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
@ -41,7 +42,7 @@ uintptr_t GetCurrentProcess(void);
* clear_mips_cache - Invalidates instruction cache for Mips.
*/
static void clear_mips_cache(const void* Addr, size_t Size) {
asm volatile (
__asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
@ -96,7 +97,7 @@ void __clear_cache(void *start, void *end) {
* so there is nothing to do
*/
#elif defined(__arm__) && !defined(__APPLE__)
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__Bitrig__)
#if defined(__FreeBSD__) || defined(__NetBSD__)
struct arm_sync_icache_args arg;
arg.addr = (uintptr_t)start;
@ -121,15 +122,13 @@ void __clear_cache(void *start, void *end) {
: "=r"(start_reg)
: "r"(syscall_nr), "r"(start_reg), "r"(end_reg),
"r"(flags));
if (start_reg != 0) {
compilerrt_abort();
}
assert(start_reg == 0 && "Cache flush syscall failed.");
#elif defined(_WIN32)
FlushInstructionCache(GetCurrentProcess(), start, end - start);
#else
compilerrt_abort();
#endif
#elif defined(__mips__) && !defined(__FreeBSD__)
#elif defined(__linux__) && defined(__mips__)
const uintptr_t start_int = (uintptr_t) start;
const uintptr_t end_int = (uintptr_t) end;
#if defined(__ANDROID__) && defined(__LP64__)
@ -165,6 +164,21 @@ void __clear_cache(void *start, void *end) {
for (addr = xstart; addr < xend; addr += icache_line_size)
__asm __volatile("ic ivau, %0" :: "r"(addr));
__asm __volatile("isb sy");
#elif defined (__powerpc64__)
const size_t line_size = 32;
const size_t len = (uintptr_t)end - (uintptr_t)start;
const uintptr_t mask = ~(line_size - 1);
const uintptr_t start_line = ((uintptr_t)start) & mask;
const uintptr_t end_line = ((uintptr_t)start + len + line_size - 1) & mask;
for (uintptr_t line = start_line; line < end_line; line += line_size)
__asm__ volatile("dcbf 0, %0" : : "r"(line));
__asm__ volatile("sync");
for (uintptr_t line = start_line; line < end_line; line += line_size)
__asm__ volatile("icbi 0, %0" : : "r"(line));
__asm__ volatile("isync");
#else
#if __APPLE__
/* On Darwin, sys_icache_invalidate() provides this functionality */
@ -174,4 +188,3 @@ void __clear_cache(void *start, void *end) {
#endif
#endif
}
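A hedged usage sketch for __clear_cache as a whole: after writing instructions into an executable buffer, the writer must flush the range before jumping to it. __builtin___clear_cache is the compiler builtin that lowers to this function; install_code and its arguments are hypothetical:

#include <string.h>
/* Copy machine code into 'buf' (assumed executable) and flush the range. */
static void install_code(void *buf, const void *code, size_t len) {
  memcpy(buf, code, len);
  __builtin___clear_cache((char *)buf, (char *)buf + len);
}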

View File

@ -143,8 +143,11 @@ __gtdf2(fp_t a, fp_t b) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) {
return __unorddf2(a, b);
}
#else
AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) COMPILER_RT_ALIAS(__unorddf2);
#endif
#endif

View File

@ -143,8 +143,11 @@ __gtsf2(fp_t a, fp_t b) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) {
return __unordsf2(a, b);
}
#else
AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) COMPILER_RT_ALIAS(__unordsf2);
#endif
#endif

View File

@ -54,6 +54,7 @@ enum ProcessorTypes {
AMD_BTVER1,
AMD_BTVER2,
AMDFAM17H,
INTEL_KNM,
CPU_TYPE_MAX
};
@ -74,6 +75,7 @@ enum ProcessorSubtypes {
INTEL_COREI7_BROADWELL,
INTEL_COREI7_SKYLAKE,
INTEL_COREI7_SKYLAKE_AVX512,
INTEL_COREI7_CANNONLAKE,
CPU_SUBTYPE_MAX
};
@ -339,6 +341,12 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512"
break;
// Cannonlake:
case 0x66:
*Type = INTEL_COREI7;
*Subtype = INTEL_COREI7_CANNONLAKE; // "cannonlake"
break;
case 0x1c: // Most 45 nm Intel Atom processors
case 0x26: // 45 nm Atom Lincroft
case 0x27: // 32 nm Atom Medfield
@ -361,6 +369,10 @@ getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
*Type = INTEL_KNL; // knl
break;
case 0x85:
*Type = INTEL_KNM; // knm
break;
default: // Unknown family 6 CPU.
break;
break;
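These model tables back the __builtin_cpu_is / __builtin_cpu_supports machinery. A hedged usage sketch; whether a given compiler release accepts the "cannonlake" string is an assumption tied to the matching compiler change:

#include <stdio.h>
int main(void) {
  __builtin_cpu_init();
  if (__builtin_cpu_is("cannonlake"))
    puts("Cannonlake detected");
  if (__builtin_cpu_supports("avx512f"))
    puts("AVX-512F available");
  return 0;
}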

View File

@ -183,8 +183,11 @@ __divdf3(fp_t a, fp_t b) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) {
return __divdf3(a, b);
}
#else
AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) COMPILER_RT_ALIAS(__divdf3);
#endif
#endif

View File

@ -167,8 +167,11 @@ __divsf3(fp_t a, fp_t b) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) {
return __divsf3(a, b);
}
#else
AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) COMPILER_RT_ALIAS(__divsf3);
#endif
#endif

View File

@ -35,8 +35,5 @@ __divsi3(si_int a, si_int b)
}
#if defined(__ARM_EABI__)
AEABI_RTABI si_int __aeabi_idiv(si_int a, si_int b) {
return __divsi3(a, b);
}
AEABI_RTABI si_int __aeabi_idiv(si_int a, si_int b) COMPILER_RT_ALIAS(__divsi3);
#endif

View File

@ -102,7 +102,6 @@ static __inline emutls_address_array* emutls_getspecific() {
#include <malloc.h>
#include <stdio.h>
#include <assert.h>
#include <immintrin.h>
static LPCRITICAL_SECTION emutls_mutex;
static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;
@ -203,25 +202,24 @@ static __inline emutls_address_array* emutls_getspecific() {
/* Provide atomic load/store functions for emutls_get_index if built with MSVC.
*/
#if !defined(__ATOMIC_RELEASE)
#include <intrin.h>
enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };
static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
assert(type == __ATOMIC_ACQUIRE);
// These return the previous value - but since we do an OR with 0,
// it's equivalent to a plain load.
#ifdef _WIN64
return (uintptr_t) _load_be_u64(ptr);
return InterlockedOr64(ptr, 0);
#else
return (uintptr_t) _load_be_u32(ptr);
return InterlockedOr(ptr, 0);
#endif
}
static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
assert(type == __ATOMIC_RELEASE);
#ifdef _WIN64
_store_be_u64(ptr, val);
#else
_store_be_u32(ptr, val);
#endif
InterlockedExchangePointer((void *volatile *)ptr, (void *)val);
}
#endif
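The OR-with-zero idiom above is worth a standalone sketch: an interlocked OR of 0 leaves the value unchanged but returns it under a full barrier, which serves as an atomic load, while an interlocked exchange serves as the store. A minimal sketch assuming <windows.h>:

#include <windows.h>
static __inline void *atomic_load_ptr_sketch(void *volatile *p) {
#ifdef _WIN64
  return (void *)InterlockedOr64((volatile LONG64 *)p, 0);
#else
  return (void *)InterlockedOr((volatile LONG *)p, 0);
#endif
}
static __inline void atomic_store_ptr_sketch(void *volatile *p, void *v) {
  InterlockedExchangePointer(p, v);
}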

View File

@ -22,7 +22,7 @@
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <Windows.h>
#include <windows.h>
#else
#ifndef __APPLE__
#include <unistd.h>

View File

@ -23,8 +23,11 @@ COMPILER_RT_ABI float __gnu_h2f_ieee(uint16_t a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_h2f(uint16_t a) {
return __extendhfsf2(a);
}
#else
AEABI_RTABI float __aeabi_h2f(uint16_t a) COMPILER_RT_ALIAS(__extendhfsf2);
#endif
#endif

View File

@ -17,8 +17,11 @@ COMPILER_RT_ABI double __extendsfdf2(float a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_f2d(float a) {
return __extendsfdf2(a);
}
#else
AEABI_RTABI double __aeabi_f2d(float a) COMPILER_RT_ALIAS(__extendsfdf2);
#endif
#endif

View File

@ -45,13 +45,11 @@ __fixdfdi(fp_t a) {
#endif
#if defined(__ARM_EABI__)
AEABI_RTABI di_int
#if defined(__SOFT_FP__)
__aeabi_d2lz(fp_t a) {
#else
__aeabi_d2lz(double a) {
#endif
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI di_int __aeabi_d2lz(fp_t a) {
return __fixdfdi(a);
}
#else
AEABI_RTABI di_int __aeabi_d2lz(fp_t a) COMPILER_RT_ALIAS(__fixdfdi);
#endif
#endif

View File

@ -20,8 +20,11 @@ __fixdfsi(fp_t a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI si_int __aeabi_d2iz(fp_t a) {
return __fixdfsi(a);
}
#else
AEABI_RTABI si_int __aeabi_d2iz(fp_t a) COMPILER_RT_ALIAS(__fixdfsi);
#endif
#endif

View File

@ -45,13 +45,11 @@ __fixsfdi(fp_t a) {
#endif
#if defined(__ARM_EABI__)
AEABI_RTABI di_int
#if defined(__SOFT_FP__)
__aeabi_f2lz(fp_t a) {
#else
__aeabi_f2lz(float a) {
#endif
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI di_int __aeabi_f2lz(fp_t a) {
return __fixsfdi(a);
}
#else
AEABI_RTABI di_int __aeabi_f2lz(fp_t a) COMPILER_RT_ALIAS(__fixsfdi);
#endif
#endif

View File

@ -20,8 +20,11 @@ __fixsfsi(fp_t a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) {
return __fixsfsi(a);
}
#else
AEABI_RTABI si_int __aeabi_f2iz(fp_t a) COMPILER_RT_ALIAS(__fixsfsi);
#endif
#endif

View File

@ -42,13 +42,11 @@ __fixunsdfdi(fp_t a) {
#endif
#if defined(__ARM_EABI__)
AEABI_RTABI du_int
#if defined(__SOFT_FP__)
__aeabi_d2ulz(fp_t a) {
#else
__aeabi_d2ulz(double a) {
#endif
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) {
return __fixunsdfdi(a);
}
#else
AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) COMPILER_RT_ALIAS(__fixunsdfdi);
#endif
#endif

View File

@ -19,8 +19,11 @@ __fixunsdfsi(fp_t a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) {
return __fixunsdfsi(a);
}
#else
AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) COMPILER_RT_ALIAS(__fixunsdfsi);
#endif
#endif

View File

@ -43,13 +43,11 @@ __fixunssfdi(fp_t a) {
#endif
#if defined(__ARM_EABI__)
AEABI_RTABI du_int
#if defined(__SOFT_FP__)
__aeabi_f2ulz(fp_t a) {
#else
__aeabi_f2ulz(float a) {
#endif
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) {
return __fixunssfdi(a);
}
#else
AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) COMPILER_RT_ALIAS(__fixunssfdi);
#endif
#endif

View File

@ -23,8 +23,11 @@ __fixunssfsi(fp_t a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) {
return __fixunssfsi(a);
}
#else
AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) COMPILER_RT_ALIAS(__fixunssfsi);
#endif
#endif

View File

@ -105,8 +105,11 @@ __floatdidf(di_int a)
#endif
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_l2d(di_int a) {
return __floatdidf(a);
}
#else
AEABI_RTABI double __aeabi_l2d(di_int a) COMPILER_RT_ALIAS(__floatdidf);
#endif
#endif

View File

@ -78,8 +78,11 @@ __floatdisf(di_int a)
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_l2f(di_int a) {
return __floatdisf(a);
}
#else
AEABI_RTABI float __aeabi_l2f(di_int a) COMPILER_RT_ALIAS(__floatdisf);
#endif
#endif

View File

@ -51,8 +51,11 @@ __floatsidf(int a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_i2d(int a) {
return __floatsidf(a);
}
#else
AEABI_RTABI fp_t __aeabi_i2d(int a) COMPILER_RT_ALIAS(__floatsidf);
#endif
#endif

View File

@ -57,8 +57,11 @@ __floatsisf(int a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_i2f(int a) {
return __floatsisf(a);
}
#else
AEABI_RTABI fp_t __aeabi_i2f(int a) COMPILER_RT_ALIAS(__floatsisf);
#endif
#endif

View File

@ -104,8 +104,11 @@ __floatundidf(du_int a)
#endif
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI double __aeabi_ul2d(du_int a) {
return __floatundidf(a);
}
#else
AEABI_RTABI double __aeabi_ul2d(du_int a) COMPILER_RT_ALIAS(__floatundidf);
#endif
#endif

View File

@ -75,8 +75,11 @@ __floatundisf(du_int a)
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI float __aeabi_ul2f(du_int a) {
return __floatundisf(a);
}
#else
AEABI_RTABI float __aeabi_ul2f(du_int a) COMPILER_RT_ALIAS(__floatundisf);
#endif
#endif

View File

@ -40,8 +40,11 @@ __floatunsidf(unsigned int a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) {
return __floatunsidf(a);
}
#else
AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) COMPILER_RT_ALIAS(__floatunsidf);
#endif
#endif

View File

@ -48,8 +48,11 @@ __floatunsisf(unsigned int a) {
}
#if defined(__ARM_EABI__)
#if defined(COMPILER_RT_ARMHF_TARGET)
AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) {
return __floatunsisf(a);
}
#else
AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) COMPILER_RT_ALIAS(__floatunsisf);
#endif
#endif

Some files were not shown because too many files have changed in this diff.