Vendor import of compiler-rt trunk r308421:

https://llvm.org/svn/llvm-project/compiler-rt/trunk@308421
Dimitry Andric 2017-07-19 07:02:40 +00:00
parent 1992b790c2
commit 462d72ec21
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/vendor/compiler-rt/dist/; revision=321188
svn path=/vendor/compiler-rt/compiler-rt-release_50-r311219/; revision=322731; tag=vendor/compiler-rt/compiler-rt-release_50-r311219
41 changed files with 1066 additions and 499 deletions


@ -172,10 +172,16 @@ endif()
append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS)
# Build with optimization, unless we're in debug mode. If we're using MSVC,
# If we're using MSVC,
# always respect the optimization flags set by CMAKE_BUILD_TYPE instead.
if(NOT COMPILER_RT_DEBUG AND NOT MSVC)
list(APPEND SANITIZER_COMMON_CFLAGS -O3)
if (NOT MSVC)
# Build with optimization, unless we're in debug mode.
if(COMPILER_RT_DEBUG)
list(APPEND SANITIZER_COMMON_CFLAGS -O0)
else()
list(APPEND SANITIZER_COMMON_CFLAGS -O3)
endif()
endif()
# Determine if we should restrict stack frame sizes.


@ -21,7 +21,9 @@
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
@ -799,11 +801,6 @@ void PrintInternalAllocatorStats() {
instance.PrintStats();
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
return instance.Allocate(size, alignment, stack, alloc_type, true);
}
void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
instance.Deallocate(ptr, 0, stack, alloc_type);
}
@ -814,16 +811,16 @@ void asan_sized_free(void *ptr, uptr size, BufferedStackTrace *stack,
}
void *asan_malloc(uptr size, BufferedStackTrace *stack) {
return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
}
void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
return instance.Calloc(nmemb, size, stack);
return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}
void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
if (!p)
return instance.Allocate(size, 8, stack, FROM_MALLOC, true);
return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
if (size == 0) {
if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
instance.Deallocate(p, 0, stack, FROM_MALLOC);
@ -832,26 +829,41 @@ void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
// Allocate a size of 1 if we shouldn't free() on Realloc to 0
size = 1;
}
return instance.Reallocate(p, size, stack);
return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}
void *asan_valloc(uptr size, BufferedStackTrace *stack) {
return instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true);
return SetErrnoOnNull(
instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
}
void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
// pvalloc(0) should allocate one page.
size = size ? RoundUpTo(size, PageSize) : PageSize;
return SetErrnoOnNull(
instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
}
void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
AllocType alloc_type) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
return AsanAllocator::FailureHandler::OnBadRequest();
}
return instance.Allocate(size, PageSize, stack, FROM_MALLOC, true);
return SetErrnoOnNull(
instance.Allocate(size, alignment, stack, alloc_type, true));
}
int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
BufferedStackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
AsanAllocator::FailureHandler::OnBadRequest();
return errno_EINVAL;
}
void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
if (UNLIKELY(!ptr))
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
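
The wrappers above standardize on the C errno contract: malloc-family entry points funnel their result through SetErrnoOnNull() so a failed allocation returns null with errno set to ENOMEM, while posix_memalign reports errors only through its return value and leaves errno alone. A minimal standalone sketch of that contract (plain libc stand-ins, not the sanitizer's code):

#include <cerrno>
#include <cstdlib>

// Hypothetical stand-ins illustrating the contract; not the sanitizer's code.
static void *set_errno_on_null(void *p) {
  if (!p)
    errno = ENOMEM;           // malloc-family: null result implies ENOMEM
  return p;
}

static int posix_memalign_like(void **memptr, size_t alignment, size_t size) {
  // Alignment must be a nonzero power of two and a multiple of sizeof(void *).
  if (alignment == 0 || (alignment & (alignment - 1)) != 0 ||
      alignment % sizeof(void *) != 0)
    return EINVAL;            // reported via the return value; errno untouched
  // Round up so aligned_alloc's size-is-a-multiple-of-alignment rule holds.
  size_t rounded = (size + alignment - 1) / alignment * alignment;
  void *p = aligned_alloc(alignment, rounded ? rounded : alignment);
  if (!p)
    return ENOMEM;
  *memptr = p;
  return 0;
}

int main() {
  void *p = set_errno_on_null(malloc(16));
  void *q = nullptr;
  int rc = posix_memalign_like(&q, 64, 100);  // 0 on success
  free(p);
  if (rc == 0) free(q);
}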


@ -178,6 +178,10 @@ void SetThreadName(const char *name) {
}
int OnExit() {
if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
__lsan::HasReportedLeaks()) {
return common_flags()->exitcode;
}
// FIXME: ask frontend whether we need to return failure.
return 0;
}


@ -12,6 +12,8 @@
//===----------------------------------------------------------------------===//
#include "asan_test_utils.h"
#include <errno.h>
NOINLINE void *malloc_fff(size_t size) {
void *res = malloc/**/(size); break_optimization(0); return res;}
NOINLINE void *malloc_eee(size_t size) {
@ -74,9 +76,11 @@ TEST(AddressSanitizer, VariousMallocsTest) {
delete c;
#if SANITIZER_TEST_HAS_POSIX_MEMALIGN
int *pm;
int pm_res = posix_memalign((void**)&pm, kPageSize, kPageSize);
void *pm = 0;
// Valid allocation.
int pm_res = posix_memalign(&pm, kPageSize, kPageSize);
EXPECT_EQ(0, pm_res);
EXPECT_NE(nullptr, pm);
free(pm);
#endif // SANITIZER_TEST_HAS_POSIX_MEMALIGN


@ -190,8 +190,8 @@ static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
unsigned *rEAX, unsigned *rEBX, unsigned *rECX,
unsigned *rEDX) {
#if defined(__x86_64__) || defined(_M_X64)
#if defined(__GNUC__) || defined(__clang__)
#if defined(__x86_64__)
// gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
// FIXME: should we save this for Clang?
__asm__("movq\t%%rbx, %%rsi\n\t"
@ -200,6 +200,16 @@ static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
: "a"(value), "c"(subleaf));
return false;
#elif defined(__i386__)
__asm__("movl\t%%ebx, %%esi\n\t"
"cpuid\n\t"
"xchgl\t%%ebx, %%esi\n\t"
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
: "a"(value), "c"(subleaf));
return false;
#else
return true;
#endif
#elif defined(_MSC_VER)
int registers[4];
__cpuidex(registers, value, subleaf);
@ -211,35 +221,6 @@ static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
#else
return true;
#endif
#elif defined(__i386__) || defined(_M_IX86)
#if defined(__GNUC__) || defined(__clang__)
__asm__("movl\t%%ebx, %%esi\n\t"
"cpuid\n\t"
"xchgl\t%%ebx, %%esi\n\t"
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
: "a"(value), "c"(subleaf));
return false;
#elif defined(_MSC_VER)
__asm {
mov eax,value
mov ecx,subleaf
cpuid
mov esi,rEAX
mov dword ptr [esi],eax
mov esi,rEBX
mov dword ptr [esi],ebx
mov esi,rECX
mov dword ptr [esi],ecx
mov esi,rEDX
mov dword ptr [esi],edx
}
return false;
#else
return true;
#endif
#else
return true;
#endif
}
// Read control register 0 (XCR0). Used to detect features such as AVX.
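
The hunk restructures the GNU-toolchain path to branch on architecture first and adds the missing i386 asm. The rbx/ebx shuffle exists because on PIC targets the compiler may reserve that register for the GOT pointer, so the asm parks it in rsi/esi across cpuid instead of declaring it clobbered. A standalone sketch of the same pattern (GCC/Clang on x86-64 assumed):

#include <cstdio>

// Query a cpuid leaf/subleaf while preserving rbx, which may be the PIC
// register: rbx is parked in rsi ("=S") for the duration of cpuid.
static void cpuid_ex(unsigned leaf, unsigned subleaf, unsigned *eax,
                     unsigned *ebx, unsigned *ecx, unsigned *edx) {
  __asm__("movq\t%%rbx, %%rsi\n\t"
          "cpuid\n\t"
          "xchgq\t%%rbx, %%rsi\n\t"
          : "=a"(*eax), "=S"(*ebx), "=c"(*ecx), "=d"(*edx)
          : "a"(leaf), "c"(subleaf));
}

int main() {
  unsigned a, b, c, d;
  cpuid_ex(7, 0, &a, &b, &c, &d);  // leaf 7: structured extended features
  printf("AVX2 supported: %s\n", (b & (1u << 5)) ? "yes" : "no");
}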


@ -15,7 +15,9 @@
#include "lsan_allocator.h"
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
@ -86,6 +88,13 @@ void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
return p;
}
static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
return Allocator::FailureHandler::OnBadRequest();
size *= nmemb;
return Allocate(stack, size, 1, true);
}
void Deallocate(void *p) {
if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
RunFreeHooks(p);
@ -118,11 +127,15 @@ uptr GetMallocUsableSize(const void *p) {
}
void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
return Allocate(stack, size, alignment, kAlwaysClearMemory);
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}
void *lsan_malloc(uptr size, const StackTrace &stack) {
return Allocate(stack, size, 1, kAlwaysClearMemory);
return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}
void lsan_free(void *p) {
@ -130,20 +143,16 @@ void lsan_free(void *p) {
}
void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
return Reallocate(stack, p, size, 1);
return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}
void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
if (CheckForCallocOverflow(size, nmemb))
return Allocator::FailureHandler::OnBadRequest();
size *= nmemb;
return Allocate(stack, size, 1, true);
return SetErrnoOnNull(Calloc(nmemb, size, stack));
}
void *lsan_valloc(uptr size, const StackTrace &stack) {
if (size == 0)
size = GetPageSizeCached();
return Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory);
return SetErrnoOnNull(
Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}
uptr lsan_mz_size(const void *p) {


@ -576,18 +576,16 @@ static bool CheckForLeaks() {
return false;
}
static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
void DoLeakCheck() {
BlockingMutexLock l(&global_mutex);
static bool already_done;
if (already_done) return;
already_done = true;
bool have_leaks = CheckForLeaks();
if (!have_leaks) {
return;
}
if (common_flags()->exitcode) {
Die();
}
has_reported_leaks = CheckForLeaks();
if (has_reported_leaks) HandleLeaks();
}
static int DoRecoverableLeakCheck() {


@ -226,6 +226,12 @@ IgnoreObjectResult IgnoreObjectLocked(const void *p);
// Return the linker module, if valid for the platform.
LoadedModule *GetLinker();
// Return true if LSan has finished leak checking and reported leaks.
bool HasReportedLeaks();
// Run platform-specific leak handlers.
void HandleLeaks();
// Wrapper for chunk metadata operations.
class LsanMetadata {
public:


@ -100,6 +100,13 @@ struct DoStopTheWorldParam {
void *argument;
};
// While calling Die() here is undefined behavior and can potentially
// cause race conditions, it isn't possible to intercept exit on linux,
// so we have no choice but to call Die() from the atexit handler.
void HandleLeaks() {
if (common_flags()->exitcode) Die();
}
static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
void *data) {
DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);


@ -164,6 +164,11 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
}
}
// On darwin, we can intercept _exit gracefully, and return a failing exit code
// if required at that point. Calling Die() here is undefined behavior and
// causes rare race conditions.
void HandleLeaks() {}
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
StopTheWorld(callback, argument);
}


@ -352,6 +352,11 @@ INTERCEPTOR(int, pthread_join, void *th, void **ret) {
return res;
}
INTERCEPTOR(void, _exit, int status) {
if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
REAL(_exit)(status);
}
namespace __lsan {
void InitializeInterceptors() {
@ -371,6 +376,7 @@ void InitializeInterceptors() {
LSAN_MAYBE_INTERCEPT_MALLOPT;
INTERCEPT_FUNCTION(pthread_create);
INTERCEPT_FUNCTION(pthread_join);
INTERCEPT_FUNCTION(_exit);
if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
Report("LeakSanitizer: failed to create thread key.\n");


@ -280,10 +280,18 @@ void InitializeInterceptors();
void MsanAllocatorInit();
void MsanAllocatorThreadFinish();
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size);
void *MsanReallocate(StackTrace *stack, void *oldp, uptr size,
uptr alignment, bool zeroise);
void MsanDeallocate(StackTrace *stack, void *ptr);
void *msan_malloc(uptr size, StackTrace *stack);
void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack);
void *msan_realloc(void *ptr, uptr size, StackTrace *stack);
void *msan_valloc(uptr size, StackTrace *stack);
void *msan_pvalloc(uptr size, StackTrace *stack);
void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack);
void InstallTrapHandler();
void InstallAtExitHandler();


@ -13,7 +13,9 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "msan.h"
#include "msan_allocator.h"
#include "msan_origin.h"
@ -194,20 +196,8 @@ void MsanDeallocate(StackTrace *stack, void *p) {
}
}
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
if (CheckForCallocOverflow(size, nmemb))
return Allocator::FailureHandler::OnBadRequest();
return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}
void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
uptr alignment, bool zeroise) {
if (!old_p)
return MsanAllocate(stack, new_size, alignment, zeroise);
if (!new_size) {
MsanDeallocate(stack, old_p);
return nullptr;
}
uptr alignment) {
Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
uptr old_size = meta->requested_size;
uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
@ -215,10 +205,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
// We are not reallocating here.
meta->requested_size = new_size;
if (new_size > old_size) {
if (zeroise) {
__msan_clear_and_unpoison((char *)old_p + old_size,
new_size - old_size);
} else if (flags()->poison_in_malloc) {
if (flags()->poison_in_malloc) {
stack->tag = StackTrace::TAG_ALLOC;
PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
}
@ -226,8 +213,7 @@ void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
return old_p;
}
uptr memcpy_size = Min(new_size, old_size);
void *new_p = MsanAllocate(stack, new_size, alignment, zeroise);
// Printf("realloc: old_size %zd new_size %zd\n", old_size, new_size);
void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
if (new_p) {
CopyMemory(new_p, old_p, memcpy_size, stack);
MsanDeallocate(stack, old_p);
@ -243,6 +229,67 @@ static uptr AllocationSize(const void *p) {
return b->requested_size;
}
void *msan_malloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
}
void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
if (UNLIKELY(CheckForCallocOverflow(size, nmemb)))
return SetErrnoOnNull(Allocator::FailureHandler::OnBadRequest());
return SetErrnoOnNull(MsanAllocate(stack, nmemb * size, sizeof(u64), true));
}
void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
if (!ptr)
return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
if (size == 0) {
MsanDeallocate(stack, ptr);
return nullptr;
}
return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
}
void *msan_valloc(uptr size, StackTrace *stack) {
return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
}
void *msan_pvalloc(uptr size, StackTrace *stack) {
uptr PageSize = GetPageSizeCached();
// pvalloc(0) should allocate one page.
size = size == 0 ? PageSize : RoundUpTo(size, PageSize);
return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
}
void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
if (UNLIKELY(!IsPowerOfTwo(alignment))) {
errno = errno_EINVAL;
return Allocator::FailureHandler::OnBadRequest();
}
return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
}
int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
StackTrace *stack) {
if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
Allocator::FailureHandler::OnBadRequest();
return errno_EINVAL;
}
void *ptr = MsanAllocate(stack, size, alignment, false);
if (UNLIKELY(!ptr))
return errno_ENOMEM;
CHECK(IsAligned((uptr)ptr, alignment));
*memptr = ptr;
return 0;
}
} // namespace __msan
using namespace __msan;
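
The refactoring narrows MsanReallocate() to the genuine reallocation case; the null-pointer and zero-size corners move into msan_realloc(). A generic sketch of that dispatch, with plain libc calls standing in for the MSan internals:

#include <cstdlib>

// Corner cases live in the wrapper, so the core path only ever sees a live
// pointer and a non-zero size.
void *realloc_dispatch(void *ptr, size_t size) {
  if (!ptr)
    return malloc(size);       // realloc(nullptr, n) acts like malloc(n)
  if (size == 0) {
    free(ptr);                 // this implementation frees and returns null
    return nullptr;
  }
  return realloc(ptr, size);   // stand-in for the core grow/shrink path
}

int main() {
  void *p = realloc_dispatch(nullptr, 32);
  p = realloc_dispatch(p, 64);
  realloc_dispatch(p, 0);
}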


@ -161,58 +161,45 @@ INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) {
INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(alignment & (alignment - 1), 0);
CHECK_NE(memptr, 0);
*memptr = MsanReallocate(&stack, nullptr, size, alignment, false);
CHECK_NE(*memptr, 0);
__msan_unpoison(memptr, sizeof(*memptr));
return 0;
int res = msan_posix_memalign(memptr, alignment, size, &stack);
if (!res)
__msan_unpoison(memptr, sizeof(*memptr));
return res;
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(void *, memalign, SIZE_T boundary, SIZE_T size) {
INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(boundary & (boundary - 1), 0);
void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
return ptr;
return msan_memalign(alignment, size, &stack);
}
#define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
#else
#define MSAN_MAYBE_INTERCEPT_MEMALIGN
#endif
INTERCEPTOR(void *, aligned_alloc, SIZE_T boundary, SIZE_T size) {
INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(boundary & (boundary - 1), 0);
void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
return ptr;
return msan_aligned_alloc(alignment, size, &stack);
}
INTERCEPTOR(void *, __libc_memalign, SIZE_T boundary, SIZE_T size) {
INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
CHECK_EQ(boundary & (boundary - 1), 0);
void *ptr = MsanReallocate(&stack, nullptr, size, boundary, false);
DTLS_on_libc_memalign(ptr, size);
void *ptr = msan_memalign(alignment, size, &stack);
if (ptr)
DTLS_on_libc_memalign(ptr, size);
return ptr;
}
INTERCEPTOR(void *, valloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
void *ptr = MsanReallocate(&stack, nullptr, size, GetPageSizeCached(), false);
return ptr;
return msan_valloc(size, &stack);
}
#if !SANITIZER_FREEBSD
INTERCEPTOR(void *, pvalloc, SIZE_T size) {
GET_MALLOC_STACK_TRACE;
uptr PageSize = GetPageSizeCached();
size = RoundUpTo(size, PageSize);
if (size == 0) {
// pvalloc(0) should allocate one page.
size = PageSize;
}
void *ptr = MsanReallocate(&stack, nullptr, size, PageSize, false);
return ptr;
return msan_pvalloc(size, &stack);
}
#define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
#else
@ -853,7 +840,7 @@ INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
if (UNLIKELY(!msan_inited))
// Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
return AllocateFromLocalPool(nmemb * size);
return MsanCalloc(&stack, nmemb, size);
return msan_calloc(nmemb, size, &stack);
}
INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
@ -866,12 +853,12 @@ INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
new_ptr = AllocateFromLocalPool(copy_size);
} else {
copy_size = size;
new_ptr = MsanReallocate(&stack, nullptr, copy_size, sizeof(u64), false);
new_ptr = msan_malloc(copy_size, &stack);
}
internal_memcpy(new_ptr, ptr, copy_size);
return new_ptr;
}
return MsanReallocate(&stack, ptr, size, sizeof(u64), false);
return msan_realloc(ptr, size, &stack);
}
INTERCEPTOR(void *, malloc, SIZE_T size) {
@ -879,7 +866,7 @@ INTERCEPTOR(void *, malloc, SIZE_T size) {
if (UNLIKELY(!msan_inited))
// Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
return AllocateFromLocalPool(size);
return MsanReallocate(&stack, nullptr, size, sizeof(u64), false);
return msan_malloc(size, &stack);
}
void __msan_allocated_memory(const void *data, uptr size) {


@ -31,7 +31,7 @@ namespace std {
// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
#define OPERATOR_NEW_BODY(nothrow) \
GET_MALLOC_STACK_TRACE; \
void *res = MsanReallocate(&stack, 0, size, sizeof(u64), false);\
void *res = msan_malloc(size, &stack);\
if (!nothrow && UNLIKELY(!res)) DieOnFailure::OnOOM();\
return res


@ -14,6 +14,7 @@
#include "sanitizer_allocator.h"
#include "sanitizer_allocator_checks.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
@ -160,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
}
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
if (CheckForCallocOverflow(count, size))
if (UNLIKELY(CheckForCallocOverflow(count, size)))
return InternalAllocator::FailureHandler::OnBadRequest();
void *p = InternalAlloc(count * size, cache);
if (p) internal_memset(p, 0, count * size);
@ -202,12 +203,6 @@ void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
low_level_alloc_callback = callback;
}
bool CheckForCallocOverflow(uptr size, uptr n) {
if (!size) return false;
uptr max = (uptr)-1L;
return (max / size) < n;
}
static atomic_uint8_t allocator_out_of_memory = {0};
static atomic_uint8_t allocator_may_return_null = {0};


@ -56,11 +56,6 @@ struct NoOpMapUnmapCallback {
// Callback type for iterating over chunks.
typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
// Returns true if calloc(size, n) call overflows on size*n calculation.
// The caller should "return POLICY::OnBadRequest();" where POLICY is the
// current allocator failure handling policy.
bool CheckForCallocOverflow(uptr size, uptr n);
#include "sanitizer_allocator_size_class_map.h"
#include "sanitizer_allocator_stats.h"
#include "sanitizer_allocator_primary64.h"


@ -0,0 +1,64 @@
//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
// allocators.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_CHECKS_H
#define SANITIZER_ALLOCATOR_CHECKS_H
#include "sanitizer_errno.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_common.h"
#include "sanitizer_platform.h"
namespace __sanitizer {
// Common errno-setting logic shared by almost all sanitizer allocator APIs.
INLINE void *SetErrnoOnNull(void *ptr) {
if (UNLIKELY(!ptr))
errno = errno_ENOMEM;
return ptr;
}
// In case of the check failure, the caller of the following Check... functions
// should "return POLICY::OnBadRequest();" where POLICY is the current allocator
// failure handling policy.
// Checks aligned_alloc() parameters. The POSIX implementation verifies that
// the alignment is a power of two and that the size is a multiple of the
// alignment; non-POSIX implementations only check the relaxed requirement
// that the size is a multiple of the alignment.
INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
#if SANITIZER_POSIX
return IsPowerOfTwo(alignment) && (size & (alignment - 1)) == 0;
#else
return size % alignment == 0;
#endif
}
// Checks posix_memalign() parameters, verifies that alignment is a power of two
// and a multiple of sizeof(void *).
INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
return IsPowerOfTwo(alignment) && (alignment % sizeof(void *)) == 0; // NOLINT
}
// Returns true if calloc(size, n) call overflows on size*n calculation.
INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
if (!size)
return false;
uptr max = (uptr)-1L;
return (max / size) < n;
}
} // namespace __sanitizer
#endif // SANITIZER_ALLOCATOR_CHECKS_H
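
The calloc check avoids the multiplication entirely: size * n overflows a uptr exactly when n > UINTPTR_MAX / size. A standalone version with a quick self-test:

#include <cstdint>
#include <cstdio>

// calloc(size, n) overflows iff n > UINTPTR_MAX / size (for size != 0).
static bool calloc_overflows(uintptr_t size, uintptr_t n) {
  if (!size) return false;
  uintptr_t max = (uintptr_t)-1;
  return (max / size) < n;
}

int main() {
  printf("%d\n", calloc_overflows(UINTPTR_MAX / 2 + 1, 2));  // 1: would overflow
  printf("%d\n", calloc_overflows(8, 100));                  // 0: 800 bytes is fine
}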


@ -26,6 +26,8 @@
# define __errno_location __error
#elif SANITIZER_ANDROID
# define __errno_location __errno
#elif SANITIZER_WINDOWS
# define __errno_location _errno
#endif
extern "C" int *__errno_location();


@ -629,8 +629,7 @@ uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
}
#endif
uptr internal_sigaltstack(const struct sigaltstack *ss,
struct sigaltstack *oss) {
uptr internal_sigaltstack(const void *ss, void *oss) {
return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
}


@ -21,7 +21,6 @@
#include "sanitizer_platform_limits_posix.h"
struct link_map; // Opaque type returned by dlopen().
struct sigaltstack;
namespace __sanitizer {
// Dirent structure for getdents(). Note that this structure is different from
@ -30,8 +29,7 @@ struct linux_dirent;
// Syscall wrappers.
uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
uptr internal_sigaltstack(const struct sigaltstack* ss,
struct sigaltstack* oss);
uptr internal_sigaltstack(const void* ss, void* oss);
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
__sanitizer_sigset_t *oldset);


@ -805,14 +805,35 @@ char **GetArgv() {
// fields only available in 10.12+. Declare the struct manually to be able to
// build against older SDKs.
struct __sanitizer_task_vm_info {
uptr _unused[(SANITIZER_WORDSIZE == 32) ? 20 : 19];
uptr min_address;
uptr max_address;
mach_vm_size_t virtual_size;
integer_t region_count;
integer_t page_size;
mach_vm_size_t resident_size;
mach_vm_size_t resident_size_peak;
mach_vm_size_t device;
mach_vm_size_t device_peak;
mach_vm_size_t internal;
mach_vm_size_t internal_peak;
mach_vm_size_t external;
mach_vm_size_t external_peak;
mach_vm_size_t reusable;
mach_vm_size_t reusable_peak;
mach_vm_size_t purgeable_volatile_pmap;
mach_vm_size_t purgeable_volatile_resident;
mach_vm_size_t purgeable_volatile_virtual;
mach_vm_size_t compressed;
mach_vm_size_t compressed_peak;
mach_vm_size_t compressed_lifetime;
mach_vm_size_t phys_footprint;
mach_vm_address_t min_address;
mach_vm_address_t max_address;
};
#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \
(sizeof(__sanitizer_task_vm_info) / sizeof(natural_t)))
uptr GetTaskInfoMaxAddress() {
__sanitizer_task_vm_info vm_info = {{0}, 0, 0};
mach_msg_type_number_t count = sizeof(vm_info) / sizeof(int);
__sanitizer_task_vm_info vm_info = {};
mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT;
int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);
if (err == 0) {
return vm_info.max_address - 1;
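
The rewrite declares the full 10.12 field layout and sizes the request in natural_t (32-bit word) units, which is how Mach counts task_info buffers. A toy version of the count arithmetic (hypothetical stand-in types, not the Mach headers):

#include <cstdio>

typedef unsigned int natural_t_like;                     // 4 bytes, like natural_t
struct vm_info_like { unsigned long long fields[23]; };  // stand-in field layout

int main() {
  // Mach expects the buffer size as a count of 32-bit words, so divide the
  // full struct size by natural_t, not by sizeof(int) of a truncated struct.
  unsigned count = sizeof(vm_info_like) / sizeof(natural_t_like);
  printf("request %u words (%zu bytes)\n", count, sizeof(vm_info_like));
}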


@ -13,7 +13,7 @@
#ifndef SANITIZER_PLATFORM_H
#define SANITIZER_PLATFORM_H
#if !defined(__linux__) && !defined(__FreeBSD__) && \
#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
!defined(__APPLE__) && !defined(_WIN32)
# error "This operating system is not supported"
#endif
@ -30,6 +30,12 @@
# define SANITIZER_FREEBSD 0
#endif
#if defined(__NetBSD__)
# define SANITIZER_NETBSD 1
#else
# define SANITIZER_NETBSD 0
#endif
#if defined(__APPLE__)
# define SANITIZER_MAC 1
# include <TargetConditionals.h>
@ -79,7 +85,8 @@
# define SANITIZER_ANDROID 0
#endif
#define SANITIZER_POSIX (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC)
#define SANITIZER_POSIX \
(SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || SANITIZER_NETBSD)
#if __LP64__ || defined(_WIN64)
# define SANITIZER_WORDSIZE 64


@ -49,6 +49,12 @@
# define SI_FREEBSD 0
#endif
#if SANITIZER_NETBSD
# define SI_NETBSD 1
#else
# define SI_NETBSD 0
#endif
#if SANITIZER_LINUX
# define SI_LINUX 1
#else
@ -109,9 +115,9 @@
// memmem on Darwin doesn't exist on 10.6
// FIXME: enable memmem on Windows.
#define SANITIZER_INTERCEPT_MEMMEM \
SI_NOT_WINDOWS && !SI_MAC_DEPLOYMENT_BELOW_10_7
(SI_NOT_WINDOWS && !SI_MAC_DEPLOYMENT_BELOW_10_7)
#define SANITIZER_INTERCEPT_MEMCHR 1
#define SANITIZER_INTERCEPT_MEMRCHR SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_MEMRCHR (SI_FREEBSD || SI_LINUX || SI_NETBSD)
#define SANITIZER_INTERCEPT_READ SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREAD SI_NOT_WINDOWS
@ -127,7 +133,8 @@
#define SANITIZER_INTERCEPT_READV SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WRITEV SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PREADV SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
@ -142,7 +149,7 @@
#ifndef SANITIZER_INTERCEPT_PRINTF
# define SANITIZER_INTERCEPT_PRINTF SI_NOT_WINDOWS
# define SANITIZER_INTERCEPT_PRINTF_L SI_FREEBSD
# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
#endif
@ -151,13 +158,14 @@
#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETPWENT \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETPWENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SETPWENT SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CLOCK_GETTIME SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_GETPWENT_R \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SETPWENT (SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_CLOCK_GETTIME (SI_FREEBSD || SI_NETBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_GETITIMER SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_TIME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID
@ -168,10 +176,11 @@
#define SANITIZER_INTERCEPT_GETNAMEINFO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETSOCKNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTENT_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R (SI_FREEBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R \
(SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETHOSTENT_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETSOCKOPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ACCEPT4 SI_LINUX_NOT_ANDROID
@ -197,63 +206,67 @@
#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRTOIMAX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSTOWCS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MBSNRTOWCS SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MBSNRTOWCS (SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_WCSTOMBS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_WCSNRTOMBS \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_WCRTOMB \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_REALPATH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_CONFSTR \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STRERROR SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STRERROR_R SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SCANDIR \
SI_FREEBSD || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_POLL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_WORDEXP \
SI_FREEBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SIGWAIT SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SIGSETOPS \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SIGPENDING SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SIGPROCMASK SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_BACKTRACE SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_BACKTRACE \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATFS \
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATFS64 \
(SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_STATVFS SI_FREEBSD || SI_LINUX_NOT_ANDROID
((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_INITGROUPS SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_ETHER_HOST \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ETHER_R SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SHMCTL \
((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && SANITIZER_WORDSIZE == 64)
(SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_SHMCTL \
(SI_NETBSD || ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && \
SANITIZER_WORDSIZE == 64)) // NOLINT
#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
SI_MAC || SI_LINUX_NOT_ANDROID
(SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
SI_MAC || SI_LINUX_NOT_ANDROID
(SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED SI_NOT_WINDOWS
@ -268,33 +281,36 @@
#define SANITIZER_INTERCEPT_SINCOS SI_LINUX
#define SANITIZER_INTERCEPT_REMQUO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_LGAMMA_R SI_FREEBSD || SI_LINUX
#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX)
#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_RAND_R \
SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_ICONV SI_FREEBSD || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_ICONV \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_TIMES SI_NOT_WINDOWS
// FIXME: getline seems to be available on OSX 10.7
#define SANITIZER_INTERCEPT_GETLINE SI_FREEBSD || SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_GETLINE \
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT__EXIT SI_LINUX || SI_FREEBSD || SI_MAC
#define SANITIZER_INTERCEPT__EXIT \
(SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_MAC)
#define SANITIZER_INTERCEPT_PHTREAD_MUTEX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
SI_FREEBSD || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
SI_FREEBSD || SI_LINUX_NOT_ANDROID
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID)
#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
#define SANITIZER_INTERCEPT_GETIFADDRS \
SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC)
#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC)
#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
#if SI_LINUX && defined(__arm__)
#define SANITIZER_INTERCEPT_AEABI_MEM 1
@ -302,55 +318,61 @@
#define SANITIZER_INTERCEPT_AEABI_MEM 0
#endif
#define SANITIZER_INTERCEPT___BZERO SI_MAC
#define SANITIZER_INTERCEPT_FTIME !SI_FREEBSD && SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FTIME (!SI_FREEBSD && !SI_NETBSD && SI_NOT_WINDOWS)
#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_TSEARCH SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_TSEARCH \
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD)
#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FOPEN SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM (SI_LINUX_NOT_ANDROID || SI_NETBSD)
#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_FFLUSH SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FCLOSE SI_NOT_WINDOWS
#ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_MAC
(SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_MAC)
#endif
#define SANITIZER_INTERCEPT_GETPASS SI_LINUX_NOT_ANDROID || SI_MAC
#define SANITIZER_INTERCEPT_GETPASS \
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD)
#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_MLOCKX SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_SEM SI_LINUX || SI_FREEBSD
#define SANITIZER_INTERCEPT_SEM (SI_LINUX || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_MINCORE SI_LINUX
#define SANITIZER_INTERCEPT_MINCORE (SI_LINUX || SI_NETBSD)
#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
#define SANITIZER_INTERCEPT_CTERMID SI_LINUX || SI_MAC || SI_FREEBSD
#define SANITIZER_INTERCEPT_CTERMID_R SI_MAC || SI_FREEBSD
#define SANITIZER_INTERCEPT_CTERMID \
(SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD)
#define SANITIZER_INTERCEPTOR_HOOKS SI_LINUX || SI_MAC || SI_WINDOWS
#define SANITIZER_INTERCEPTOR_HOOKS (SI_LINUX || SI_MAC || SI_WINDOWS)
#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_SEND_SENDTO SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
#define SANITIZER_INTERCEPT_STAT (SI_FREEBSD || SI_MAC || SI_ANDROID)
#define SANITIZER_INTERCEPT___XSTAT !SANITIZER_INTERCEPT_STAT && SI_NOT_WINDOWS
#define SANITIZER_INTERCEPT_STAT \
(SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD)
#define SANITIZER_INTERCEPT___XSTAT \
(!SANITIZER_INTERCEPT_STAT && SI_NOT_WINDOWS)
#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
#define SANITIZER_INTERCEPT_UTMP SI_NOT_WINDOWS && !SI_MAC && !SI_FREEBSD
#define SANITIZER_INTERCEPT_UTMPX SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD
#define SANITIZER_INTERCEPT_UTMP (SI_NOT_WINDOWS && !SI_MAC && !SI_FREEBSD)
#define SANITIZER_INTERCEPT_UTMPX (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD)
#define SANITIZER_INTERCEPT_GETLOADAVG \
SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD
(SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO (!SI_FREEBSD && !SI_MAC)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC)
#define SANITIZER_INTERCEPT_PVALLOC (!SI_FREEBSD && !SI_MAC)
#define SANITIZER_INTERCEPT_CFREE (!SI_FREEBSD && !SI_MAC)
#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
(!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_MEMALIGN (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_PVALLOC (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_CFREE (!SI_FREEBSD && !SI_MAC && !SI_NETBSD)
#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC)
#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC)
#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
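
Most of this file's churn is mechanical: besides the NetBSD entries, every predicate that expands to a bare OR-list gains parentheses. That matters because the macros are spliced into larger #if expressions, where an unparenthesized expansion changes meaning under ! and &&. A minimal illustration:

#include <cstdio>

#define BAD_PREDICATE 0 || 1     // expands bare into the enclosing condition
#define GOOD_PREDICATE (0 || 1)

int main() {
#if !BAD_PREDICATE && 0
  // Intended: !(0 || 1) && 0, i.e. false. Actual expansion:
  // (!0) || (1 && 0), i.e. true -- so this "dead" branch is compiled in.
  printf("surprise: this branch is live\n");
#endif
  printf("!BAD = %d, !GOOD = %d\n", !BAD_PREDICATE, !GOOD_PREDICATE);  // 1, 0
}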


@ -287,7 +287,7 @@ static int TracerThread(void* argument) {
// Alternate stack for signal handling.
InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
struct sigaltstack handler_stack;
stack_t handler_stack;
internal_memset(&handler_stack, 0, sizeof(handler_stack));
handler_stack.ss_sp = handler_stack_memory.data();
handler_stack.ss_size = kHandlerStackSize;
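
Here the caller switches to the portable stack_t, matching the void* wrapper above; newer glibc (2.26) removed the struct sigaltstack tag, which is what broke the old declarations. A minimal standalone use of the same type (POSIX assumed):

#include <signal.h>
#include <stdio.h>
#include <string.h>

static char altstack[64 * 1024];  // fixed size; SIGSTKSZ is no longer constant
                                  // on recent glibc

int main() {
  stack_t ss;                     // the portable name for the sigaltstack type
  memset(&ss, 0, sizeof(ss));
  ss.ss_sp = altstack;
  ss.ss_size = sizeof(altstack);
  if (sigaltstack(&ss, nullptr) == 0)
    printf("alternate signal stack installed\n");
}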


@ -62,14 +62,14 @@ void Print(const BV &bv) {
t.copyFrom(bv);
while (!t.empty()) {
uptr idx = t.getAndClearFirstOne();
fprintf(stderr, "%zd ", idx);
fprintf(stderr, "%lu ", idx);
}
fprintf(stderr, "\n");
}
void Print(const set<uptr> &s) {
for (set<uptr>::iterator it = s.begin(); it != s.end(); ++it) {
fprintf(stderr, "%zd ", *it);
fprintf(stderr, "%lu ", *it);
}
fprintf(stderr, "\n");
}


@ -19,10 +19,11 @@
#include "scudo_tls.h"
#include "scudo_utils.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_quarantine.h"
#include <errno.h>
#include <string.h>
namespace __scudo {
@ -73,7 +74,7 @@ struct ScudoChunk : UnpackedHeader {
// beginning of the user data to the end of the backend allocated chunk.
uptr getUsableSize(UnpackedHeader *Header) {
uptr Size =
getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header),
getBackendAllocator().getActuallyAllocatedSize(getAllocBeg(Header),
Header->FromPrimary);
if (Size == 0)
return 0;
@ -232,7 +233,10 @@ struct QuarantineCallback {
}
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(&Header);
getBackendAllocator().Deallocate(Cache_, Ptr, Header.FromPrimary);
if (Header.FromPrimary)
getBackendAllocator().deallocatePrimary(Cache_, Ptr);
else
getBackendAllocator().deallocateSecondary(Ptr);
}
// Internal quarantine allocation and deallocation functions. We first check
@ -240,11 +244,11 @@ struct QuarantineCallback {
// TODO(kostyak): figure out the best way to protect the batches.
COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
void *Allocate(uptr Size) {
return getBackendAllocator().Allocate(Cache_, Size, MinAlignment, true);
return getBackendAllocator().allocatePrimary(Cache_, Size);
}
void Deallocate(void *Ptr) {
getBackendAllocator().Deallocate(Cache_, Ptr, true);
getBackendAllocator().deallocatePrimary(Cache_, Ptr);
}
AllocatorCache *Cache_;
@ -277,6 +281,9 @@ struct ScudoAllocator {
ScudoBackendAllocator BackendAllocator;
ScudoQuarantine AllocatorQuarantine;
StaticSpinMutex GlobalPrngMutex;
ScudoPrng GlobalPrng;
// The fallback caches are used when the thread local caches have been
// 'destroyed' on thread tear-down. They are protected by a Mutex as they can
// be accessed by different threads.
@ -303,10 +310,10 @@ struct ScudoAllocator {
// result, the maximum offset will be at most the maximum alignment for the
// last size class minus the header size, in multiples of MinAlignment.
UnpackedHeader Header = {};
uptr MaxPrimaryAlignment = 1 << MostSignificantSetBitIndex(
SizeClassMap::kMaxSize - MinAlignment);
uptr MaxOffset = (MaxPrimaryAlignment - AlignedChunkHeaderSize) >>
MinAlignmentLog;
uptr MaxPrimaryAlignment =
1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
uptr MaxOffset =
(MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
Header.Offset = MaxOffset;
if (Header.Offset != MaxOffset) {
dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
@ -328,13 +335,14 @@ struct ScudoAllocator {
DeleteSizeMismatch = Options.DeleteSizeMismatch;
ZeroContents = Options.ZeroContents;
SetAllocatorMayReturnNull(Options.MayReturnNull);
BackendAllocator.Init(Options.ReleaseToOSIntervalMs);
BackendAllocator.init(Options.ReleaseToOSIntervalMs);
AllocatorQuarantine.Init(
static_cast<uptr>(Options.QuarantineSizeMb) << 20,
static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
BackendAllocator.InitCache(&FallbackAllocatorCache);
GlobalPrng.init();
Cookie = GlobalPrng.getU64();
BackendAllocator.initCache(&FallbackAllocatorCache);
FallbackPrng.init();
Cookie = FallbackPrng.getU64();
}
// Helper function that checks for a valid Scudo chunk. nullptr isn't.
@ -374,28 +382,36 @@ struct ScudoAllocator {
void *Ptr;
u8 Salt;
uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
Salt = getPrng(ThreadContext)->getU8();
Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
AllocationSize, AllocationAlignment,
FromPrimary);
ThreadContext->unlock();
uptr AllocSize;
if (FromPrimary) {
AllocSize = AlignedSize;
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
Salt = getPrng(ThreadContext)->getU8();
Ptr = BackendAllocator.allocatePrimary(getAllocatorCache(ThreadContext),
AllocSize);
ThreadContext->unlock();
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getU8();
Ptr = BackendAllocator.allocatePrimary(&FallbackAllocatorCache,
AllocSize);
}
} else {
SpinMutexLock l(&FallbackMutex);
Salt = FallbackPrng.getU8();
Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
AllocationAlignment, FromPrimary);
{
SpinMutexLock l(&GlobalPrngMutex);
Salt = GlobalPrng.getU8();
}
AllocSize = NeededSize;
Ptr = BackendAllocator.allocateSecondary(AllocSize, Alignment);
}
if (UNLIKELY(!Ptr))
return FailureHandler::OnOOM();
// If requested, we will zero out the entire contents of the returned chunk.
if ((ForceZeroContents || ZeroContents) && FromPrimary)
memset(Ptr, 0,
BackendAllocator.GetActuallyAllocatedSize(Ptr, FromPrimary));
memset(Ptr, 0, BackendAllocator.getActuallyAllocatedSize(
Ptr, /*FromPrimary=*/true));
UnpackedHeader Header = {};
uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
@ -409,11 +425,11 @@ struct ScudoAllocator {
uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
Header.Offset = Offset >> MinAlignmentLog;
}
CHECK_LE(UserBeg + Size, AllocBeg + AllocationSize);
CHECK_LE(UserBeg + Size, AllocBeg + AllocSize);
Header.State = ChunkAllocated;
Header.AllocType = Type;
if (FromPrimary) {
Header.FromPrimary = FromPrimary;
Header.FromPrimary = 1;
Header.SizeOrUnusedBytes = Size;
} else {
// The secondary fits the allocations to a page, so the amount of unused
@ -424,7 +440,7 @@ struct ScudoAllocator {
if (TrailingBytes)
Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
}
Header.Salt = static_cast<u8>(Salt);
Header.Salt = Salt;
getScudoChunk(UserBeg)->storeHeader(&Header);
void *UserPtr = reinterpret_cast<void *>(UserBeg);
// if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
@ -442,15 +458,18 @@ struct ScudoAllocator {
if (BypassQuarantine) {
Chunk->eraseHeader();
void *Ptr = Chunk->getAllocBeg(Header);
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr,
FromPrimary);
ThreadContext->unlock();
if (FromPrimary) {
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
if (LIKELY(ThreadContext)) {
getBackendAllocator().deallocatePrimary(
getAllocatorCache(ThreadContext), Ptr);
ThreadContext->unlock();
} else {
SpinMutexLock Lock(&FallbackMutex);
getBackendAllocator().deallocatePrimary(&FallbackAllocatorCache, Ptr);
}
} else {
SpinMutexLock Lock(&FallbackMutex);
getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr,
FromPrimary);
getBackendAllocator().deallocateSecondary(Ptr);
}
} else {
UnpackedHeader NewHeader = *Header;
@ -580,7 +599,7 @@ struct ScudoAllocator {
void *calloc(uptr NMemB, uptr Size) {
initThreadMaybe();
if (CheckForCallocOverflow(NMemB, Size))
if (UNLIKELY(CheckForCallocOverflow(NMemB, Size)))
return FailureHandler::OnBadRequest();
return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
}
@ -589,13 +608,13 @@ struct ScudoAllocator {
AllocatorCache *Cache = getAllocatorCache(ThreadContext);
AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
QuarantineCallback(Cache));
BackendAllocator.DestroyCache(Cache);
BackendAllocator.destroyCache(Cache);
}
uptr getStats(AllocatorStat StatType) {
initThreadMaybe();
uptr stats[AllocatorStatCount];
BackendAllocator.GetStats(stats);
BackendAllocator.getStats(stats);
return stats[StatType];
}
};
@ -611,7 +630,7 @@ static void initScudoInternal(const AllocatorOptions &Options) {
}
void ScudoThreadContext::init() {
getBackendAllocator().InitCache(&Cache);
getBackendAllocator().initCache(&Cache);
Prng.init();
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
}
@ -621,7 +640,7 @@ void ScudoThreadContext::commitBack() {
}
void *scudoMalloc(uptr Size, AllocType Type) {
return Instance.allocate(Size, MinAlignment, Type);
return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, Type));
}
void scudoFree(void *Ptr, AllocType Type) {
@ -634,54 +653,56 @@ void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
void *scudoRealloc(void *Ptr, uptr Size) {
if (!Ptr)
return Instance.allocate(Size, MinAlignment, FromMalloc);
return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
if (Size == 0) {
Instance.deallocate(Ptr, 0, FromMalloc);
return nullptr;
}
return Instance.reallocate(Ptr, Size);
return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
}
void *scudoCalloc(uptr NMemB, uptr Size) {
return Instance.calloc(NMemB, Size);
return SetErrnoOnNull(Instance.calloc(NMemB, Size));
}
void *scudoValloc(uptr Size) {
return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
return SetErrnoOnNull(
Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
}
void *scudoPvalloc(uptr Size) {
uptr PageSize = GetPageSizeCached();
Size = RoundUpTo(Size, PageSize);
if (Size == 0) {
// pvalloc(0) should allocate one page.
Size = PageSize;
}
return Instance.allocate(Size, PageSize, FromMemalign);
// pvalloc(0) should allocate one page.
Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
}
void *scudoMemalign(uptr Alignment, uptr Size) {
if (UNLIKELY(!IsPowerOfTwo(Alignment)))
if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
errno = errno_EINVAL;
return ScudoAllocator::FailureHandler::OnBadRequest();
return Instance.allocate(Size, Alignment, FromMemalign);
}
return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMemalign));
}
int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Alignment % sizeof(void *)) != 0)) {
*MemPtr = ScudoAllocator::FailureHandler::OnBadRequest();
return EINVAL;
if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
ScudoAllocator::FailureHandler::OnBadRequest();
return errno_EINVAL;
}
*MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
if (!*MemPtr)
return ENOMEM;
void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
if (UNLIKELY(!Ptr))
return errno_ENOMEM;
*MemPtr = Ptr;
return 0;
}
void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
// Alignment must be a power of 2, Size must be a multiple of Alignment.
if (UNLIKELY(!IsPowerOfTwo(Alignment) || (Size & (Alignment - 1)) != 0))
if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
errno = errno_EINVAL;
return ScudoAllocator::FailureHandler::OnBadRequest();
return Instance.allocate(Size, Alignment, FromMalloc);
}
return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
}
uptr scudoMallocUsableSize(void *Ptr) {


@ -23,41 +23,47 @@ template <class PrimaryAllocator, class AllocatorCache,
class SecondaryAllocator>
class ScudoCombinedAllocator {
public:
void Init(s32 ReleaseToOSIntervalMs) {
void init(s32 ReleaseToOSIntervalMs) {
Primary.Init(ReleaseToOSIntervalMs);
Secondary.Init();
Stats.Init();
}
void *Allocate(AllocatorCache *Cache, uptr Size, uptr Alignment,
bool FromPrimary) {
if (FromPrimary)
return Cache->Allocate(&Primary, Primary.ClassID(Size));
// Primary allocations are always MinAlignment aligned, and as such do not
// require an Alignment parameter.
void *allocatePrimary(AllocatorCache *Cache, uptr Size) {
return Cache->Allocate(&Primary, Primary.ClassID(Size));
}
// Secondary allocations do not require a Cache, but do require an Alignment
// parameter.
void *allocateSecondary(uptr Size, uptr Alignment) {
return Secondary.Allocate(&Stats, Size, Alignment);
}
void Deallocate(AllocatorCache *Cache, void *Ptr, bool FromPrimary) {
if (FromPrimary)
Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
else
Secondary.Deallocate(&Stats, Ptr);
void deallocatePrimary(AllocatorCache *Cache, void *Ptr) {
Cache->Deallocate(&Primary, Primary.GetSizeClass(Ptr), Ptr);
}
uptr GetActuallyAllocatedSize(void *Ptr, bool FromPrimary) {
void deallocateSecondary(void *Ptr) {
Secondary.Deallocate(&Stats, Ptr);
}
uptr getActuallyAllocatedSize(void *Ptr, bool FromPrimary) {
if (FromPrimary)
return PrimaryAllocator::ClassIdToSize(Primary.GetSizeClass(Ptr));
return Secondary.GetActuallyAllocatedSize(Ptr);
}
void InitCache(AllocatorCache *Cache) {
void initCache(AllocatorCache *Cache) {
Cache->Init(&Stats);
}
void DestroyCache(AllocatorCache *Cache) {
void destroyCache(AllocatorCache *Cache) {
Cache->Destroy(&Primary, &Stats);
}
void GetStats(AllocatorStatCounters StatType) const {
void getStats(AllocatorStatCounters StatType) const {
Stats.Get(StatType);
}
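
The old Allocate/Deallocate pair branched on a FromPrimary flag; the split makes the two paths explicit, since primary allocations are always MinAlignment-aligned and cache-backed while secondary ones need an alignment but no cache. A stripped-down sketch of the resulting shape (hypothetical stand-in backends, POSIX assumed):

#include <cstdlib>

struct Cache {};  // hypothetical stand-in for the per-thread allocator cache

struct CombinedAllocatorSketch {
  // Primary: size-classed, cache-backed, fixed minimum alignment.
  void *allocatePrimary(Cache *c, size_t size) { (void)c; return malloc(size); }
  void deallocatePrimary(Cache *c, void *p) { (void)c; free(p); }

  // Secondary: page-granular, alignment-aware, no cache required.
  void *allocateSecondary(size_t size, size_t alignment) {
    void *p = nullptr;
    return posix_memalign(&p, alignment, size) == 0 ? p : nullptr;
  }
  void deallocateSecondary(void *p) { free(p); }
};

int main() {
  CombinedAllocatorSketch a;
  Cache c;
  void *small = a.allocatePrimary(&c, 64);
  void *big = a.allocateSecondary(1 << 20, 4096);
  a.deallocatePrimary(&c, small);
  a.deallocateSecondary(big);
}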


@ -61,20 +61,13 @@
// an exclusive lock; ThreadClock's are private to respective threads and so
// do not need any protection.
//
// Description of ThreadClock state:
// clk_ - fixed size vector clock.
// nclk_ - effective size of the vector clock (the rest is zeros).
// tid_ - index of the thread associated with the clock ("current thread").
// last_acquire_ - current thread time when it acquired something from
// other threads.
//
// Description of SyncClock state:
// clk_ - variable size vector clock, low kClkBits hold timestamp,
// the remaining bits hold "acquired" flag (the actual value is thread's
// reused counter);
//   if acquired == thr->reused_, then the respective thread has already
//   acquired this clock (except possibly dirty_tids_).
//   acquired this clock (except possibly for dirty elements).
// dirty_tids_ - holds up to two indices in the vector clock that other threads
// dirty_ - holds up to two indices in the vector clock that other threads
//   need to acquire regardless of "acquired" flag value;
// release_store_tid_ - denotes that the clock state is a result of
// release-store operation by the thread with release_store_tid_ index.
@ -90,21 +83,51 @@
namespace __tsan {
static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]);
}
// Drop reference to the first level block idx.
static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
ClockBlock *cb = ctx->clock_alloc.Map(idx);
atomic_uint32_t *ref = ref_ptr(cb);
u32 v = atomic_load(ref, memory_order_acquire);
for (;;) {
CHECK_GT(v, 0);
if (v == 1)
break;
if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel))
return;
}
// First level block owns second level blocks, so free them as well.
for (uptr i = 0; i < blocks; i++)
ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]);
ctx->clock_alloc.Free(c, idx);
}
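
The drop-reference discipline above, in isolation: observing a count of 1 means the caller holds the last reference and may free without an atomic read-modify-write; otherwise a CAS decrement hands the block to the remaining owners. A sketch with std::atomic standing in for the sanitizer atomics:

#include <atomic>
#include <cstdint>

// Returns true when the caller held the last reference and must free.
bool unref(std::atomic<uint32_t> &ref) {
  uint32_t v = ref.load(std::memory_order_acquire);
  for (;;) {
    if (v == 1)
      return true;  // sole owner: free without touching the counter
    // Drop one reference; on CAS failure v is reloaded and re-examined.
    if (ref.compare_exchange_strong(v, v - 1, std::memory_order_acq_rel))
      return false;
  }
}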
ThreadClock::ThreadClock(unsigned tid, unsigned reused)
: tid_(tid)
, reused_(reused + 1) { // 0 has special meaning
, reused_(reused + 1) // 0 has special meaning
, cached_idx_()
, cached_size_()
, cached_blocks_() {
CHECK_LT(tid, kMaxTidInClock);
CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
nclk_ = tid_ + 1;
last_acquire_ = 0;
internal_memset(clk_, 0, sizeof(clk_));
clk_[tid_].reused = reused_;
}
void ThreadClock::ResetCached(ClockCache *c) {
if (cached_idx_) {
UnrefClockBlock(c, cached_idx_, cached_blocks_);
cached_idx_ = 0;
cached_size_ = 0;
cached_blocks_ = 0;
}
}
void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(src->size_, kMaxTid);
CPP_STAT_INC(StatClockAcquire);
@ -116,50 +139,46 @@ void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
return;
}
// Check if we've already acquired src after the last release operation on src
bool acquired = false;
if (nclk > tid_) {
if (src->elem(tid_).reused == reused_) {
for (unsigned i = 0; i < kDirtyTids; i++) {
unsigned tid = src->dirty_tids_[i];
if (tid != kInvalidTid) {
u64 epoch = src->elem(tid).epoch;
if (clk_[tid].epoch < epoch) {
clk_[tid].epoch = epoch;
acquired = true;
}
}
for (unsigned i = 0; i < kDirtyTids; i++) {
SyncClock::Dirty dirty = src->dirty_[i];
unsigned tid = dirty.tid;
if (tid != kInvalidTid) {
if (clk_[tid] < dirty.epoch) {
clk_[tid] = dirty.epoch;
acquired = true;
}
if (acquired) {
CPP_STAT_INC(StatClockAcquiredSomething);
last_acquire_ = clk_[tid_].epoch;
}
return;
}
}
// O(N) acquire.
CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
for (uptr i = 0; i < nclk; i++) {
u64 epoch = src->elem(i).epoch;
if (clk_[i].epoch < epoch) {
clk_[i].epoch = epoch;
acquired = true;
// Check if we've already acquired src after the last release operation on src
if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
// O(N) acquire.
CPP_STAT_INC(StatClockAcquireFull);
nclk_ = max(nclk_, nclk);
u64 *dst_pos = &clk_[0];
for (ClockElem &src_elem : *src) {
u64 epoch = src_elem.epoch;
if (*dst_pos < epoch) {
*dst_pos = epoch;
acquired = true;
}
dst_pos++;
}
}
// Remember that this thread has acquired this clock.
if (nclk > tid_)
src->elem(tid_).reused = reused_;
// Remember that this thread has acquired this clock.
if (nclk > tid_)
src->elem(tid_).reused = reused_;
}
if (acquired) {
CPP_STAT_INC(StatClockAcquiredSomething);
last_acquire_ = clk_[tid_].epoch;
last_acquire_ = clk_[tid_];
ResetCached(c);
}
}
void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
void ThreadClock::release(ClockCache *c, SyncClock *dst) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(dst->size_, kMaxTid);
@ -179,7 +198,7 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
// since the last release on dst. If so, we need to update
// only dst->elem(tid_).
if (dst->elem(tid_).epoch > last_acquire_) {
UpdateCurrentThread(dst);
UpdateCurrentThread(c, dst);
if (dst->release_store_tid_ != tid_ ||
dst->release_store_reused_ != reused_)
dst->release_store_tid_ = kInvalidTid;
@ -188,23 +207,24 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
// O(N) release.
CPP_STAT_INC(StatClockReleaseFull);
dst->Unshare(c);
// First, remember whether we've acquired dst.
bool acquired = IsAlreadyAcquired(dst);
if (acquired)
CPP_STAT_INC(StatClockReleaseAcquired);
// Update dst->clk_.
for (uptr i = 0; i < nclk_; i++) {
ClockElem &ce = dst->elem(i);
ce.epoch = max(ce.epoch, clk_[i].epoch);
dst->FlushDirty();
uptr i = 0;
for (ClockElem &ce : *dst) {
ce.epoch = max(ce.epoch, clk_[i]);
ce.reused = 0;
i++;
}
// Clear 'acquired' flag in the remaining elements.
if (nclk_ < dst->size_)
CPP_STAT_INC(StatClockReleaseClearTail);
for (uptr i = nclk_; i < dst->size_; i++)
dst->elem(i).reused = 0;
for (unsigned i = 0; i < kDirtyTids; i++)
dst->dirty_tids_[i] = kInvalidTid;
dst->release_store_tid_ = kInvalidTid;
dst->release_store_reused_ = 0;
// If we've acquired dst, remember this fact,
@ -213,11 +233,37 @@ void ThreadClock::release(ClockCache *c, SyncClock *dst) const {
dst->elem(tid_).reused = reused_;
}
void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
DCHECK_LE(nclk_, kMaxTid);
DCHECK_LE(dst->size_, kMaxTid);
CPP_STAT_INC(StatClockStore);
if (dst->size_ == 0 && cached_idx_ != 0) {
// Reuse the cached clock.
// Note: we could reuse/cache the cached clock in more cases:
// we could update the existing clock and cache it, or replace it with the
// currently cached clock and release the old one. And for a shared
// existing clock, we could replace it with the currently cached one,
// or unshare, update and cache. But, for simplicity, we currently reuse the
// cached clock only when the target clock is empty.
dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
dst->tab_idx_ = cached_idx_;
dst->size_ = cached_size_;
dst->blocks_ = cached_blocks_;
CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
// The cached clock is shared (immutable),
// so this is where we store the current clock.
dst->dirty_[0].tid = tid_;
dst->dirty_[0].epoch = clk_[tid_];
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
// Remember that we don't need to acquire it in the future.
dst->elem(tid_).reused = reused_;
// Grab a reference.
atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
return;
}
// Check if we need to resize dst.
if (dst->size_ < nclk_)
dst->Resize(c, nclk_);
@ -226,32 +272,41 @@ void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) const {
dst->release_store_reused_ == reused_ &&
dst->elem(tid_).epoch > last_acquire_) {
CPP_STAT_INC(StatClockStoreFast);
UpdateCurrentThread(dst);
UpdateCurrentThread(c, dst);
return;
}
// O(N) release-store.
CPP_STAT_INC(StatClockStoreFull);
for (uptr i = 0; i < nclk_; i++) {
ClockElem &ce = dst->elem(i);
ce.epoch = clk_[i].epoch;
dst->Unshare(c);
// Note: dst can be larger than this ThreadClock.
// This is fine since clk_ beyond size is all zeros.
uptr i = 0;
for (ClockElem &ce : *dst) {
ce.epoch = clk_[i];
ce.reused = 0;
i++;
}
// Clear the tail of dst->clk_.
if (nclk_ < dst->size_) {
for (uptr i = nclk_; i < dst->size_; i++) {
ClockElem &ce = dst->elem(i);
ce.epoch = 0;
ce.reused = 0;
}
CPP_STAT_INC(StatClockStoreTail);
}
for (unsigned i = 0; i < kDirtyTids; i++)
dst->dirty_tids_[i] = kInvalidTid;
for (uptr i = 0; i < kDirtyTids; i++)
dst->dirty_[i].tid = kInvalidTid;
dst->release_store_tid_ = tid_;
dst->release_store_reused_ = reused_;
// Remember that we don't need to acquire it in the future.
dst->elem(tid_).reused = reused_;
// If the resulting clock is cachable, cache it for future release operations.
// The clock is always cachable if we released to an empty sync object.
if (cached_idx_ == 0 && dst->Cachable()) {
// Grab a reference to the ClockBlock.
atomic_uint32_t *ref = ref_ptr(dst->tab_);
if (atomic_load(ref, memory_order_acquire) == 1)
atomic_store_relaxed(ref, 2);
else
atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
cached_idx_ = dst->tab_idx_;
cached_size_ = dst->size_;
cached_blocks_ = dst->blocks_;
}
}
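
The reference grab at the end has a subtlety worth spelling out: when the observed count is 1 this thread is the sole owner, and sharing only begins once the release completes, so a plain store of 2 is enough and the atomic read-modify-write is avoided. The same pattern in isolation (a sketch with std::atomic):

#include <atomic>
#include <cstdint>

void grab_ref(std::atomic<uint32_t> &ref) {
  if (ref.load(std::memory_order_acquire) == 1)
    ref.store(2, std::memory_order_relaxed);  // sole owner: no RMW needed
  else
    ref.fetch_add(1, std::memory_order_relaxed);
}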
void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
@ -261,37 +316,36 @@ void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
}
// Updates only single element related to the current thread in dst->clk_.
void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
// Update the threads time, but preserve 'acquired' flag.
dst->elem(tid_).epoch = clk_[tid_].epoch;
for (unsigned i = 0; i < kDirtyTids; i++) {
if (dst->dirty_tids_[i] == tid_) {
SyncClock::Dirty *dirty = &dst->dirty_[i];
const unsigned tid = dirty->tid;
if (tid == tid_ || tid == kInvalidTid) {
CPP_STAT_INC(StatClockReleaseFast);
return;
}
if (dst->dirty_tids_[i] == kInvalidTid) {
CPP_STAT_INC(StatClockReleaseFast);
dst->dirty_tids_[i] = tid_;
dirty->tid = tid_;
dirty->epoch = clk_[tid_];
return;
}
}
// Reset all 'acquired' flags, O(N).
// We are going to touch dst elements, so we need to unshare it.
dst->Unshare(c);
CPP_STAT_INC(StatClockReleaseSlow);
dst->elem(tid_).epoch = clk_[tid_];
for (uptr i = 0; i < dst->size_; i++)
dst->elem(i).reused = 0;
for (unsigned i = 0; i < kDirtyTids; i++)
dst->dirty_tids_[i] = kInvalidTid;
dst->FlushDirty();
}
// Checks whether the current threads has already acquired src.
// Checks whether the current thread has already acquired src.
bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
if (src->elem(tid_).reused != reused_)
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
unsigned tid = src->dirty_tids_[i];
if (tid != kInvalidTid) {
if (clk_[tid].epoch < src->elem(tid).epoch)
SyncClock::Dirty dirty = src->dirty_[i];
if (dirty.tid != kInvalidTid) {
if (clk_[dirty.tid] < dirty.epoch)
return false;
}
}
@ -302,22 +356,19 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
// This function is called only from weird places like AcquireGlobal.
void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
DCHECK_LT(tid, kMaxTid);
DCHECK_GE(v, clk_[tid].epoch);
clk_[tid].epoch = v;
DCHECK_GE(v, clk_[tid]);
clk_[tid] = v;
if (nclk_ <= tid)
nclk_ = tid + 1;
last_acquire_ = clk_[tid_].epoch;
last_acquire_ = clk_[tid_];
ResetCached(c);
}
void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
printf("clock=[");
for (uptr i = 0; i < nclk_; i++)
printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
printf("] reused=[");
for (uptr i = 0; i < nclk_; i++)
printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
printf("] tid=%u/%u last_acq=%llu",
tid_, reused_, last_acquire_);
printf("%s%llu", i == 0 ? "" : ",", clk_[i]);
printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_);
}
SyncClock::SyncClock() {
@ -327,22 +378,14 @@ SyncClock::SyncClock() {
SyncClock::~SyncClock() {
// Reset must be called before dtor.
CHECK_EQ(size_, 0);
CHECK_EQ(blocks_, 0);
CHECK_EQ(tab_, 0);
CHECK_EQ(tab_idx_, 0);
}
void SyncClock::Reset(ClockCache *c) {
if (size_ == 0) {
// nothing
} else if (size_ <= ClockBlock::kClockCount) {
// One-level table.
ctx->clock_alloc.Free(c, tab_idx_);
} else {
// Two-level table.
for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
ctx->clock_alloc.Free(c, tab_idx_);
}
if (size_)
UnrefClockBlock(c, tab_idx_, blocks_);
ResetImpl();
}
@ -350,66 +393,171 @@ void SyncClock::ResetImpl() {
tab_ = 0;
tab_idx_ = 0;
size_ = 0;
blocks_ = 0;
release_store_tid_ = kInvalidTid;
release_store_reused_ = 0;
for (uptr i = 0; i < kDirtyTids; i++)
dirty_tids_[i] = kInvalidTid;
dirty_[i].tid = kInvalidTid;
}
void SyncClock::Resize(ClockCache *c, uptr nclk) {
CPP_STAT_INC(StatClockReleaseResize);
if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
RoundUpTo(size_, ClockBlock::kClockCount)) {
// Growing within the same block.
Unshare(c);
if (nclk <= capacity()) {
// Memory is already allocated, just increase the size.
size_ = nclk;
return;
}
if (nclk <= ClockBlock::kClockCount) {
if (size_ == 0) {
// Grow from 0 to one-level table.
CHECK_EQ(size_, 0);
CHECK_EQ(blocks_, 0);
CHECK_EQ(tab_, 0);
CHECK_EQ(tab_idx_, 0);
size_ = nclk;
tab_idx_ = ctx->clock_alloc.Alloc(c);
tab_ = ctx->clock_alloc.Map(tab_idx_);
internal_memset(tab_, 0, sizeof(*tab_));
return;
atomic_store_relaxed(ref_ptr(tab_), 1);
size_ = 1;
} else if (size_ > blocks_ * ClockBlock::kClockCount) {
u32 idx = ctx->clock_alloc.Alloc(c);
ClockBlock *new_cb = ctx->clock_alloc.Map(idx);
uptr top = size_ - blocks_ * ClockBlock::kClockCount;
CHECK_LT(top, ClockBlock::kClockCount);
const uptr move = top * sizeof(tab_->clock[0]);
internal_memcpy(&new_cb->clock[0], tab_->clock, move);
internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move);
internal_memset(tab_->clock, 0, move);
append_block(idx);
}
// Growing two-level table.
if (size_ == 0) {
// Allocate first level table.
tab_idx_ = ctx->clock_alloc.Alloc(c);
tab_ = ctx->clock_alloc.Map(tab_idx_);
internal_memset(tab_, 0, sizeof(*tab_));
} else if (size_ <= ClockBlock::kClockCount) {
// Transform one-level table to two-level table.
u32 old = tab_idx_;
tab_idx_ = ctx->clock_alloc.Alloc(c);
tab_ = ctx->clock_alloc.Map(tab_idx_);
internal_memset(tab_, 0, sizeof(*tab_));
tab_->table[0] = old;
}
// At this point we have first level table allocated.
// At this point we have first level table allocated and all clock elements
// are evacuated from it to a second level block.
// Add second level tables as necessary.
for (uptr i = RoundUpTo(size_, ClockBlock::kClockCount);
i < nclk; i += ClockBlock::kClockCount) {
while (nclk > capacity()) {
u32 idx = ctx->clock_alloc.Alloc(c);
ClockBlock *cb = ctx->clock_alloc.Map(idx);
internal_memset(cb, 0, sizeof(*cb));
CHECK_EQ(tab_->table[i/ClockBlock::kClockCount], 0);
tab_->table[i/ClockBlock::kClockCount] = idx;
append_block(idx);
}
size_ = nclk;
}
ClockElem &SyncClock::elem(unsigned tid) const {
// Flushes all dirty elements into the main clock array.
void SyncClock::FlushDirty() {
for (unsigned i = 0; i < kDirtyTids; i++) {
Dirty *dirty = &dirty_[i];
if (dirty->tid != kInvalidTid) {
CHECK_LT(dirty->tid, size_);
elem(dirty->tid).epoch = dirty->epoch;
dirty->tid = kInvalidTid;
}
}
}
bool SyncClock::IsShared() const {
if (size_ == 0)
return false;
atomic_uint32_t *ref = ref_ptr(tab_);
u32 v = atomic_load(ref, memory_order_acquire);
CHECK_GT(v, 0);
return v > 1;
}
// Unshares the current clock if it's shared.
// Shared clocks are immutable, so they need to be unshared before any updates.
// Note: this does not apply to dirty entries as they are not shared.
void SyncClock::Unshare(ClockCache *c) {
if (!IsShared())
return;
// First, copy current state into old.
SyncClock old;
old.tab_ = tab_;
old.tab_idx_ = tab_idx_;
old.size_ = size_;
old.blocks_ = blocks_;
old.release_store_tid_ = release_store_tid_;
old.release_store_reused_ = release_store_reused_;
for (unsigned i = 0; i < kDirtyTids; i++)
old.dirty_[i] = dirty_[i];
// Then, clear current object.
ResetImpl();
// Allocate brand new clock in the current object.
Resize(c, old.size_);
// Now copy state back into this object.
Iter old_iter(&old);
for (ClockElem &ce : *this) {
ce = *old_iter;
++old_iter;
}
release_store_tid_ = old.release_store_tid_;
release_store_reused_ = old.release_store_reused_;
for (unsigned i = 0; i < kDirtyTids; i++)
dirty_[i] = old.dirty_[i];
// Drop reference to old and delete if necessary.
old.Reset(c);
}
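
Unshare is a copy-on-write step: shared tables are immutable, so a writer clones the payload into a fresh allocation before mutating it. The same discipline in miniature (a loose sketch; shared_ptr's use count stands in for the manual reference counter):

#include <memory>
#include <vector>

template <typename T>
void unshare(std::shared_ptr<std::vector<T>> &tab) {
  if (tab && tab.use_count() > 1)  // shared: clone before any write
    tab = std::make_shared<std::vector<T>>(*tab);
}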
// Can we cache this clock for future release operations?
ALWAYS_INLINE bool SyncClock::Cachable() const {
if (size_ == 0)
return false;
for (unsigned i = 0; i < kDirtyTids; i++) {
if (dirty_[i].tid != kInvalidTid)
return false;
}
return atomic_load_relaxed(ref_ptr(tab_)) == 1;
}
// elem linearizes the two-level structure into a linear array.
// Note: this is used only for one-time accesses; vector operations use
// the iterator, as it is much faster.
ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const {
DCHECK_LT(tid, size_);
if (size_ <= ClockBlock::kClockCount)
const uptr block = tid / ClockBlock::kClockCount;
DCHECK_LE(block, blocks_);
tid %= ClockBlock::kClockCount;
if (block == blocks_)
return tab_->clock[tid];
u32 idx = tab_->table[tid / ClockBlock::kClockCount];
u32 idx = get_block(block);
ClockBlock *cb = ctx->clock_alloc.Map(idx);
return cb->clock[tid % ClockBlock::kClockCount];
return cb->clock[tid];
}
ALWAYS_INLINE uptr SyncClock::capacity() const {
if (size_ == 0)
return 0;
uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]);
// How many clock elements we can fit into the first level block.
// +1 for ref counter.
uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio;
return blocks_ * ClockBlock::kClockCount + top;
}
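
Worked through with tsan's constants (512-byte blocks, 8-byte ClockElem giving kClockCount = 64, 4-byte u32 slots giving ratio = 2), the formula yields 63 elements for a one-level table, 127 with one appended block, and 190 with two; these are exactly the boundaries the unit tests probe. A standalone check of that arithmetic, with the constants restated locally as an assumption:

#include <cstdint>

constexpr uint64_t RoundUpTo(uint64_t x, uint64_t b) {
  return (x + b - 1) / b * b;
}
constexpr uint64_t capacity(uint64_t blocks) {
  // kClockCount = 64, ratio = 2; the ref slot and block indices steal
  // RoundUpTo(blocks + 1, 2) / 2 elements from the first-level block.
  return blocks * 64 + (64 - RoundUpTo(blocks + 1, 2) / 2);
}
static_assert(capacity(0) == 63, "one-level table");
static_assert(capacity(1) == 127, "one second-level block");
static_assert(capacity(2) == 190, "two second-level blocks");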
ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const {
DCHECK(size_);
DCHECK_LT(bi, blocks_);
return tab_->table[ClockBlock::kBlockIdx - bi];
}
ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
uptr bi = blocks_++;
CHECK_EQ(get_block(bi), 0);
tab_->table[ClockBlock::kBlockIdx - bi] = idx;
}
// Used only by tests.
u64 SyncClock::get(unsigned tid) const {
for (unsigned i = 0; i < kDirtyTids; i++) {
Dirty dirty = dirty_[i];
if (dirty.tid == tid)
return dirty.epoch;
}
return elem(tid).epoch;
}
// Used only by Iter test.
u64 SyncClock::get_clean(unsigned tid) const {
return elem(tid).epoch;
}
void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
@ -419,8 +567,32 @@ void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
printf("] reused=[");
for (uptr i = 0; i < size_; i++)
printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
printf("] release_store_tid=%d/%d dirty_tids=%d/%d",
printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
release_store_tid_, release_store_reused_,
dirty_tids_[0], dirty_tids_[1]);
dirty_[0].tid, dirty_[0].epoch,
dirty_[1].tid, dirty_[1].epoch);
}
void SyncClock::Iter::Next() {
// Finished with the current block, move on to the next one.
block_++;
if (block_ < parent_->blocks_) {
// Iterate over the next second level block.
u32 idx = parent_->get_block(block_);
ClockBlock *cb = ctx->clock_alloc.Map(idx);
pos_ = &cb->clock[0];
end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
ClockBlock::kClockCount);
return;
}
if (block_ == parent_->blocks_ &&
parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) {
// Iterate over elements in the first level block.
pos_ = &parent_->tab_->clock[0];
end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
ClockBlock::kClockCount);
return;
}
parent_ = nullptr; // denotes end
}
} // namespace __tsan

View File

@ -18,25 +18,6 @@
namespace __tsan {
struct ClockElem {
u64 epoch : kClkBits;
u64 reused : 64 - kClkBits;
};
struct ClockBlock {
static const uptr kSize = 512;
static const uptr kTableSize = kSize / sizeof(u32);
static const uptr kClockCount = kSize / sizeof(ClockElem);
union {
u32 table[kTableSize];
ClockElem clock[kClockCount];
};
ClockBlock() {
}
};
typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;
@ -46,69 +27,117 @@ class SyncClock {
SyncClock();
~SyncClock();
uptr size() const {
return size_;
}
uptr size() const;
u64 get(unsigned tid) const {
return elem(tid).epoch;
}
// These are used only in tests.
u64 get(unsigned tid) const;
u64 get_clean(unsigned tid) const;
void Resize(ClockCache *c, uptr nclk);
void Reset(ClockCache *c);
void DebugDump(int(*printf)(const char *s, ...));
// Clock element iterator.
// Note: it iterates only over the table without regard to dirty entries.
class Iter {
public:
explicit Iter(SyncClock* parent);
Iter& operator++();
bool operator!=(const Iter& other);
ClockElem &operator*();
private:
SyncClock *parent_;
// [pos_, end_) is the current contiguous range of clock elements.
ClockElem *pos_;
ClockElem *end_;
int block_; // Index of the current second-level block.
NOINLINE void Next();
};
Iter begin();
Iter end();
private:
friend struct ThreadClock;
friend class ThreadClock;
friend class Iter;
static const uptr kDirtyTids = 2;
struct Dirty {
u64 epoch : kClkBits;
u64 tid : 64 - kClkBits; // kInvalidTid if not active
};
unsigned release_store_tid_;
unsigned release_store_reused_;
unsigned dirty_tids_[kDirtyTids];
// tab_ contains indirect pointer to a 512b block using DenseSlabAlloc.
// If size_ <= 64, then tab_ points to an array with 64 ClockElem's.
// Otherwise, tab_ points to an array with 128 u32 elements,
Dirty dirty_[kDirtyTids];
// If size_ is 0, tab_ is nullptr.
// If size_ <= 64 (kClockCount), tab_ contains a pointer to an array with
// 64 ClockElem's (ClockBlock::clock).
// Otherwise, tab_ points to an array with up to 127 u32 elements,
// each pointing to the second-level 512b block with 64 ClockElem's.
// Unused space in the first level ClockBlock is used to store additional
// clock elements.
// The last u32 element in the first level ClockBlock is always used as
// reference counter.
//
// See the following scheme for details.
// All memory blocks are 512 bytes (allocated from ClockAlloc).
// Clock (clk) elements are 64 bits.
// Idx and ref are 32 bits.
//
// tab_
// |
// \/
// +----------------------------------------------------+
// | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
// +----------------------------------------------------+
// | |
// | \/
// | +----------------+
// | | clk0 ... clk63 |
// | +----------------+
// \/
// +------------------+
// | clk64 ... clk127 |
// +------------------+
//
// Note: dirty entries, if active, always override what's stored in the clock.
ClockBlock *tab_;
u32 tab_idx_;
u32 size_;
u16 size_;
u16 blocks_; // Number of second level blocks.
void Unshare(ClockCache *c);
bool IsShared() const;
bool Cachable() const;
void ResetImpl();
void FlushDirty();
uptr capacity() const;
u32 get_block(uptr bi) const;
void append_block(u32 idx);
ClockElem &elem(unsigned tid) const;
};
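
Reading the diagram through elem(): a tid first selects a second-level block, and only tids at or past blocks_ * kClockCount land in the first-level "top" area. The index arithmetic for the two-block layout pictured above, restated standalone as a sketch:

constexpr unsigned kClockCount = 64;
constexpr unsigned kBlocks = 2;  // two second-level blocks, as drawn

constexpr bool inFirstLevel(unsigned tid) {
  return tid / kClockCount == kBlocks;  // past all second-level blocks
}
static_assert(!inFirstLevel(127), "clk127 lives in the idx1 block");
static_assert(inFirstLevel(128), "clk128 is stored inline in the first level");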
// The clock that lives in threads.
struct ThreadClock {
class ThreadClock {
public:
typedef DenseSlabAllocCache Cache;
explicit ThreadClock(unsigned tid, unsigned reused = 0);
u64 get(unsigned tid) const {
DCHECK_LT(tid, kMaxTidInClock);
return clk_[tid].epoch;
}
u64 get(unsigned tid) const;
void set(ClockCache *c, unsigned tid, u64 v);
void set(u64 v);
void tick();
uptr size() const;
void set(u64 v) {
DCHECK_GE(v, clk_[tid_].epoch);
clk_[tid_].epoch = v;
}
void tick() {
clk_[tid_].epoch++;
}
uptr size() const {
return nclk_;
}
void acquire(ClockCache *c, const SyncClock *src);
void release(ClockCache *c, SyncClock *dst) const;
void acquire(ClockCache *c, SyncClock *src);
void release(ClockCache *c, SyncClock *dst);
void acq_rel(ClockCache *c, SyncClock *dst);
void ReleaseStore(ClockCache *c, SyncClock *dst) const;
void ReleaseStore(ClockCache *c, SyncClock *dst);
void ResetCached(ClockCache *c);
void DebugReset();
@ -116,16 +145,82 @@ struct ThreadClock {
private:
static const uptr kDirtyTids = SyncClock::kDirtyTids;
// Index of the thread associated with the clock ("current thread").
const unsigned tid_;
const unsigned reused_;
const unsigned reused_; // tid_ reuse count.
// Current thread time when it acquired something from other threads.
u64 last_acquire_;
// Cached SyncClock (without dirty entries and release_store_tid_).
// We reuse it for subsequent store-release operations without intervening
// acquire operations. Since it is shared (and thus constant), the clock value
// for the current thread is then stored in dirty entries in the SyncClock.
// We hold a reference to the table while it is cached here.
u32 cached_idx_;
u16 cached_size_;
u16 cached_blocks_;
// Number of active elements in the clk_ table (the rest is zeros).
uptr nclk_;
ClockElem clk_[kMaxTidInClock];
u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
bool IsAlreadyAcquired(const SyncClock *src) const;
void UpdateCurrentThread(SyncClock *dst) const;
void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};
ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
DCHECK_LT(tid, kMaxTidInClock);
return clk_[tid];
}
ALWAYS_INLINE void ThreadClock::set(u64 v) {
DCHECK_GE(v, clk_[tid_]);
clk_[tid_] = v;
}
ALWAYS_INLINE void ThreadClock::tick() {
clk_[tid_]++;
}
ALWAYS_INLINE uptr ThreadClock::size() const {
return nclk_;
}
ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
return Iter(this);
}
ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
return Iter(nullptr);
}
ALWAYS_INLINE uptr SyncClock::size() const {
return size_;
}
ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
: parent_(parent)
, pos_(nullptr)
, end_(nullptr)
, block_(-1) {
if (parent)
Next();
}
ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
pos_++;
if (UNLIKELY(pos_ >= end_))
Next();
return *this;
}
ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
return parent_ != other.parent_;
}
ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
return *pos_;
}
} // namespace __tsan
#endif // TSAN_CLOCK_H

View File

@ -38,15 +38,40 @@
namespace __tsan {
const int kClkBits = 42;
const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
struct ClockElem {
u64 epoch : kClkBits;
u64 reused : 64 - kClkBits; // tid reuse count
};
struct ClockBlock {
static const uptr kSize = 512;
static const uptr kTableSize = kSize / sizeof(u32);
static const uptr kClockCount = kSize / sizeof(ClockElem);
static const uptr kRefIdx = kTableSize - 1;
static const uptr kBlockIdx = kTableSize - 2;
union {
u32 table[kTableSize];
ClockElem clock[kClockCount];
};
ClockBlock() {
}
};
const int kTidBits = 13;
const unsigned kMaxTid = 1 << kTidBits;
// Reduce kMaxTid by kClockCount because one slot in the ClockBlock table is
// occupied by the reference counter, so the total number of elements we can
// store in SyncClock is kClockCount * (kTableSize - 1).
const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
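
Both sides of that bound come to 8128 (64 * 127 and 8192 - 64). A compile-time restatement of the arithmetic, with the constants assumed to match the definitions above:

#include <cstdint>

constexpr unsigned kTableSize = 512 / sizeof(uint32_t);  // 128 table slots
constexpr unsigned kClockCount = 512 / 8;                // 64 ClockElems
static_assert(kClockCount * (kTableSize - 1) == (1u << 13) - kClockCount,
              "64 * 127 == 8192 - 64 == 8128");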
#if !SANITIZER_GO
const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
#else
const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
#endif
const int kClkBits = 42;
const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
const uptr kShadowStackSize = 64 * 1024;
// Count of shadow values in a shadow cell.
@ -74,7 +99,7 @@ const bool kCollectHistory = false;
const bool kCollectHistory = true;
#endif
const unsigned kInvalidTid = (unsigned)-1;
const u16 kInvalidTid = kMaxTid + 1;
// The following "build consistency" machinery ensures that all source files
// are built in the same configuration. Inconsistent builds lead to

View File

@ -10,6 +10,7 @@
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"

View File

@ -286,7 +286,7 @@ void InitializePlatform() {
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
int cnt = 0;
__res_state *statp = (__res_state*)state;
struct __res_state *statp = (struct __res_state*)state;
for (int i = 0; i < MAXNS && cnt < nfd; i++) {
if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
fds[cnt++] = statp->_u._ext.nssocks[i];
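
The added struct keyword is needed because glibc's resolver headers declare both a type tag and a function named __res_state; the bare name resolves to the function, so only the elaborated form names the type. The collision in miniature (a sketch, not the glibc declarations verbatim):

struct __res_state { int nscount; };   // the type (tag name)
struct __res_state *__res_state();     // a function that hides the tag
// "__res_state *p;" no longer compiles: the name now means the function.
// "struct __res_state *p;" still names the type unambiguously.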

View File

@ -53,6 +53,31 @@ TEST(Clock, ChunkedBasic) {
chunked.Reset(&cache);
}
static const uptr interesting_sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66,
100, 124, 125, 126, 127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254,
255};
TEST(Clock, Iter) {
const uptr n = ARRAY_SIZE(interesting_sizes);
for (uptr fi = 0; fi < n; fi++) {
const uptr size = interesting_sizes[fi];
SyncClock sync;
ThreadClock vector(0);
for (uptr i = 0; i < size; i++)
vector.set(&cache, i, i + 1);
if (size != 0)
vector.release(&cache, &sync);
uptr i = 0;
for (ClockElem &ce : sync) {
ASSERT_LT(i, size);
ASSERT_EQ(sync.get_clean(i), ce.epoch);
i++;
}
ASSERT_EQ(i, size);
sync.Reset(&cache);
}
}
TEST(Clock, AcquireRelease) {
ThreadClock vector1(100);
vector1.tick();
@ -216,13 +241,11 @@ TEST(Clock, Growth) {
TEST(Clock, Growth2) {
// Test clock growth for every pair of sizes:
const uptr sizes[] = {0, 1, 2, 30, 61, 62, 63, 64, 65, 66, 100, 124, 125, 126,
127, 128, 129, 130, 188, 189, 190, 191, 192, 193, 254, 255};
const uptr n = sizeof(sizes) / sizeof(sizes[0]);
const uptr n = ARRAY_SIZE(interesting_sizes);
for (uptr fi = 0; fi < n; fi++) {
for (uptr ti = fi + 1; ti < n; ti++) {
const uptr from = sizes[fi];
const uptr to = sizes[ti];
const uptr from = interesting_sizes[fi];
const uptr to = interesting_sizes[ti];
SyncClock sync;
ThreadClock vector(0);
for (uptr i = 0; i < from; i++)

View File

@ -573,14 +573,19 @@ static void handlePointerOverflowImpl(PointerOverflowData *Data,
ScopedReport R(Opts, Loc, ET);
if ((sptr(Base) >= 0) == (sptr(Result) >= 0))
Diag(Loc, DL_Error, "unsigned pointer index expression result is %0, "
"preceding its base %1")
<< (void *)Result << (void *)Base;
else
if ((sptr(Base) >= 0) == (sptr(Result) >= 0)) {
if (Base > Result)
Diag(Loc, DL_Error, "addition of unsigned offset to %0 overflowed to %1")
<< (void *)Base << (void *)Result;
else
Diag(Loc, DL_Error,
"subtraction of unsigned offset from %0 overflowed to %1")
<< (void *)Base << (void *)Result;
} else {
Diag(Loc, DL_Error,
"pointer index expression with base %0 overflowed to %1")
<< (void *)Base << (void *)Result;
}
}
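
A worked instance of the first branch, assuming 64-bit pointers: adding a nominally unsigned -1 wraps the address downward, so Base and Result share a sign and Base > Result, which selects the addition wording.

#include <cstdint>

int main() {
  uintptr_t Base = 0x1000;
  uintptr_t Result = Base + uintptr_t(-1);  // wraps to 0xfff
  // Same sptr sign (both "positive") and Base > Result, so ubsan reports
  // "addition of unsigned offset to 0x1000 overflowed to 0xfff".
  return Base > Result ? 0 : 1;
}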
void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data,

View File

@ -36,10 +36,13 @@
// RUN: %env_asan_opts=allocator_may_return_null=1 %run %t new-nothrow 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-nnNULL
// UNSUPPORTED: win32
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits>
#include <new>
@ -84,6 +87,8 @@ int main(int argc, char **argv) {
assert(0);
}
fprintf(stderr, "errno: %d\n", errno);
// The NULL pointer is printed differently on different systems, while (long)0
// is always the same.
fprintf(stderr, "x: %lx\n", (long)x);
@ -108,14 +113,19 @@ int main(int argc, char **argv) {
// CHECK-nnCRASH: AddressSanitizer's allocator is terminating the process
// CHECK-mNULL: malloc:
// CHECK-mNULL: errno: 12
// CHECK-mNULL: x: 0
// CHECK-cNULL: calloc:
// CHECK-cNULL: errno: 12
// CHECK-cNULL: x: 0
// CHECK-coNULL: calloc-overflow:
// CHECK-coNULL: errno: 12
// CHECK-coNULL: x: 0
// CHECK-rNULL: realloc:
// CHECK-rNULL: errno: 12
// CHECK-rNULL: x: 0
// CHECK-mrNULL: realloc-after-malloc:
// CHECK-mrNULL: errno: 12
// CHECK-mrNULL: x: 0
// CHECK-nnNULL: new-nothrow:
// CHECK-nnNULL: x: 0

View File

@ -37,9 +37,10 @@
// RUN: | FileCheck %s --check-prefix=CHECK-nnNULL
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits>
#include <new>
@ -86,6 +87,8 @@ int main(int argc, char **argv) {
assert(0);
}
fprintf(stderr, "errno: %d\n", errno);
// The NULL pointer is printed differently on different systems, while (long)0
// is always the same.
fprintf(stderr, "x: %zu\n", (size_t)x);
@ -110,14 +113,19 @@ int main(int argc, char **argv) {
// CHECK-nnCRASH: Sanitizer's allocator is terminating the process
// CHECK-mNULL: malloc:
// CHECK-mNULL: errno: 12
// CHECK-mNULL: x: 0
// CHECK-cNULL: calloc:
// CHECK-cNULL: errno: 12
// CHECK-cNULL: x: 0
// CHECK-coNULL: calloc-overflow:
// CHECK-coNULL: errno: 12
// CHECK-coNULL: x: 0
// CHECK-rNULL: realloc:
// CHECK-rNULL: errno: 12
// CHECK-rNULL: x: 0
// CHECK-mrNULL: realloc-after-malloc:
// CHECK-mrNULL: errno: 12
// CHECK-mrNULL: x: 0
// CHECK-nnNULL: new-nothrow:
// CHECK-nnNULL: x: 0

View File

@ -36,11 +36,13 @@
// RUN: MSAN_OPTIONS=allocator_may_return_null=1 %run %t new-nothrow 2>&1 \
// RUN: | FileCheck %s --check-prefix=CHECK-nnNULL
// UNSUPPORTED: win32
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits>
#include <new>
@ -85,6 +87,8 @@ int main(int argc, char **argv) {
assert(0);
}
fprintf(stderr, "errno: %d\n", errno);
// The NULL pointer is printed differently on different systems, while (long)0
// is always the same.
fprintf(stderr, "x: %lx\n", (long)x);
@ -109,14 +113,19 @@ int main(int argc, char **argv) {
// CHECK-nnCRASH: MemorySanitizer's allocator is terminating the process
// CHECK-mNULL: malloc:
// CHECK-mNULL: errno: 12
// CHECK-mNULL: x: 0
// CHECK-cNULL: calloc:
// CHECK-cNULL: errno: 12
// CHECK-cNULL: x: 0
// CHECK-coNULL: calloc-overflow:
// CHECK-coNULL: errno: 12
// CHECK-coNULL: x: 0
// CHECK-rNULL: realloc:
// CHECK-rNULL: errno: 12
// CHECK-rNULL: x: 0
// CHECK-mrNULL: realloc-after-malloc:
// CHECK-mrNULL: errno: 12
// CHECK-mrNULL: x: 0
// CHECK-nnNULL: new-nothrow:
// CHECK-nnNULL: x: 0

View File

@ -65,15 +65,15 @@ int main(int argc, char **argv)
// Size is not a multiple of alignment.
p = aligned_alloc(alignment, size >> 1);
assert(!p);
p = (void *)0x42UL;
void *p_unchanged = (void *)0x42UL;
p = p_unchanged;
// Alignment is not a power of 2.
err = posix_memalign(&p, 3, size);
assert(!p);
assert(p == p_unchanged);
assert(err == EINVAL);
p = (void *)0x42UL;
// Alignment is a power of 2, but not a multiple of sizeof(void *).
err = posix_memalign(&p, 2, size);
assert(!p);
assert(p == p_unchanged);
assert(err == EINVAL);
}
return 0;

View File

@ -5,6 +5,8 @@
// RUN: %clangxx_tsan -O1 %s -o %t
// RUN: llvm-objdump -d %t | FileCheck %s
// REQUIRES: compiler-rt-optimized
int main() {
return 0;
}

View File

@ -1,13 +1,20 @@
// RUN: %clangxx -fsanitize=pointer-overflow %s -o %t
// RUN: %clangxx -std=c++11 -fsanitize=pointer-overflow %s -o %t
// RUN: %t 2>&1 | FileCheck %s
int main(int argc, char *argv[]) {
char c;
char *p = &c;
unsigned long long offset = -1;
unsigned long long neg_1 = -1;
// CHECK: unsigned-index-expression.cpp:[[@LINE+1]]:15: runtime error: unsigned pointer index expression result is 0x{{.*}}, preceding its base 0x{{.*}}
char *q = p + offset;
// CHECK: unsigned-index-expression.cpp:[[@LINE+1]]:15: runtime error: addition of unsigned offset to 0x{{.*}} overflowed to 0x{{.*}}
char *q = p + neg_1;
// CHECK: unsigned-index-expression.cpp:[[@LINE+1]]:16: runtime error: subtraction of unsigned offset from 0x{{.*}} overflowed to 0x{{.*}}
char *q1 = p - neg_1;
// CHECK: unsigned-index-expression.cpp:[[@LINE+2]]:16: runtime error: pointer index expression with base 0x{{0*}} overflowed to 0x{{.*}}
char *n = nullptr;
char *q2 = n - 1ULL;
return 0;
}