Vendor import of compiler-rt trunk r305575:

https://llvm.org/svn/llvm-project/compiler-rt/trunk@305575
This commit is contained in:
Dimitry Andric 2017-06-16 21:03:53 +00:00
parent 7edd24de96
commit 4658ff5fee
30 changed files with 380 additions and 49 deletions

View File

@ -235,6 +235,8 @@ struct Allocator {
AllocatorCache fallback_allocator_cache;
QuarantineCache fallback_quarantine_cache;
atomic_uint8_t rss_limit_exceeded;
// ------------------- Options --------------------------
atomic_uint16_t min_redzone;
atomic_uint16_t max_redzone;
@ -268,6 +270,14 @@ struct Allocator {
SharedInitCode(options);
}
// Returns whether the soft-RSS-limit callback has flagged this process as
// over its RSS limit. Relaxed ordering: this is an advisory flag and no
// other memory accesses need to be ordered against it.
bool RssLimitExceeded() {
return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
}
// Records whether the RSS limit is currently exceeded; read by
// RssLimitExceeded() before servicing new allocations.
void SetRssLimitExceeded(bool limit_exceeded) {
atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
}
void RePoisonChunk(uptr chunk) {
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
@ -363,6 +373,8 @@ struct Allocator {
AllocType alloc_type, bool can_fill) {
if (UNLIKELY(!asan_inited))
AsanInitFromRtl();
if (RssLimitExceeded())
return allocator.ReturnNullOrDieOnOOM();
Flags &fl = *flags();
CHECK(stack);
const uptr min_alignment = SHADOW_GRANULARITY;
@ -400,16 +412,15 @@ struct Allocator {
AsanThread *t = GetCurrentThread();
void *allocated;
bool check_rss_limit = true;
if (t) {
AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
allocated =
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
allocator.Allocate(cache, needed_size, 8, false);
} else {
SpinMutexLock l(&fallback_mutex);
AllocatorCache *cache = &fallback_allocator_cache;
allocated =
allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
allocator.Allocate(cache, needed_size, 8, false);
}
if (!allocated) return allocator.ReturnNullOrDieOnOOM();
@ -866,8 +877,8 @@ void asan_mz_force_unlock() {
instance.ForceUnlock();
}
void AsanSoftRssLimitExceededCallback(bool exceeded) {
instance.allocator.SetRssLimitIsExceeded(exceeded);
void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
instance.SetRssLimitExceeded(limit_exceeded);
}
} // namespace __asan

View File

@ -43,12 +43,12 @@ class CombinedAllocator {
}
void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
bool cleared = false, bool check_rss_limit = false) {
bool cleared = false) {
// Returning 0 on malloc(0) may break a lot of code.
if (size == 0)
size = 1;
if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
if (size + alignment < size)
return ReturnNullOrDieOnBadRequest();
uptr original_size = size;
// If alignment requirements are to be fulfilled by the frontend allocator
// rather than by the primary or secondary, passing an alignment lower than
@ -89,7 +89,8 @@ class CombinedAllocator {
}
void *ReturnNullOrDieOnOOM() {
if (MayReturnNull()) return nullptr;
if (MayReturnNull())
return nullptr;
ReportAllocatorCannotReturnNull(true);
}
@ -106,15 +107,6 @@ class CombinedAllocator {
primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
}
bool RssLimitIsExceeded() {
return atomic_load(&rss_limit_is_exceeded_, memory_order_acquire);
}
void SetRssLimitIsExceeded(bool rss_limit_is_exceeded) {
atomic_store(&rss_limit_is_exceeded_, rss_limit_is_exceeded,
memory_order_release);
}
void Deallocate(AllocatorCache *cache, void *p) {
if (!p) return;
if (primary_.PointerIsMine(p))
@ -228,6 +220,5 @@ class CombinedAllocator {
SecondaryAllocator secondary_;
AllocatorGlobalStats stats_;
atomic_uint8_t may_return_null_;
atomic_uint8_t rss_limit_is_exceeded_;
};

View File

@ -36,9 +36,12 @@ class LargeMmapAllocator {
if (alignment > page_size_)
map_size += alignment;
// Overflow.
if (map_size < size) return ReturnNullOrDieOnBadRequest();
if (map_size < size)
return ReturnNullOrDieOnBadRequest();
uptr map_beg = reinterpret_cast<uptr>(
MmapOrDie(map_size, "LargeMmapAllocator"));
MmapOrDieOnFatalError(map_size, "LargeMmapAllocator"));
if (!map_beg)
return ReturnNullOrDieOnOOM();
CHECK(IsAligned(map_beg, page_size_));
MapUnmapCallback().OnMap(map_beg, map_size);
uptr map_end = map_beg + map_size;

View File

@ -85,6 +85,9 @@ INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
// case returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);

View File

@ -93,6 +93,9 @@ COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,
COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,
COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))
#undef COMMON_FLAG_HANDLE_SIGNAL_HELP
COMMON_FLAG(bool, allow_user_segv_handler, true,
"Deprecated. True has no effect, use handle_sigbus=1. If false, "
"handle_*=1 will be upgraded to handle_*=2.")
COMMON_FLAG(bool, use_sigaltstack, true,
"If set, uses alternate stack for signal handling.")
COMMON_FLAG(bool, detect_deadlocks, false,

View File

@ -1396,7 +1396,7 @@ AndroidApiLevel AndroidGetApiLevel() {
#endif
HandleSignalMode GetHandleSignalMode(int signum) {
static HandleSignalMode GetHandleSignalModeImpl(int signum) {
switch (signum) {
case SIGABRT:
return common_flags()->handle_abort;
@ -1412,6 +1412,13 @@ HandleSignalMode GetHandleSignalMode(int signum) {
return kHandleSignalNo;
}
// Returns how signum should be handled, honoring the deprecated
// allow_user_segv_handler flag: when that flag is off, a plain "handle"
// request is upgraded to exclusive handling so user-installed handlers
// cannot override the sanitizer's.
HandleSignalMode GetHandleSignalMode(int signum) {
  const HandleSignalMode mode = GetHandleSignalModeImpl(signum);
  if (mode != kHandleSignalYes)
    return mode;
  return common_flags()->allow_user_segv_handler ? kHandleSignalYes
                                                 : kHandleSignalExclusive;
}
#if !SANITIZER_GO
void *internal_start_thread(void(*func)(void *arg), void *arg) {
// Start the thread with signals blocked, otherwise it can steal user signals.

View File

@ -113,7 +113,6 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
pthread_attr_destroy(&attr);
CHECK_LE(stacksize, kMaxThreadStackSize); // Sanity check.
*stack_top = (uptr)stackaddr + stacksize;
*stack_bottom = (uptr)stackaddr;
}

View File

@ -414,10 +414,7 @@ void ListOfModules::init() {
memory_mapping.DumpListOfModules(&modules_);
}
HandleSignalMode GetHandleSignalMode(int signum) {
// Handling fatal signals on watchOS and tvOS devices is disallowed.
if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
return kHandleSignalNo;
static HandleSignalMode GetHandleSignalModeImpl(int signum) {
switch (signum) {
case SIGABRT:
return common_flags()->handle_abort;
@ -433,6 +430,16 @@ HandleSignalMode GetHandleSignalMode(int signum) {
return kHandleSignalNo;
}
// Returns how signum should be handled on Darwin platforms.
HandleSignalMode GetHandleSignalMode(int signum) {
  // Handling fatal signals on watchOS and tvOS devices is disallowed.
  if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
    return kHandleSignalNo;
  const HandleSignalMode mode = GetHandleSignalModeImpl(signum);
  if (mode != kHandleSignalYes)
    return mode;
  // Deprecated allow_user_segv_handler=0 upgrades plain handling to
  // exclusive so user-installed handlers cannot override the sanitizer's.
  return common_flags()->allow_user_segv_handler ? kHandleSignalYes
                                                 : kHandleSignalExclusive;
}
MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
MacosVersion GetMacosVersionInternal() {

View File

@ -22,6 +22,7 @@
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>
@ -145,6 +146,21 @@ void UnmapOrDie(void *addr, uptr size) {
DecreaseTotalMmap(size);
}
// Like MmapOrDie, except an out-of-memory condition (ENOMEM) is tolerated:
// nullptr is returned instead of aborting the process. Any other mmap
// failure is still fatal.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr p = internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON, -1, 0);
  int mmap_errno;
  if (internal_iserror(p, &mmap_errno)) {
    if (mmap_errno != ENOMEM)
      ReportMmapFailureAndDie(size, mem_type, "allocate", mmap_errno);
    return nullptr;  // Tolerated OOM: let the caller decide what to do.
  }
  IncreaseTotalMmap(size);
  return reinterpret_cast<void *>(p);
}
// We want to map a chunk of address space aligned to 'alignment'.
// We do it by mapping a bit more and then unmapping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.

View File

@ -131,6 +131,16 @@ void UnmapOrDie(void *addr, uptr size) {
}
}
// Behaves like MmapOrDie but tolerates out-of-memory: when VirtualAlloc
// fails with ERROR_NOT_ENOUGH_MEMORY this returns nullptr (VirtualAlloc
// itself returned 0); any other failure is fatal.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (rv == 0) {
error_t last_error = GetLastError();
if (last_error != ERROR_NOT_ENOUGH_MEMORY)
ReportMmapFailureAndDie(size, mem_type, "allocate", last_error);
}
return rv;
}
// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
CHECK(IsPowerOfTwo(size));

View File

@ -92,7 +92,8 @@ static const char *ReportTypeString(ReportType typ, uptr tag) {
if (typ == ReportTypeVptrUseAfterFree)
return "heap-use-after-free (virtual call vs free)";
if (typ == ReportTypeExternalRace) {
return GetReportHeaderFromTag(tag) ?: "race on external object";
const char *str = GetReportHeaderFromTag(tag);
return str ? str : "race on external object";
}
if (typ == ReportTypeThreadLeak)
return "thread leak";
@ -170,8 +171,9 @@ static void PrintMop(const ReportMop *mop, bool first) {
MopDesc(first, mop->write, mop->atomic), mop->size,
(void *)mop->addr, thread_name(thrbuf, mop->tid));
} else {
const char *object_type =
GetObjectTypeFromTag(mop->external_tag) ?: "external object";
const char *object_type = GetObjectTypeFromTag(mop->external_tag);
if (object_type == nullptr)
object_type = "external object";
Printf(" %s access of %s at %p by %s",
ExternalMopDesc(first, mop->write), object_type,
(void *)mop->addr, thread_name(thrbuf, mop->tid));

View File

@ -83,7 +83,7 @@ struct SyncVar {
}
bool IsFlagSet(u32 f) const {
return atomic_load_relaxed(&flags);
return atomic_load_relaxed(&flags) & f;
}
void SetFlags(u32 f) {

View File

@ -566,8 +566,14 @@ static void handlePointerOverflowImpl(PointerOverflowData *Data,
ScopedReport R(Opts, Loc, ET);
Diag(Loc, DL_Error, "pointer index expression with base %0 overflowed to %1")
<< (void *)Base << (void*)Result;
if ((sptr(Base) >= 0) == (sptr(Result) >= 0))
Diag(Loc, DL_Error, "unsigned pointer index expression result is %0, "
"preceding its base %1")
<< (void *)Result << (void *)Base;
else
Diag(Loc, DL_Error,
"pointer index expression with base %0 overflowed to %1")
<< (void *)Base << (void *)Result;
}
void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data,

View File

@ -197,9 +197,9 @@ struct VtablePrefix {
};
VtablePrefix *getVtablePrefix(void *Vtable) {
VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable);
if (!IsAccessibleMemoryRange((uptr)Vptr, sizeof(VtablePrefix)))
return nullptr;
VtablePrefix *Prefix = Vptr - 1;
if (!IsAccessibleMemoryRange((uptr)Prefix, sizeof(VtablePrefix)))
return nullptr;
if (!Prefix->TypeInfo)
// This can't possibly be a valid vtable.
return nullptr;

View File

@ -0,0 +1,82 @@
// Test the behavior of malloc/calloc/realloc when the allocation causes OOM
// in the secondary allocator.
// By default (allocator_may_return_null=0) the process should crash.
// With allocator_may_return_null=1 the allocator should return 0.
// Set the limit to 20.5T on 64 bits to account for ASan shadow memory,
// allocator buffers etc. so that the test allocation of ~1T will trigger OOM.
// Limit this test to Linux since we're relying on allocator internal
// limits (shadow memory size, allocation limits etc.)
// RUN: %clangxx_asan -O0 %s -o %t
// RUN: ulimit -v 22024290304
// RUN: not %run %t malloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-MALLOC,CHECK-CRASH
// RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t malloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-MALLOC,CHECK-CRASH
// RUN: %env_asan_opts=allocator_may_return_null=1 %run %t malloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-MALLOC,CHECK-NULL
// RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t calloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-CALLOC,CHECK-CRASH
// RUN: %env_asan_opts=allocator_may_return_null=1 %run %t calloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-CALLOC,CHECK-NULL
// RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t realloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-REALLOC,CHECK-CRASH
// RUN: %env_asan_opts=allocator_may_return_null=1 %run %t realloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-REALLOC,CHECK-NULL
// RUN: %env_asan_opts=allocator_may_return_null=0 not %run %t realloc-after-malloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-MALLOC-REALLOC,CHECK-CRASH
// RUN: %env_asan_opts=allocator_may_return_null=1 %run %t realloc-after-malloc 2>&1 \
// RUN: | FileCheck %s --check-prefixes=CHECK-MALLOC-REALLOC,CHECK-NULL
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
// Driver: argv[1] selects which allocation path (malloc/calloc/realloc/
// realloc-after-malloc) is exercised; under the ulimit set in the RUN lines
// above each path is expected to hit the allocator's OOM handling.
int main(int argc, char **argv) {
assert(argc == 2);
const char *action = argv[1];
// Echo the action first so FileCheck can pair output with the right prefix.
fprintf(stderr, "%s:\n", action);
// Allocate just a bit less than max allocation size enforced by ASan's
// allocator (currently 1T and 3G).
const size_t size =
#if __LP64__
(1ULL << 40) - (1ULL << 30);
#else
(3ULL << 30) - (1ULL << 20);
#endif
void *x = 0;
if (!strcmp(action, "malloc")) {
x = malloc(size);
} else if (!strcmp(action, "calloc")) {
x = calloc(size / 4, 4);
} else if (!strcmp(action, "realloc")) {
x = realloc(0, size);
} else if (!strcmp(action, "realloc-after-malloc")) {
// Grow a live chunk: the huge realloc fails under the OOM conditions of
// this test, so `t` must remain intact and usable afterwards.
char *t = (char*)malloc(100);
*t = 42;
x = realloc(t, size);
assert(*t == 42);
free(t);
} else {
assert(0);
}
// The NULL pointer is printed differently on different systems, while (long)0
// is always the same.
fprintf(stderr, "x: %lx\n", (long)x);
free(x);
return x != 0;
}
// CHECK-MALLOC: malloc:
// CHECK-CALLOC: calloc:
// CHECK-REALLOC: realloc:
// CHECK-MALLOC-REALLOC: realloc-after-malloc:
// CHECK-CRASH: AddressSanitizer's allocator is terminating the process
// CHECK-NULL: x: 0

View File

@ -10,6 +10,14 @@
// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2
// RUN: %clangxx_asan -O2 %s -o %t && %env_asan_opts=handle_segv=2 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2
// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=0:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0
// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=1:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2
// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=2:allow_user_segv_handler=0 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2
// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=0:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK0
// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=1:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK1
// RUN: %clangxx_asan -O0 %s -o %t && %env_asan_opts=handle_segv=2:allow_user_segv_handler=1 not %run %t 2>&1 | FileCheck %s --check-prefix=CHECK2
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

View File

@ -1,6 +1,3 @@
# The cfi-icall checker is only supported on x86 and x86_64 for now.
if config.root.host_arch not in ['x86', 'x86_64']:
config.unsupported = True
if config.root.use_thinlto:
config.unsupported = True

View File

@ -5,7 +5,6 @@
// CFI-icall is not implemented in thinlto mode => ".cfi" suffixes are missing
// in sanstats output.
// XFAIL: thinlto
struct ABase {};

View File

@ -1,6 +1,3 @@
# The cfi-icall checker is only supported on x86 and x86_64 for now.
if config.root.host_arch not in ['x86', 'x86_64']:
config.unsupported = True
if config.use_thinlto:
config.unsupported = True

View File

@ -0,0 +1,41 @@
// Test that the checking is done with the actual type of f() even when the
// calling module has an incorrect declaration. Test a mix of lto types.
//
// -flto below overrides -flto=thin in %clang_cfi
// RUN: %clang_cfi %s -DMODULE_A -c -o %t1_a.o
// RUN: %clang_cfi %s -DMODULE_B -c -o %t1_b.o -flto
// RUN: %clang_cfi %t1_a.o %t1_b.o -o %t1
// RUN: %expect_crash %t1 2>&1 | FileCheck --check-prefix=CFI %s
//
// RUN: %clang_cfi %s -DMODULE_A -c -o %t2_a.o -flto
// RUN: %clang_cfi %s -DMODULE_B -c -o %t2_b.o
// RUN: %clang_cfi %t2_a.o %t2_b.o -o %t2
// RUN: %expect_crash %t2 2>&1 | FileCheck --check-prefix=CFI %s
//
// RUN: %clang_cfi %s -DMODULE_A -c -o %t3_a.o
// RUN: %clang_cfi %s -DMODULE_B -c -o %t3_b.o
// RUN: %clang_cfi %t3_a.o %t3_b.o -o %t3
// RUN: %expect_crash %t3 2>&1 | FileCheck --check-prefix=CFI %s
//
// REQUIRES: thinlto
#include <stdio.h>
#if defined(MODULE_B)
// The callee's real type is int(); MODULE_A declares it as void(). CFI must
// check against the actual definition's type, regardless of the LTO mix.
int f() {
return 42;
}
#elif defined(MODULE_A)
// Deliberately wrong declaration: the signature mismatch makes the indirect
// call below a CFI violation.
void f();
int main() {
// CFI: 1
fprintf(stderr, "1\n");
// volatile keeps the call genuinely indirect (no folding to a direct call).
void (*volatile p)() = &f;
p();
// CFI-NOT: 2
fprintf(stderr, "2\n");
}
#endif

View File

@ -6,11 +6,11 @@
// A very primitive mutex annotated with tsan annotations.
class Mutex {
public:
Mutex(bool prof = true)
Mutex(bool prof, unsigned flags)
: prof_(prof)
, locked_(false)
, seq_(0) {
__tsan_mutex_create(this, 0);
__tsan_mutex_create(this, flags);
}
~Mutex() {
@ -87,5 +87,5 @@ class Mutex {
}
};
Mutex Mutex::prof_mu_(false);
Mutex Mutex::prof_mu_(false, __tsan_mutex_linker_init);
int Mutex::prof_data_;

View File

@ -4,7 +4,7 @@
// Test that custom annotations provide normal mutex synchronization
// (no race reports for properly protected critical sections).
Mutex mu;
Mutex mu(true, 0);
long data;
void *thr(void *arg) {

View File

@ -3,7 +3,7 @@
// Test that failed TryLock does not induce parasitic synchronization.
Mutex mu;
Mutex mu(true, 0);
long data;
void *thr(void *arg) {

View File

@ -3,7 +3,7 @@
// Test that Broadcast does not induce parasitic synchronization.
Mutex mu;
Mutex mu(true, 0);
long data;
void *thr(void *arg) {

View File

@ -0,0 +1,46 @@
// RUN: %clangxx_tsan -O1 --std=c++11 %s -o %t
// RUN: %env_tsan_opts=report_destroy_locked=0 %run %t 2>&1 | FileCheck %s
#include "custom_mutex.h"
// Regression test for a bug.
// Thr1 destroys a locked mutex, previously such mutex was not removed from
// sync map and as the result subsequent uses of a mutex located at the same
// address caused false race reports.
Mutex mu(false, __tsan_mutex_write_reentrant);
long data;
// Destroys the mutex while it is held (allowed here because the RUN line
// sets report_destroy_locked=0) and constructs a fresh Mutex at the same
// address.
void *thr1(void *arg) {
mu.Lock();
mu.~Mutex();
new(&mu) Mutex(true, __tsan_mutex_write_reentrant);
return 0;
}
// Uses the re-created mutex at the same address; increments of `data` here
// and in main must synchronize without any false race report.
void *thr2(void *arg) {
barrier_wait(&barrier);
mu.Lock();
data++;
mu.Unlock();
return 0;
}
int main() {
// Phase 1: run thr1 to completion (it destroys and recreates mu).
barrier_init(&barrier, 2);
pthread_t th;
pthread_create(&th, 0, thr1, 0);
pthread_join(th, 0);
// Phase 2: both threads increment `data` under the recreated mutex.
barrier_init(&barrier, 2);
pthread_create(&th, 0, thr2, 0);
mu.Lock();
data++;
mu.Unlock();
barrier_wait(&barrier);
pthread_join(th, 0);
fprintf(stderr, "DONE\n");
return 0;
}
// CHECK-NOT: WARNING: ThreadSanitizer: data race
// CHECK: DONE

View File

@ -0,0 +1,13 @@
// RUN: %clangxx -fsanitize=pointer-overflow %s -o %t
// RUN: %t 2>&1 | FileCheck %s
// offset = -1 converted to unsigned long long is a huge value; adding it
// wraps the pointer so the result precedes its base, which is exactly what
// the pointer-overflow check diagnoses below. (No lines may be inserted
// between the CHECK comment and the expression: CHECK uses [[@LINE+1]].)
int main(int argc, char *argv[]) {
char c;
char *p = &c;
unsigned long long offset = -1;
// CHECK: unsigned-index-expression.cpp:[[@LINE+1]]:15: runtime error: unsigned pointer index expression result is 0x{{.*}}, preceding its base 0x{{.*}}
char *q = p + offset;
return 0;
}

View File

@ -0,0 +1,50 @@
// RUN: %clangxx -std=c++11 -frtti -fsanitize=vptr -g %s -O3 -o %t
// RUN: %run %t &> %t.log
// RUN: cat %t.log | not count 0 && FileCheck --input-file %t.log %s || cat %t.log | count 0
// REQUIRES: cxxabi
#include <sys/mman.h>
#include <unistd.h>
// Polymorphic base: has a vptr, which is what the vptr sanitizer inspects.
class Base {
public:
int i;
virtual void print() {}
};
// Derived type used for the deliberately invalid cast in main() below.
class Derived : public Base {
public:
void print() {}
};
int main() {
int page_size = getpagesize();
// A PROT_NONE page: any read from it faults.
void *non_accessible = mmap(nullptr, page_size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (non_accessible == MAP_FAILED)
return 0;
// A readable page mapped directly after the inaccessible one.
void *accessible = mmap((char*)non_accessible + page_size, page_size,
PROT_READ | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (accessible == MAP_FAILED)
return 0;
char *c = new char[sizeof(Derived)];
// The goal is to trigger a condition when Vptr points to accessible memory,
// but VptrPrefix does not. That has been triggering SIGSEGV in UBSan code.
void **vtable_ptr = reinterpret_cast<void **>(c);
*vtable_ptr = (void*)accessible;
Derived *list = (Derived *)c;
// CHECK: PR33221.cpp:[[@LINE+2]]:19: runtime error: member access within address {{.*}} which does not point to an object of type 'Base'
// CHECK-NEXT: invalid vptr
int foo = list->i;
return 0;
}

View File

@ -0,0 +1,9 @@
def getRoot(config):
    # Walk the chain of parent configs up to the top-level (root) config.
    while config.parent:
        config = config.parent
    return config
# Restrict this test directory to Linux hosts.
root = getRoot(config)
if root.host_os not in ['Linux']:
    config.unsupported = True

View File

@ -18,7 +18,7 @@ public:
int main() {
char *c = new char[sizeof(Derived)];
memset((void *)c, 0, sizeof(Derived));
memset((void *)c, 0xFF, sizeof(Derived));
Derived *list = (Derived *)c;
// CHECK: PR33221.cpp:[[@LINE+2]]:19: runtime error: member access within address {{.*}} which does not point to an object of type 'Base'

View File

@ -0,0 +1,31 @@
// Intercept the implicit 'this' argument of class member functions.
//
// RUN: %clangxx_xray -g -std=c++11 %s -o %t
// RUN: rm log-args-this-* || true
// RUN: XRAY_OPTIONS="patch_premain=true verbosity=1 xray_logfile_base=log-args-this-" %run %t
//
// XFAIL: arm || aarch64 || mips
// UNSUPPORTED: powerpc64le
#include "xray/xray_interface.h"
#include <cassert>
// Member function instrumented with xray_log_args(1): argument 1 of a
// non-static member function is the implicit `this` pointer, which should
// be delivered to the arg1 handler.
class A {
public:
[[clang::xray_always_instrument, clang::xray_log_args(1)]] void f() {
// does nothing.
}
};
volatile uint64_t captured = 0;
// XRay arg1 handler: records the logged first argument (here, `this`).
void handler(int32_t, XRayEntryType, uint64_t arg1) {
captured = arg1;
}
int main() {
__xray_set_handler_arg1(handler);
A instance;
instance.f();
__xray_remove_handler_arg1();
// The logged argument must equal the address of the object f() ran on.
assert(captured == (uint64_t)&instance);
}