Merge compiler-rt release_40 branch r292009.

Dimitry Andric 2017-01-14 22:16:01 +00:00
commit 6313c2ae8d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang400-import/; revision=312198
54 changed files with 428 additions and 110 deletions

View File

@ -77,12 +77,13 @@ static struct AsanDeactivatedFlags {
void Print() {
Report(
"quarantine_size_mb %d, max_redzone %d, poison_heap %d, "
"malloc_context_size %d, alloc_dealloc_mismatch %d, "
"allocator_may_return_null %d, coverage %d, coverage_dir %s, "
"allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb, allocator_options.max_redzone,
poison_heap, malloc_context_size,
"quarantine_size_mb %d, thread_local_quarantine_size_kb %d, "
"max_redzone %d, poison_heap %d, malloc_context_size %d, "
"alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, "
"coverage_dir %s, allocator_release_to_os_interval_ms %d\n",
allocator_options.quarantine_size_mb,
allocator_options.thread_local_quarantine_size_kb,
allocator_options.max_redzone, poison_heap, malloc_context_size,
allocator_options.alloc_dealloc_mismatch,
allocator_options.may_return_null, coverage, coverage_dir,
allocator_options.release_to_os_interval_ms);
@ -109,6 +110,7 @@ void AsanDeactivate() {
AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
disabled.quarantine_size_mb = 0;
disabled.thread_local_quarantine_size_kb = 0;
disabled.min_redzone = 16; // Redzone must be at least 16 bytes long.
disabled.max_redzone = 16;
disabled.alloc_dealloc_mismatch = false;

View File

@ -24,6 +24,7 @@
ASAN_ACTIVATION_FLAG(int, redzone)
ASAN_ACTIVATION_FLAG(int, max_redzone)
ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
ASAN_ACTIVATION_FLAG(int, thread_local_quarantine_size_kb)
ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
ASAN_ACTIVATION_FLAG(bool, poison_heap)

View File

@ -269,24 +269,24 @@ struct Allocator {
}
void RePoisonChunk(uptr chunk) {
// This could a user-facing chunk (with redzones), or some internal
// This could be a user-facing chunk (with redzones), or some internal
// housekeeping chunk, like TransferBatch. Start by assuming the former.
AsanChunk *ac = GetAsanChunk((void *)chunk);
uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
uptr beg = ac->Beg();
uptr end = ac->Beg() + ac->UsedSize(true);
uptr chunk_end = chunk + allocated_size;
if (chunk < beg && beg < end && end <= chunk_end) {
// Looks like a valid AsanChunk. Or maybe not. Be conservative and only
// poison the redzones.
if (chunk < beg && beg < end && end <= chunk_end &&
ac->chunk_state == CHUNK_ALLOCATED) {
// Looks like a valid AsanChunk in use, poison redzones only.
PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
FastPoisonShadowPartialRightRedzone(
end_aligned_down, end - end_aligned_down,
chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
} else {
// This can not be an AsanChunk. Poison everything. It may be reused as
// AsanChunk later.
// This is either not an AsanChunk or freed or quarantined AsanChunk.
// In either case, poison everything.
PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
}
}
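
For orientation, the validity test above encodes the chunk layout the allocator expects. A minimal standalone model of that check (illustrative names, not the real AsanChunk API):

    #include <cstdint>
    using uptr = uintptr_t;

    // Layout assumed:  chunk |left rz| beg |user data| end |right rz| chunk_end
    bool LooksLikeLiveChunk(uptr chunk, uptr beg, uptr end, uptr chunk_end,
                            bool allocated /* chunk_state == CHUNK_ALLOCATED */) {
      // Anything that fails this is conservatively poisoned in full, since
      // the block may be reused as an AsanChunk later.
      return chunk < beg && beg < end && end <= chunk_end && allocated;
    }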

View File

@ -169,6 +169,11 @@ void InitializeFlags() {
(ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
}
if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) {
Report("%s: thread_local_quarantine_size_kb can be set to 0 only when "
"quarantine_size_mb is set to 0\n", SanitizerToolName);
Die();
}
if (!f->replace_str && common_flags()->intercept_strlen) {
Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
"Use intercept_strlen=0 to disable it.");

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__adddf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vadd.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__adddf3vfp)
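
To see what the new #if buys, compare the two ABIs this stub serves; a hedged C-level sketch (the extern declaration mirrors the comment headers in these files):

    extern "C" double __adddf3vfp(double a, double b);

    double demo() {
      // Hard-float (COMPILER_RT_ARMHF_TARGET): a and b arrive in d0/d1 and
      // the sum returns in d0, so a single vadd.f64 suffices. Soft-float:
      // the raw 64-bit patterns arrive in r0/r1 and r2/r3 and must be
      // shuttled through VFP registers, as in the #else branch above.
      return __adddf3vfp(1.5, 2.25); // 3.75
    }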

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__addsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vadd.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vadd.f32 s14, s14, s15
vmov r0, s14 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__addsf3vfp)

View File

@ -43,8 +43,14 @@
.thumb
#endif
.p2align 2
@ int __eqsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
// Make copies of a and b with the sign bit shifted off the top. These will
// be used to detect zeros and NaNs.
#if __ARM_ARCH_ISA_THUMB == 1
@ -166,16 +172,23 @@ LOCAL_LABEL(CHECK_NAN):
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__lesf2, __eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__ltsf2, __eqsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__nesf2, __eqsf2)
.p2align 2
@ int __gtsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtsf2)
// Identical to the preceding, except that we return -1 for NaN values.
// Given that the two paths share so much code, one might be tempted to
// unify them; however, the extra code needed to do so makes the code size
// to performance tradeoff very hard to justify for such small functions.
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
#if __ARM_ARCH_ISA_THUMB == 1
push {r6, lr}
lsls r2, r0, #1
@ -215,6 +228,8 @@ LOCAL_LABEL(CHECK_NAN_2):
6:
pop {r6, pc}
#else
mov r2, r0, lsl #1
mov r3, r1, lsl #1
orrs r12, r2, r3, lsr #1
it ne
eorsne r12, r0, r1
@ -233,10 +248,17 @@ LOCAL_LABEL(CHECK_NAN_2):
JMP(lr)
#endif
END_COMPILERRT_FUNCTION(__gtsf2)
DEFINE_COMPILERRT_FUNCTION_ALIAS(__gesf2, __gtsf2)
.p2align 2
@ int __unordsf2(float a, float b)
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov r0, s0
vmov r1, s1
#endif
// Return 1 for NaN values, 0 otherwise.
lsls r2, r0, #1
lsls r3, r1, #1
@ -260,7 +282,15 @@ DEFINE_COMPILERRT_FUNCTION(__unordsf2)
JMP(lr)
END_COMPILERRT_FUNCTION(__unordsf2)
#if defined(COMPILER_RT_ARMHF_TARGET)
DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmpun)
vmov s0, r0
vmov s1, r1
b SYMBOL_NAME(__unordsf2)
END_COMPILERRT_FUNCTION(__aeabi_fcmpun)
#else
DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fcmpun, __unordsf2)
#endif
NO_EXEC_STACK_DIRECTIVE
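
For reference, these helpers follow the usual libgcc soft-float comparison conventions, which is why the NaN handling above matters; a small sketch of how compiled code consumes them:

    extern "C" int __eqsf2(float, float);  // 0 iff a == b; nonzero on NaN
    extern "C" int __gtsf2(float, float);  // > 0 iff a > b; -1 on NaN

    bool fp_equal(float a, float b)   { return __eqsf2(a, b) == 0; }
    bool fp_greater(float a, float b) { return __gtsf2(a, b) > 0; }
    // With a NaN operand, __eqsf2 returns nonzero and __gtsf2 returns -1,
    // so both predicates correctly evaluate to false.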

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__divdf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vdiv.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vdiv.f64 d5, d6, d7
vmov r0, r1, d5 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__divdf3vfp)

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__divsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vdiv.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vdiv.f32 s13, s14, s15
vmov r0, s13 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__divsf3vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
moveq r0, #1 // set result register to 1 if equal
movne r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__eqsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
moveq r0, #1 // set result register to 1 if equal
movne r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__extendsfdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.f64.f32 d0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.f64.f32 d7, s15 // convert single to double
vmov r0, r1, d7 // return result in r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__extendsfdf2vfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixdfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.s32.f64 s0, d0
vmov r0, s0
#else
vmov d7, r0, r1 // load double register from R0/R1
vcvt.s32.f64 s15, d7 // convert double to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixdfsivfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixsfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.s32.f32 s0, s0
vmov r0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.s32.f32 s15, s15 // convert single to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixsfsivfp)

View File

@ -20,9 +20,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixunsdfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.u32.f64 s0, d0
vmov r0, s0
#else
vmov d7, r0, r1 // load double register from R0/R1
vcvt.u32.f64 s15, d7 // convert double to 32-bit int into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixunsdfsivfp)

View File

@ -20,9 +20,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__fixunssfsivfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.u32.f32 s0, s0
vmov r0, s0
#else
vmov s15, r0 // load float register from R0
vcvt.u32.f32 s15, s15 // convert single to 32-bit unsigned into s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__fixunssfsivfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatsidfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f64.s32 d0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f64.s32 d7, s15 // convert 32-bit int in s15 to double in d7
vmov r0, r1, d7 // move d7 to result register pair r0/r1
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatsidfvfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatsisfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f32.s32 s0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f32.s32 s15, s15 // convert 32-bit int in s15 to float in s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatsisfvfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssidfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f64.u32 d0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f64.u32 d7, s15 // convert 32-bit int in s15 to double in d7
vmov r0, r1, d7 // move d7 to result register pair r0/r1
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatunssidfvfp)

View File

@ -19,9 +19,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__floatunssisfvfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmov s0, r0
vcvt.f32.u32 s0, s0
#else
vmov s15, r0 // move int to float register s15
vcvt.f32.u32 s15, s15 // convert 32-bit int in s15 to float in s15
vmov r0, s15 // move s15 to result register
#endif
bx lr
END_COMPILERRT_FUNCTION(__floatunssisfvfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gedf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movge r0, #1 // set result register to 1 if greater than or equal
movlt r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movgt r0, #1 // set result register to 1 if greater than
movle r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__gtsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movgt r0, #1 // set result register to 1 if greater than
movle r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ledf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__lesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movls r0, #1 // set result register to 1 if less than or equal
movhi r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ltdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movmi r0, #1 // set result register to 1 if less than
movpl r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__ltsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movmi r0, #1 // set result register to 1 if less than
movpl r0, #0

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__muldf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmul.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vmul.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__muldf3vfp)

View File

@ -18,9 +18,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__mulsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vmul.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vmul.f32 s13, s14, s15
#endif
vmov r0, s13 // move result back to r0
bx lr
END_COMPILERRT_FUNCTION(__mulsf3vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__nedf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movne r0, #1 // set result register to 1 if unequal
moveq r0, #0

View File

@ -18,7 +18,11 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__negdf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vneg.f64 d0, d0
#else
eor r1, r1, #-2147483648 // flip sign bit on double in r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__negdf2vfp)

View File

@ -18,7 +18,11 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__negsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vneg.f32 s0, s0
#else
eor r0, r0, #-2147483648 // flip sign bit on float in r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__negsf2vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__nesf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movne r0, #1 // set result register to 1 if unequal
moveq r0, #0

View File

@ -18,10 +18,14 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__subdf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vsub.f64 d0, d0, d1
#else
vmov d6, r0, r1 // move first param from r0/r1 pair into d6
vmov d7, r2, r3 // move second param from r2/r3 pair into d7
vsub.f64 d6, d6, d7
vmov r0, r1, d6 // move result back to r0/r1 pair
#endif
bx lr
END_COMPILERRT_FUNCTION(__subdf3vfp)

View File

@ -12,17 +12,21 @@
//
// extern float __subsf3vfp(float a, float b);
//
// Returns the difference between two single precision floating point numbers
// using the Darwin calling convention where single arguments are passed
// like 32-bit ints.
//
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__subsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vsub.f32 s0, s0, s1
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vsub.f32 s14, s14, s15
vmov r0, s14 // move result back to r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__subsf3vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__truncdfsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcvt.f32.f64 s0, d0
#else
vmov d7, r0, r1 // load double from r0/r1 pair
vcvt.f32.f64 s15, d7 // convert double to single (truncate precision)
vmov r0, s15 // return result in r0
#endif
bx lr
END_COMPILERRT_FUNCTION(__truncdfsf2vfp)

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unorddf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f64 d0, d1
#else
vmov d6, r0, r1 // load r0/r1 pair in double register
vmov d7, r2, r3 // load r2/r3 pair in double register
vcmp.f64 d6, d7
#endif
vmrs apsr_nzcv, fpscr
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0

View File

@ -19,9 +19,13 @@
.syntax unified
.p2align 2
DEFINE_COMPILERRT_FUNCTION(__unordsf2vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vcmp.f32 s0, s1
#else
vmov s14, r0 // move from GPR 0 to float register
vmov s15, r1 // move from GPR 1 to float register
vcmp.f32 s14, s15
#endif
vmrs apsr_nzcv, fpscr
movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
movvc r0, #0

View File

@ -690,6 +690,7 @@ inline const char *ModuleArchToString(ModuleArch arch) {
return "arm64";
}
CHECK(0 && "Invalid module arch");
return "";
}
const uptr kModuleUUIDSize = 16;

View File

@ -954,7 +954,9 @@ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_init() {
}
SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
coverage_data.DumpAll();
#if SANITIZER_LINUX
__sanitizer_dump_trace_pc_guard_coverage();
#endif
}
SANITIZER_INTERFACE_ATTRIBUTE void
__sanitizer_cov_module_init(s32 *guards, uptr npcs, u8 *counters,
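
Callers can trigger the dump path shown above explicitly; a minimal sketch using the public coverage interface:

    extern "C" void __sanitizer_cov_dump();

    void checkpoint() {
      // Dumps accumulated coverage; after this change, on Linux it also
      // emits trace-pc-guard data via __sanitizer_dump_trace_pc_guard_coverage().
      __sanitizer_cov_dump();
    }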

View File

@ -78,10 +78,12 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
lib->templ, mod.full_name());
lib->loaded = true;
lib->name = internal_strdup(mod.full_name());
const uptr idx = atomic_load(&loaded_count_, memory_order_relaxed);
code_ranges_[idx].begin = range.beg;
code_ranges_[idx].end = range.end;
atomic_store(&loaded_count_, idx + 1, memory_order_release);
const uptr idx =
atomic_load(&ignored_ranges_count_, memory_order_relaxed);
CHECK_LT(idx, kMaxLibs);
ignored_code_ranges_[idx].begin = range.beg;
ignored_code_ranges_[idx].end = range.end;
atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
break;
}
}
@ -92,6 +94,29 @@ void LibIgnore::OnLibraryLoaded(const char *name) {
Die();
}
}
// Track instrumented ranges.
if (track_instrumented_libs_) {
for (const auto &mod : modules) {
if (!mod.instrumented())
continue;
for (const auto &range : mod.ranges()) {
if (!range.executable)
continue;
if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
continue;
VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
range.beg, range.end, mod.full_name());
const uptr idx =
atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
CHECK_LT(idx, kMaxLibs);
instrumented_code_ranges_[idx].begin = range.beg;
instrumented_code_ranges_[idx].end = range.end;
atomic_store(&instrumented_ranges_count_, idx + 1,
memory_order_release);
}
}
}
}
void LibIgnore::OnLibraryUnloaded() {

View File

@ -30,6 +30,9 @@ class LibIgnore {
// Must be called during initialization.
void AddIgnoredLibrary(const char *name_templ);
void IgnoreNoninstrumentedModules(bool enable) {
track_instrumented_libs_ = enable;
}
// Must be called after a new dynamic library is loaded.
void OnLibraryLoaded(const char *name);
@ -37,8 +40,14 @@ class LibIgnore {
// Must be called after a dynamic library is unloaded.
void OnLibraryUnloaded();
// Checks whether the provided PC belongs to one of the ignored libraries.
bool IsIgnored(uptr pc) const;
// Checks whether the provided PC belongs to one of the ignored libraries or
// the PC should be ignored because it belongs to a non-instrumented module
// (when ignore_noninstrumented_modules=1). Also sets "*pc_in_ignored_lib"
// to true if the PC is in an ignored library, false otherwise.
bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
// Checks whether the provided PC belongs to an instrumented module.
bool IsPcInstrumented(uptr pc) const;
private:
struct Lib {
@ -53,26 +62,48 @@ class LibIgnore {
uptr end;
};
inline bool IsInRange(uptr pc, const LibCodeRange &range) const {
return (pc >= range.begin && pc < range.end);
}
static const uptr kMaxLibs = 128;
// Hot part:
atomic_uintptr_t loaded_count_;
LibCodeRange code_ranges_[kMaxLibs];
atomic_uintptr_t ignored_ranges_count_;
LibCodeRange ignored_code_ranges_[kMaxLibs];
atomic_uintptr_t instrumented_ranges_count_;
LibCodeRange instrumented_code_ranges_[kMaxLibs];
// Cold part:
BlockingMutex mutex_;
uptr count_;
Lib libs_[kMaxLibs];
bool track_instrumented_libs_;
// Disallow copying of LibIgnore objects.
LibIgnore(const LibIgnore&); // not implemented
void operator = (const LibIgnore&); // not implemented
};
inline bool LibIgnore::IsIgnored(uptr pc) const {
const uptr n = atomic_load(&loaded_count_, memory_order_acquire);
inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
for (uptr i = 0; i < n; i++) {
if (pc >= code_ranges_[i].begin && pc < code_ranges_[i].end)
if (IsInRange(pc, ignored_code_ranges_[i])) {
*pc_in_ignored_lib = true;
return true;
}
}
*pc_in_ignored_lib = false;
if (track_instrumented_libs_ && !IsPcInstrumented(pc))
return true;
return false;
}
inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
for (uptr i = 0; i < n; i++) {
if (IsInRange(pc, instrumented_code_ranges_[i]))
return true;
}
return false;
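
The counter/array pairs above follow a classic single-writer publish pattern: the slot is written first, then the count is bumped with release semantics, so lock-free readers that load the count with acquire see fully initialized ranges. A self-contained sketch of the pattern (generic names, not the LibIgnore API):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    struct Range { uintptr_t begin, end; };
    constexpr size_t kMax = 128;
    Range g_ranges[kMax];
    std::atomic<size_t> g_count{0};

    // Writer (serialized externally, as by LibIgnore's mutex_): fill the
    // slot, then publish it by bumping the count with release semantics.
    void AddRange(uintptr_t b, uintptr_t e) {
      size_t idx = g_count.load(std::memory_order_relaxed);
      g_ranges[idx] = {b, e};
      g_count.store(idx + 1, std::memory_order_release);
    }

    // Lock-free reader: the acquire load pairs with the writer's release
    // store, so all i < n slots are safe to read.
    bool Contains(uintptr_t pc) {
      size_t n = g_count.load(std::memory_order_acquire);
      for (size_t i = 0; i < n; i++)
        if (pc >= g_ranges[i].begin && pc < g_ranges[i].end) return true;
      return false;
    }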

View File

@ -639,9 +639,12 @@ namespace __sanitizer {
#ifndef __mips__
#if defined(__sparc__)
#if __GLIBC_PREREQ (2, 20)
// On sparc glibc 2.19 and earlier sa_flags was unsigned long, and
// __glibc_reserved0 didn't exist.
// On sparc glibc 2.19 and earlier sa_flags was unsigned long.
#if defined(__arch64__)
// To maintain ABI compatibility on sparc64 when switching to an int,
// __glibc_reserved0 was added.
int __glibc_reserved0;
#endif
int sa_flags;
#else
unsigned long sa_flags;
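
The reserved field works because the old 64-bit field overlays two ints on a big-endian LP64 target; a sketch of the layout argument (compile for sparc64/LP64 for the assertion to hold):

    struct OldSigaction { unsigned long sa_flags; };              // glibc <= 2.19
    struct NewSigaction { int __glibc_reserved0; int sa_flags; }; // glibc >= 2.20

    // On big-endian sparc64 the meaningful low-order flag bits of the old
    // 8-byte field sit at offset 4 -- exactly where the new int sa_flags is.
    static_assert(sizeof(OldSigaction) == sizeof(NewSigaction),
                  "overlay only holds on LP64 targets");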

View File

@ -49,18 +49,31 @@ class Quarantine {
}
void Init(uptr size, uptr cache_size) {
atomic_store(&max_size_, size, memory_order_release);
// Thread local quarantine size can be zero only when global quarantine size
// is zero (it allows us to perform just one atomic read per Put() call).
CHECK((size == 0 && cache_size == 0) || cache_size != 0);
atomic_store(&max_size_, size, memory_order_relaxed);
atomic_store(&min_size_, size / 10 * 9,
memory_order_release); // 90% of max size.
max_cache_size_ = cache_size;
memory_order_relaxed); // 90% of max size.
atomic_store(&max_cache_size_, cache_size, memory_order_relaxed);
}
uptr GetSize() const { return atomic_load(&max_size_, memory_order_acquire); }
uptr GetCacheSize() const { return max_cache_size_; }
uptr GetSize() const { return atomic_load(&max_size_, memory_order_relaxed); }
uptr GetCacheSize() const {
return atomic_load(&max_cache_size_, memory_order_relaxed);
}
void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
c->Enqueue(cb, ptr, size);
if (c->Size() > max_cache_size_)
uptr cache_size = GetCacheSize();
if (cache_size) {
c->Enqueue(cb, ptr, size);
} else {
// cache_size == 0 only when size == 0 (see Init).
cb.Recycle(ptr);
}
// Check cache size anyway to accommodate runtime cache_size changes.
if (c->Size() > cache_size)
Drain(c, cb);
}
@ -83,7 +96,7 @@ class Quarantine {
char pad0_[kCacheLineSize];
atomic_uintptr_t max_size_;
atomic_uintptr_t min_size_;
uptr max_cache_size_;
atomic_uintptr_t max_cache_size_;
char pad1_[kCacheLineSize];
SpinMutex cache_mutex_;
SpinMutex recycle_mutex_;
@ -92,7 +105,7 @@ class Quarantine {
void NOINLINE Recycle(Callback cb) {
Cache tmp;
uptr min_size = atomic_load(&min_size_, memory_order_acquire);
uptr min_size = atomic_load(&min_size_, memory_order_relaxed);
{
SpinMutexLock l(&cache_mutex_);
while (cache_.Size() > min_size) {
@ -205,6 +218,7 @@ class QuarantineCache {
return b;
}
};
} // namespace __sanitizer
#endif // SANITIZER_QUARANTINE_H
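
Two things changed in concert here: max_cache_size_ became atomic because activation/deactivation can now resize the quarantine at runtime, and Put() recycles immediately when the cache size is zero. A reduced model of that fast path (standalone names, not the real class):

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> max_cache_size{0};

    size_t GetCacheSize() {
      // One relaxed load per Put(); the Init() invariant (cache_size == 0
      // implies size == 0) makes this single read sufficient.
      return max_cache_size.load(std::memory_order_relaxed);
    }

    template <typename Cache, typename Callback, typename Node>
    void Put(Cache *c, Callback cb, Node *ptr, size_t size) {
      size_t cache_size = GetCacheSize();
      if (cache_size)
        c->Enqueue(cb, ptr, size);
      else
        cb.Recycle(ptr);           // quarantine disabled: free immediately
      if (c->Size() > cache_size)  // also handles runtime shrinking
        c->Drain(cb);
    }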

View File

@ -15,6 +15,7 @@
//===----------------------------------------------------------------------===//
#include "scudo_allocator.h"
#include "scudo_crc32.h"
#include "scudo_utils.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
@ -25,22 +26,6 @@
#include <cstring>
// Hardware CRC32 is supported at compilation via the following:
// - for i386 & x86_64: -msse4.2
// - for ARM & AArch64: -march=armv8-a+crc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
# ifdef __SSE4_2__
# include <smmintrin.h>
# define HW_CRC32 FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
# endif
# ifdef __ARM_FEATURE_CRC32
# include <arm_acle.h>
# define HW_CRC32 FIRST_32_SECOND_64(__crc32cw, __crc32cd)
# endif
#endif
namespace __scudo {
#if SANITIZER_CAN_USE_ALLOCATOR64
@ -84,10 +69,6 @@ static thread_local Xorshift128Plus Prng;
// Global static cookie, initialized at start-up.
static uptr Cookie;
enum : u8 {
CRC32Software = 0,
CRC32Hardware = 1,
};
// We default to software CRC32 if the alternatives are not supported, either
// at compilation or at runtime.
static atomic_uint8_t HashAlgorithm = { CRC32Software };
@ -97,17 +78,9 @@ static atomic_uint8_t HashAlgorithm = { CRC32Software };
// the checksumming function if available.
INLINE u32 hashUptrs(uptr Pointer, uptr *Array, uptr ArraySize, u8 HashType) {
u32 Crc;
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
if (HashType == CRC32Hardware) {
Crc = HW_CRC32(Cookie, Pointer);
for (uptr i = 0; i < ArraySize; i++)
Crc = HW_CRC32(Crc, Array[i]);
return Crc;
}
#endif
Crc = computeCRC32(Cookie, Pointer);
Crc = computeCRC32(Cookie, Pointer, HashType);
for (uptr i = 0; i < ArraySize; i++)
Crc = computeCRC32(Crc, Array[i]);
Crc = computeCRC32(Crc, Array[i], HashType);
return Crc;
}

View File

@ -0,0 +1,53 @@
//===-- scudo_crc32.cpp -----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// CRC32 function leveraging hardware specific instructions. This has to be
/// kept separate to restrict the use of compiler-specific flags to this file.
///
//===----------------------------------------------------------------------===//
// Hardware CRC32 is supported at compilation via the following:
// - for i386 & x86_64: -msse4.2
// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
// An additional check must be performed at runtime as well to make sure the
// emitted instructions are valid on the target host.
#include "scudo_crc32.h"
#include "scudo_utils.h"
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
# ifdef __SSE4_2__
# include <smmintrin.h>
# define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
# endif
# ifdef __ARM_FEATURE_CRC32
# include <arm_acle.h>
# define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
# endif
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
namespace __scudo {
#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
INLINE u32 computeHardwareCRC32(u32 Crc, uptr Data) {
return CRC32_INTRINSIC(Crc, Data);
}
u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
if (HashType == CRC32Hardware) {
return computeHardwareCRC32(Crc, Data);
}
return computeSoftwareCRC32(Crc, Data);
}
#else
u32 computeCRC32(u32 Crc, uptr Data, u8 HashType) {
return computeSoftwareCRC32(Crc, Data);
}
#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
} // namespace __scudo
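
Outside of scudo, the same compile-time split looks like this; a standalone sketch (local names, built with -msse4.2 on x86 or -march=armv8-a+crc on ARM):

    #include <cstdint>

    #if defined(__SSE4_2__)
    # include <smmintrin.h>
    static uint32_t hw_crc32(uint32_t crc, uint64_t data) {
      return static_cast<uint32_t>(_mm_crc32_u64(crc, data));
    }
    #elif defined(__ARM_FEATURE_CRC32)
    # include <arm_acle.h>
    static uint32_t hw_crc32(uint32_t crc, uint64_t data) {
      return __crc32cd(crc, data);
    }
    #endif
    // As in scudo, a runtime CPU-feature check must still gate calls to
    // hw_crc32, since build flags alone don't guarantee host support.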

View File

@ -0,0 +1,30 @@
//===-- scudo_crc32.h -------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// Header for scudo_crc32.cpp.
///
//===----------------------------------------------------------------------===//
#ifndef SCUDO_CRC32_H_
#define SCUDO_CRC32_H_
#include "sanitizer_common/sanitizer_internal_defs.h"
namespace __scudo {
enum : u8 {
CRC32Software = 0,
CRC32Hardware = 1,
};
u32 computeCRC32(u32 Crc, uptr Data, u8 HashType);
} // namespace __scudo
#endif // SCUDO_CRC32_H_

View File

@ -185,8 +185,7 @@ const static u32 CRC32Table[] = {
0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
};
u32 computeCRC32(u32 Crc, uptr Data)
{
u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
for (uptr i = 0; i < sizeof(Data); i++) {
Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
Data >>= 8;

View File

@ -53,8 +53,8 @@ struct Xorshift128Plus {
u64 State[2];
};
// Software CRC32 functions, to be used when SSE 4.2 support is not detected.
u32 computeCRC32(u32 Crc, uptr Data);
// Software CRC32 functions, to be used when hardware support is not detected.
u32 computeSoftwareCRC32(u32 Crc, uptr Data);
} // namespace __scudo

View File

@ -79,5 +79,8 @@ TSAN_FLAG(bool, die_after_fork, true,
TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
TSAN_FLAG(bool, ignore_interceptors_accesses, false,
"Ignore reads and writes from all interceptors.")
TSAN_FLAG(bool, ignore_noninstrumented_modules, false,
"Interceptors should only detect races when called from instrumented "
"modules.")
TSAN_FLAG(bool, shared_ptr_interceptor, true,
"Track atomic reference counting in libc++ shared_ptr and weak_ptr.")

View File

@ -231,6 +231,8 @@ void InitializeLibIgnore() {
if (0 == internal_strcmp(s->type, kSuppressionLib))
libignore()->AddIgnoredLibrary(s->templ);
}
if (flags()->ignore_noninstrumented_modules)
libignore()->IgnoreNoninstrumentedModules(true);
libignore()->OnLibraryLoaded(0);
}
@ -252,31 +254,20 @@ static unsigned g_thread_finalize_key;
ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
uptr pc)
: thr_(thr)
, pc_(pc)
, in_ignored_lib_(false) {
: thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
Initialize(thr);
if (!thr_->is_inited)
return;
if (!thr_->ignore_interceptors)
FuncEntry(thr, pc);
if (!thr_->is_inited) return;
if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
if (!thr_->in_ignored_lib && libignore()->IsIgnored(pc)) {
in_ignored_lib_ = true;
thr_->in_ignored_lib = true;
ThreadIgnoreBegin(thr_, pc_);
}
if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
ignoring_ =
!thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
libignore()->IsIgnored(pc, &in_ignored_lib_));
EnableIgnores();
}
ScopedInterceptor::~ScopedInterceptor() {
if (!thr_->is_inited)
return;
if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
if (in_ignored_lib_) {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
}
if (!thr_->is_inited) return;
DisableIgnores();
if (!thr_->ignore_interceptors) {
ProcessPendingSignals(thr_);
FuncExit(thr_);
@ -284,20 +275,24 @@ ScopedInterceptor::~ScopedInterceptor() {
}
}
void ScopedInterceptor::UserCallbackStart() {
if (flags()->ignore_interceptors_accesses) ThreadIgnoreEnd(thr_, pc_);
if (in_ignored_lib_) {
thr_->in_ignored_lib = false;
ThreadIgnoreEnd(thr_, pc_);
void ScopedInterceptor::EnableIgnores() {
if (ignoring_) {
ThreadIgnoreBegin(thr_, pc_);
if (in_ignored_lib_) {
DCHECK(!thr_->in_ignored_lib);
thr_->in_ignored_lib = true;
}
}
}
void ScopedInterceptor::UserCallbackEnd() {
if (in_ignored_lib_) {
thr_->in_ignored_lib = true;
ThreadIgnoreBegin(thr_, pc_);
void ScopedInterceptor::DisableIgnores() {
if (ignoring_) {
ThreadIgnoreEnd(thr_, pc_);
if (in_ignored_lib_) {
DCHECK(thr_->in_ignored_lib);
thr_->in_ignored_lib = false;
}
}
if (flags()->ignore_interceptors_accesses) ThreadIgnoreBegin(thr_, pc_);
}
#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)

View File

@ -10,12 +10,13 @@ class ScopedInterceptor {
public:
ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
~ScopedInterceptor();
void UserCallbackStart();
void UserCallbackEnd();
void DisableIgnores();
void EnableIgnores();
private:
ThreadState *const thr_;
const uptr pc_;
bool in_ignored_lib_;
bool ignoring_;
};
} // namespace __tsan
@ -39,10 +40,10 @@ class ScopedInterceptor {
/**/
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
si.UserCallbackStart();
si.DisableIgnores();
#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \
si.UserCallbackEnd();
si.EnableIgnores();
#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
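
Put together, an interceptor that runs user callbacks brackets them with these macros so user code executes un-ignored; a sketch modeled on existing TSan interceptors (call_user_cb is a hypothetical intercepted function):

    TSAN_INTERCEPTOR(int, call_user_cb, int (*user_cb)(void)) {
      SCOPED_TSAN_INTERCEPTOR(call_user_cb, user_cb);
      SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();  // si.DisableIgnores()
      int res = user_cb();  // user code is tracked normally here
      SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();    // si.EnableIgnores()
      return res;
    }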

View File

@ -19,6 +19,9 @@
#include <atomic>
#include <cassert>
extern "C" void __clear_cache(void* start, void* end);
namespace __xray {
uint64_t cycleFrequency() XRAY_NEVER_INSTRUMENT {
@ -75,8 +78,8 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
// B #32
uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
uint32_t *CurAddress = FirstAddress + 1;
if (Enable) {
uint32_t *CurAddress = FirstAddress + 1;
*CurAddress = uint32_t(PatchOpcodes::PO_LdrW0_12);
CurAddress++;
*CurAddress = uint32_t(PatchOpcodes::PO_LdrX16_12);
@ -88,6 +91,7 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
*reinterpret_cast<void (**)()>(CurAddress) = TracingHook;
CurAddress += 2;
*CurAddress = uint32_t(PatchOpcodes::PO_LdpX0X30SP_16);
CurAddress++;
std::atomic_store_explicit(
reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
uint32_t(PatchOpcodes::PO_StpX0X30SP_m16e), std::memory_order_release);
@ -96,6 +100,8 @@ inline static bool patchSled(const bool Enable, const uint32_t FuncId,
reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
uint32_t(PatchOpcodes::PO_B32), std::memory_order_release);
}
__clear_cache(reinterpret_cast<char*>(FirstAddress),
reinterpret_cast<char*>(CurAddress));
return true;
}
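
The newly added __clear_cache call is the generic pattern for any runtime code patching: write instructions through the data side, then invalidate the stale instruction cache before they can execute. A standalone sketch using the same builtin:

    #include <cstddef>
    #include <cstdint>

    extern "C" void __clear_cache(void *start, void *end);

    void patch_words(uint32_t *code, size_t n, uint32_t opcode) {
      for (size_t i = 0; i < n; i++)
        code[i] = opcode;  // writes land in the data cache
      // Without this, the CPU could keep executing the old instructions.
      __clear_cache(reinterpret_cast<char *>(code),
                    reinterpret_cast<char *>(code + n));
    }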