Merge llvm, clang, lld, lldb, compiler-rt and libc++ r307894, and update
build glue.
commit 4d0d296fa3
@@ -61,10 +61,9 @@ static void MaybeDumpRegisters(void *context) {
static void MaybeReportNonExecRegion(uptr pc) {
#if SANITIZER_FREEBSD || SANITIZER_LINUX
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  uptr start, end, protection;
  while (proc_maps.Next(&start, &end, nullptr, nullptr, 0, &protection)) {
    if (pc >= start && pc < end &&
        !(protection & MemoryMappingLayout::kProtectionExecute))
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())
      Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
  }
#endif

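Note: most sanitizer hunks in this merge make the same mechanical change as the one above: callers stop pulling individual fields out of MemoryMappingLayout::Next() and instead iterate over a segment object. The snippet below is only a rough sketch of the interface those new call sites assume; the real definition lives in sanitizer_common/sanitizer_procmaps.h (not shown in this diff), and the flag names and values here are placeholders.

typedef unsigned long uptr;  // stand-in for the sanitizer-internal typedef

// Hypothetical sketch of the segment object that proc_maps.Next() fills in.
struct MemoryMappedSegment {
  enum { kRead = 1, kWrite = 2, kExecute = 4 };  // placeholder flag values

  explicit MemoryMappedSegment(char *buff = nullptr, uptr buff_size = 0)
      : filename(buff), filename_size(buff_size) {}

  bool IsReadable() const { return protection & kRead; }
  bool IsWritable() const { return protection & kWrite; }
  bool IsExecutable() const { return protection & kExecute; }

  uptr start = 0, end = 0, offset = 0;
  char *filename;       // optional output buffer owned by the caller
  uptr filename_size;
  uptr protection = 0;
};

A caller constructs one segment (optionally passing a filename buffer, as the AsanCheckIncompatibleRT hunk below does) and calls proc_maps.Next(&segment) until it returns false.
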
@@ -75,6 +75,7 @@ void NORETURN ShowStatsAndAbort();
void ReplaceSystemMalloc();

// asan_linux.cc / asan_mac.cc / asan_win.cc
uptr FindDynamicShadowStart();
void *AsanDoesNotSupportStaticLinkage();
void AsanCheckDynamicRTPrereqs();
void AsanCheckIncompatibleRT();

@@ -77,6 +77,11 @@ void *AsanDoesNotSupportStaticLinkage() {
  return &_DYNAMIC;  // defined in link.h
}

uptr FindDynamicShadowStart() {
  UNREACHABLE("FindDynamicShadowStart is not available");
  return 0;
}

void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
  UNIMPLEMENTED();
}
@@ -140,9 +145,9 @@ void AsanCheckIncompatibleRT() {
      // system libraries, causing crashes later in ASan initialization.
      MemoryMappingLayout proc_maps(/*cache_enabled*/true);
      char filename[128];
      while (proc_maps.Next(nullptr, nullptr, nullptr, filename,
                            sizeof(filename), nullptr)) {
        if (IsDynamicRTName(filename)) {
      MemoryMappedSegment segment(filename, sizeof(filename));
      while (proc_maps.Next(&segment)) {
        if (IsDynamicRTName(segment.filename)) {
          Report("Your application is linked against "
                 "incompatible ASan runtimes.\n");
          Die();

@@ -55,6 +55,29 @@ void *AsanDoesNotSupportStaticLinkage() {
  return 0;
}

uptr FindDynamicShadowStart() {
  uptr granularity = GetMmapGranularity();
  uptr alignment = 8 * granularity;
  uptr left_padding = granularity;
  uptr space_size = kHighShadowEnd + left_padding;

  uptr largest_gap_found = 0;
  uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
                                               granularity, &largest_gap_found);
  // If the shadow doesn't fit, restrict the address space to make it fit.
  if (shadow_start == 0) {
    uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
    RestrictMemoryToMaxAddress(new_max_vm);
    kHighMemEnd = new_max_vm - 1;
    space_size = kHighShadowEnd + left_padding;
    shadow_start =
        FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
  }
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}

// No-op. Mac does not support static linkage anyway.
void AsanCheckDynamicRTPrereqs() {}

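The Mac FindDynamicShadowStart() above falls back to shrinking the usable address space when no free gap is big enough for the shadow. Below is a self-contained illustration of that arithmetic with made-up numbers (a 256 MB largest gap, 8*page alignment, and SHADOW_SCALE = 3, ASan's usual 1:8 shadow ratio); it is not code from this commit.

#include <cstdint>
#include <cstdio>

// Round x down to a multiple of a power-of-two boundary.
static uint64_t RoundDownTo(uint64_t x, uint64_t boundary) {
  return x & ~(boundary - 1);
}

int main() {
  const uint64_t kShadowScale = 3;                // shadow is 1/8 of app memory
  const uint64_t largest_gap_found = 0x10000000;  // 256 MB free gap (example)
  const uint64_t alignment = 8 * 0x1000;          // 8 * 4 KB pages (example)

  // If at most largest_gap_found bytes of shadow fit, the application may use
  // at most largest_gap_found << kShadowScale bytes of address space.
  uint64_t new_max_vm =
      RoundDownTo(largest_gap_found << kShadowScale, alignment);
  printf("clamp address space to 0x%llx (%llu GB)\n",
         (unsigned long long)new_max_vm,
         (unsigned long long)(new_max_vm >> 30));
  return 0;
}

With those inputs the process is clamped to a 2 GB address space, whose 256 MB shadow then fits into the gap that was found.
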
@@ -26,7 +26,7 @@
// VS2015 dynamic CRT (MD) work.
#if SANITIZER_WINDOWS
#define CXX_OPERATOR_ATTRIBUTE
#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:"##sym))
#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym))
#ifdef _WIN64
COMMENT_EXPORT("??2@YAPEAX_K@Z")                    // operator new
COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow

@@ -59,7 +59,7 @@ void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  // lis r0,-10000
  // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
  // If the store faults then sp will not have been updated, so test above
  // will not work, becase the fault address will be more than just "slightly"
  // will not work, because the fault address will be more than just "slightly"
  // below sp.
  if (!IsStackAccess && IsAccessibleMemoryRange(sig.pc, 4)) {
    u32 inst = *(unsigned *)sig.pc;

@@ -438,15 +438,7 @@ static void InitializeShadowMemory() {
  if (shadow_start == kDefaultShadowSentinel) {
    __asan_shadow_memory_dynamic_address = 0;
    CHECK_EQ(0, kLowShadowBeg);

    uptr granularity = GetMmapGranularity();
    uptr alignment = 8 * granularity;
    uptr left_padding = granularity;
    uptr space_size = kHighShadowEnd + left_padding;

    shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity);
    CHECK_NE((uptr)0, shadow_start);
    CHECK(IsAligned(shadow_start, alignment));
    shadow_start = FindDynamicShadowStart();
  }
  // Update the shadow memory address (potentially) used by instrumentation.
  __asan_shadow_memory_dynamic_address = shadow_start;

@@ -200,7 +200,6 @@ FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  CHECK_LE(stack_size, 0x10000000);
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0 -- not initialized

@@ -217,6 +217,18 @@ void *AsanDoesNotSupportStaticLinkage() {
  return 0;
}

uptr FindDynamicShadowStart() {
  uptr granularity = GetMmapGranularity();
  uptr alignment = 8 * granularity;
  uptr left_padding = granularity;
  uptr space_size = kHighShadowEnd + left_padding;
  uptr shadow_start =
      FindAvailableMemoryRange(space_size, alignment, granularity, nullptr);
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}

void AsanCheckDynamicRTPrereqs() {}

void AsanCheckIncompatibleRT() {}

@ -44,29 +44,16 @@ enum ProcessorVendors {
|
||||
};
|
||||
|
||||
enum ProcessorTypes {
|
||||
INTEL_ATOM = 1,
|
||||
INTEL_BONNELL = 1,
|
||||
INTEL_CORE2,
|
||||
INTEL_COREI7,
|
||||
AMDFAM10H,
|
||||
AMDFAM15H,
|
||||
INTEL_i386,
|
||||
INTEL_i486,
|
||||
INTEL_PENTIUM,
|
||||
INTEL_PENTIUM_PRO,
|
||||
INTEL_PENTIUM_II,
|
||||
INTEL_PENTIUM_III,
|
||||
INTEL_PENTIUM_IV,
|
||||
INTEL_PENTIUM_M,
|
||||
INTEL_CORE_DUO,
|
||||
INTEL_XEONPHI,
|
||||
INTEL_X86_64,
|
||||
INTEL_NOCONA,
|
||||
INTEL_PRESCOTT,
|
||||
AMD_i486,
|
||||
AMDPENTIUM,
|
||||
AMDATHLON,
|
||||
AMDFAM14H,
|
||||
AMDFAM16H,
|
||||
INTEL_SILVERMONT,
|
||||
INTEL_KNL,
|
||||
AMD_BTVER1,
|
||||
AMD_BTVER2,
|
||||
AMDFAM17H,
|
||||
CPU_TYPE_MAX
|
||||
};
|
||||
|
||||
@ -79,32 +66,14 @@ enum ProcessorSubtypes {
|
||||
AMDFAM10H_ISTANBUL,
|
||||
AMDFAM15H_BDVER1,
|
||||
AMDFAM15H_BDVER2,
|
||||
INTEL_PENTIUM_MMX,
|
||||
INTEL_CORE2_65,
|
||||
INTEL_CORE2_45,
|
||||
AMDFAM15H_BDVER3,
|
||||
AMDFAM15H_BDVER4,
|
||||
AMDFAM17H_ZNVER1,
|
||||
INTEL_COREI7_IVYBRIDGE,
|
||||
INTEL_COREI7_HASWELL,
|
||||
INTEL_COREI7_BROADWELL,
|
||||
INTEL_COREI7_SKYLAKE,
|
||||
INTEL_COREI7_SKYLAKE_AVX512,
|
||||
INTEL_ATOM_BONNELL,
|
||||
INTEL_ATOM_SILVERMONT,
|
||||
INTEL_KNIGHTS_LANDING,
|
||||
AMDPENTIUM_K6,
|
||||
AMDPENTIUM_K62,
|
||||
AMDPENTIUM_K63,
|
||||
AMDPENTIUM_GEODE,
|
||||
AMDATHLON_TBIRD,
|
||||
AMDATHLON_MP,
|
||||
AMDATHLON_XP,
|
||||
AMDATHLON_K8SSE3,
|
||||
AMDATHLON_OPTERON,
|
||||
AMDATHLON_FX,
|
||||
AMDATHLON_64,
|
||||
AMD_BTVER1,
|
||||
AMD_BTVER2,
|
||||
AMDFAM15H_BDVER3,
|
||||
AMDFAM15H_BDVER4,
|
||||
CPU_SUBTYPE_MAX
|
||||
};
|
||||
|
||||
@ -120,11 +89,26 @@ enum ProcessorFeatures {
|
||||
FEATURE_SSE4_2,
|
||||
FEATURE_AVX,
|
||||
FEATURE_AVX2,
|
||||
FEATURE_AVX512,
|
||||
FEATURE_AVX512SAVE,
|
||||
FEATURE_MOVBE,
|
||||
FEATURE_ADX,
|
||||
FEATURE_EM64T
|
||||
FEATURE_SSE4_A,
|
||||
FEATURE_FMA4,
|
||||
FEATURE_XOP,
|
||||
FEATURE_FMA,
|
||||
FEATURE_AVX512F,
|
||||
FEATURE_BMI,
|
||||
FEATURE_BMI2,
|
||||
FEATURE_AES,
|
||||
FEATURE_PCLMUL,
|
||||
FEATURE_AVX512VL,
|
||||
FEATURE_AVX512BW,
|
||||
FEATURE_AVX512DQ,
|
||||
FEATURE_AVX512CD,
|
||||
FEATURE_AVX512ER,
|
||||
FEATURE_AVX512PF,
|
||||
FEATURE_AVX512VBMI,
|
||||
FEATURE_AVX512IFMA,
|
||||
FEATURE_AVX5124VNNIW,
|
||||
FEATURE_AVX5124FMAPS,
|
||||
FEATURE_AVX512VPOPCNTDQ
|
||||
};
|
||||
|
||||
// The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max).
|
||||
@ -164,26 +148,27 @@ static bool isCpuIdSupported() {
|
||||
|
||||
/// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in
|
||||
/// the specified arguments. If we can't run cpuid on the host, return true.
|
||||
static void getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
|
||||
static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
|
||||
unsigned *rECX, unsigned *rEDX) {
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#if defined(__x86_64__)
|
||||
// gcc doesn't know cpuid would clobber ebx/rbx. Preseve it manually.
|
||||
// gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
|
||||
// FIXME: should we save this for Clang?
|
||||
__asm__("movq\t%%rbx, %%rsi\n\t"
|
||||
"cpuid\n\t"
|
||||
"xchgq\t%%rbx, %%rsi\n\t"
|
||||
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
|
||||
: "a"(value));
|
||||
return false;
|
||||
#elif defined(__i386__)
|
||||
__asm__("movl\t%%ebx, %%esi\n\t"
|
||||
"cpuid\n\t"
|
||||
"xchgl\t%%ebx, %%esi\n\t"
|
||||
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
|
||||
: "a"(value));
|
||||
// pedantic #else returns to appease -Wunreachable-code (so we don't generate
|
||||
// postprocessed code that looks like "return true; return false;")
|
||||
return false;
|
||||
#else
|
||||
assert(0 && "This method is defined only for x86.");
|
||||
return true;
|
||||
#endif
|
||||
#elif defined(_MSC_VER)
|
||||
// The MSVC intrinsic is portable across x86 and x64.
|
||||
@ -193,15 +178,16 @@ static void getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
|
||||
*rEBX = registers[1];
|
||||
*rECX = registers[2];
|
||||
*rEDX = registers[3];
|
||||
return false;
|
||||
#else
|
||||
assert(0 && "This method is defined only for GNUC, Clang or MSVC.");
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
/// getX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return
|
||||
/// the 4 values in the specified arguments. If we can't run cpuid on the host,
|
||||
/// return true.
|
||||
static void getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
|
||||
static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
|
||||
unsigned *rEAX, unsigned *rEBX, unsigned *rECX,
|
||||
unsigned *rEDX) {
|
||||
#if defined(__x86_64__) || defined(_M_X64)
|
||||
@ -213,6 +199,7 @@ static void getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
|
||||
"xchgq\t%%rbx, %%rsi\n\t"
|
||||
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
|
||||
: "a"(value), "c"(subleaf));
|
||||
return false;
|
||||
#elif defined(_MSC_VER)
|
||||
int registers[4];
|
||||
__cpuidex(registers, value, subleaf);
|
||||
@ -220,8 +207,9 @@ static void getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
|
||||
*rEBX = registers[1];
|
||||
*rECX = registers[2];
|
||||
*rEDX = registers[3];
|
||||
return false;
|
||||
#else
|
||||
assert(0 && "This method is defined only for GNUC, Clang or MSVC.");
|
||||
return true;
|
||||
#endif
|
||||
#elif defined(__i386__) || defined(_M_IX86)
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
@ -230,6 +218,7 @@ static void getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
|
||||
"xchgl\t%%ebx, %%esi\n\t"
|
||||
: "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
|
||||
: "a"(value), "c"(subleaf));
|
||||
return false;
|
||||
#elif defined(_MSC_VER)
|
||||
__asm {
|
||||
mov eax,value
|
||||
@ -244,11 +233,12 @@ static void getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
|
||||
mov esi,rEDX
|
||||
mov dword ptr [esi],edx
|
||||
}
|
||||
return false;
|
||||
#else
|
||||
assert(0 && "This method is defined only for GNUC, Clang or MSVC.");
|
||||
return true;
|
||||
#endif
|
||||
#else
|
||||
assert(0 && "This method is defined only for x86.");
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -283,84 +273,15 @@ static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
|
||||
}
|
||||
}
|
||||
|
||||
static void getIntelProcessorTypeAndSubtype(unsigned int Family,
|
||||
unsigned int Model,
|
||||
unsigned int Brand_id,
|
||||
unsigned int Features,
|
||||
unsigned *Type, unsigned *Subtype) {
|
||||
static void
|
||||
getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
|
||||
unsigned Brand_id, unsigned Features,
|
||||
unsigned *Type, unsigned *Subtype) {
|
||||
if (Brand_id != 0)
|
||||
return;
|
||||
switch (Family) {
|
||||
case 3:
|
||||
*Type = INTEL_i386;
|
||||
break;
|
||||
case 4:
|
||||
switch (Model) {
|
||||
case 0: // Intel486 DX processors
|
||||
case 1: // Intel486 DX processors
|
||||
case 2: // Intel486 SX processors
|
||||
case 3: // Intel487 processors, IntelDX2 OverDrive processors,
|
||||
// IntelDX2 processors
|
||||
case 4: // Intel486 SL processor
|
||||
case 5: // IntelSX2 processors
|
||||
case 7: // Write-Back Enhanced IntelDX2 processors
|
||||
case 8: // IntelDX4 OverDrive processors, IntelDX4 processors
|
||||
default:
|
||||
*Type = INTEL_i486;
|
||||
break;
|
||||
}
|
||||
case 5:
|
||||
switch (Model) {
|
||||
case 1: // Pentium OverDrive processor for Pentium processor (60, 66),
|
||||
// Pentium processors (60, 66)
|
||||
case 2: // Pentium OverDrive processor for Pentium processor (75, 90,
|
||||
// 100, 120, 133), Pentium processors (75, 90, 100, 120, 133,
|
||||
// 150, 166, 200)
|
||||
case 3: // Pentium OverDrive processors for Intel486 processor-based
|
||||
// systems
|
||||
*Type = INTEL_PENTIUM;
|
||||
break;
|
||||
case 4: // Pentium OverDrive processor with MMX technology for Pentium
|
||||
// processor (75, 90, 100, 120, 133), Pentium processor with
|
||||
// MMX technology (166, 200)
|
||||
*Type = INTEL_PENTIUM;
|
||||
*Subtype = INTEL_PENTIUM_MMX;
|
||||
break;
|
||||
default:
|
||||
*Type = INTEL_PENTIUM;
|
||||
break;
|
||||
}
|
||||
case 6:
|
||||
switch (Model) {
|
||||
case 0x01: // Pentium Pro processor
|
||||
*Type = INTEL_PENTIUM_PRO;
|
||||
break;
|
||||
case 0x03: // Intel Pentium II OverDrive processor, Pentium II processor,
|
||||
// model 03
|
||||
case 0x05: // Pentium II processor, model 05, Pentium II Xeon processor,
|
||||
// model 05, and Intel Celeron processor, model 05
|
||||
case 0x06: // Celeron processor, model 06
|
||||
*Type = INTEL_PENTIUM_II;
|
||||
break;
|
||||
case 0x07: // Pentium III processor, model 07, and Pentium III Xeon
|
||||
// processor, model 07
|
||||
case 0x08: // Pentium III processor, model 08, Pentium III Xeon processor,
|
||||
// model 08, and Celeron processor, model 08
|
||||
case 0x0a: // Pentium III Xeon processor, model 0Ah
|
||||
case 0x0b: // Pentium III processor, model 0Bh
|
||||
*Type = INTEL_PENTIUM_III;
|
||||
break;
|
||||
case 0x09: // Intel Pentium M processor, Intel Celeron M processor model 09.
|
||||
case 0x0d: // Intel Pentium M processor, Intel Celeron M processor, model
|
||||
// 0Dh. All processors are manufactured using the 90 nm process.
|
||||
case 0x15: // Intel EP80579 Integrated Processor and Intel EP80579
|
||||
// Integrated Processor with Intel QuickAssist Technology
|
||||
*Type = INTEL_PENTIUM_M;
|
||||
break;
|
||||
case 0x0e: // Intel Core Duo processor, Intel Core Solo processor, model
|
||||
// 0Eh. All processors are manufactured using the 65 nm process.
|
||||
*Type = INTEL_CORE_DUO;
|
||||
break; // yonah
|
||||
case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile
|
||||
// processor, Intel Core 2 Quad processor, Intel Core 2 Quad
|
||||
// mobile processor, Intel Core 2 Extreme processor, Intel
|
||||
@ -368,9 +289,6 @@ static void getIntelProcessorTypeAndSubtype(unsigned int Family,
|
||||
// 0Fh. All processors are manufactured using the 65 nm process.
|
||||
case 0x16: // Intel Celeron processor model 16h. All processors are
|
||||
// manufactured using the 65 nm process
|
||||
*Type = INTEL_CORE2; // "core2"
|
||||
*Subtype = INTEL_CORE2_65;
|
||||
break;
|
||||
case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model
|
||||
// 17h. All processors are manufactured using the 45 nm process.
|
||||
//
|
||||
@ -378,14 +296,13 @@ static void getIntelProcessorTypeAndSubtype(unsigned int Family,
|
||||
case 0x1d: // Intel Xeon processor MP. All processors are manufactured using
|
||||
// the 45 nm process.
|
||||
*Type = INTEL_CORE2; // "penryn"
|
||||
*Subtype = INTEL_CORE2_45;
|
||||
break;
|
||||
case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All
|
||||
// processors are manufactured using the 45 nm process.
|
||||
case 0x1e: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz.
|
||||
// As found in a Summer 2010 model iMac.
|
||||
case 0x1f:
|
||||
case 0x2e: // Nehalem EX
|
||||
case 0x2e: // Nehalem EX
|
||||
*Type = INTEL_COREI7; // "nehalem"
|
||||
*Subtype = INTEL_COREI7_NEHALEM;
|
||||
break;
|
||||
@ -403,7 +320,7 @@ static void getIntelProcessorTypeAndSubtype(unsigned int Family,
|
||||
*Subtype = INTEL_COREI7_SANDYBRIDGE;
|
||||
break;
|
||||
case 0x3a:
|
||||
case 0x3e: // Ivy Bridge EP
|
||||
case 0x3e: // Ivy Bridge EP
|
||||
*Type = INTEL_COREI7; // "ivybridge"
|
||||
*Subtype = INTEL_COREI7_IVYBRIDGE;
|
||||
break;
|
||||
@ -427,22 +344,26 @@ static void getIntelProcessorTypeAndSubtype(unsigned int Family,
|
||||
break;
|
||||
|
||||
// Skylake:
|
||||
case 0x4e:
|
||||
*Type = INTEL_COREI7; // "skylake-avx512"
|
||||
*Subtype = INTEL_COREI7_SKYLAKE_AVX512;
|
||||
break;
|
||||
case 0x5e:
|
||||
case 0x4e: // Skylake mobile
|
||||
case 0x5e: // Skylake desktop
|
||||
case 0x8e: // Kaby Lake mobile
|
||||
case 0x9e: // Kaby Lake desktop
|
||||
*Type = INTEL_COREI7; // "skylake"
|
||||
*Subtype = INTEL_COREI7_SKYLAKE;
|
||||
break;
|
||||
|
||||
// Skylake Xeon:
|
||||
case 0x55:
|
||||
*Type = INTEL_COREI7;
|
||||
*Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512"
|
||||
break;
|
||||
|
||||
case 0x1c: // Most 45 nm Intel Atom processors
|
||||
case 0x26: // 45 nm Atom Lincroft
|
||||
case 0x27: // 32 nm Atom Medfield
|
||||
case 0x35: // 32 nm Atom Midview
|
||||
case 0x36: // 32 nm Atom Midview
|
||||
*Type = INTEL_ATOM;
|
||||
*Subtype = INTEL_ATOM_BONNELL;
|
||||
*Type = INTEL_BONNELL;
|
||||
break; // "bonnell"
|
||||
|
||||
// Atom Silvermont codes from the Intel software optimization guide.
|
||||
@ -452,185 +373,29 @@ static void getIntelProcessorTypeAndSubtype(unsigned int Family,
|
||||
case 0x5a:
|
||||
case 0x5d:
|
||||
case 0x4c: // really airmont
|
||||
*Type = INTEL_ATOM;
|
||||
*Subtype = INTEL_ATOM_SILVERMONT;
|
||||
*Type = INTEL_SILVERMONT;
|
||||
break; // "silvermont"
|
||||
|
||||
case 0x57:
|
||||
*Type = INTEL_XEONPHI; // knl
|
||||
*Subtype = INTEL_KNIGHTS_LANDING;
|
||||
*Type = INTEL_KNL; // knl
|
||||
break;
|
||||
|
||||
default: // Unknown family 6 CPU, try to guess.
|
||||
if (Features & (1 << FEATURE_AVX512)) {
|
||||
*Type = INTEL_XEONPHI; // knl
|
||||
*Subtype = INTEL_KNIGHTS_LANDING;
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_ADX)) {
|
||||
*Type = INTEL_COREI7;
|
||||
*Subtype = INTEL_COREI7_BROADWELL;
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_AVX2)) {
|
||||
*Type = INTEL_COREI7;
|
||||
*Subtype = INTEL_COREI7_HASWELL;
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_AVX)) {
|
||||
*Type = INTEL_COREI7;
|
||||
*Subtype = INTEL_COREI7_SANDYBRIDGE;
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_SSE4_2)) {
|
||||
if (Features & (1 << FEATURE_MOVBE)) {
|
||||
*Type = INTEL_ATOM;
|
||||
*Subtype = INTEL_ATOM_SILVERMONT;
|
||||
} else {
|
||||
*Type = INTEL_COREI7;
|
||||
*Subtype = INTEL_COREI7_NEHALEM;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_SSE4_1)) {
|
||||
*Type = INTEL_CORE2; // "penryn"
|
||||
*Subtype = INTEL_CORE2_45;
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_SSSE3)) {
|
||||
if (Features & (1 << FEATURE_MOVBE)) {
|
||||
*Type = INTEL_ATOM;
|
||||
*Subtype = INTEL_ATOM_BONNELL; // "bonnell"
|
||||
} else {
|
||||
*Type = INTEL_CORE2; // "core2"
|
||||
*Subtype = INTEL_CORE2_65;
|
||||
}
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_EM64T)) {
|
||||
*Type = INTEL_X86_64;
|
||||
break; // x86-64
|
||||
}
|
||||
if (Features & (1 << FEATURE_SSE2)) {
|
||||
*Type = INTEL_PENTIUM_M;
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_SSE)) {
|
||||
*Type = INTEL_PENTIUM_III;
|
||||
break;
|
||||
}
|
||||
if (Features & (1 << FEATURE_MMX)) {
|
||||
*Type = INTEL_PENTIUM_II;
|
||||
break;
|
||||
}
|
||||
*Type = INTEL_PENTIUM_PRO;
|
||||
default: // Unknown family 6 CPU.
|
||||
break;
|
||||
break;
|
||||
}
|
||||
case 15: {
|
||||
switch (Model) {
|
||||
case 0: // Pentium 4 processor, Intel Xeon processor. All processors are
|
||||
// model 00h and manufactured using the 0.18 micron process.
|
||||
case 1: // Pentium 4 processor, Intel Xeon processor, Intel Xeon
|
||||
// processor MP, and Intel Celeron processor. All processors are
|
||||
// model 01h and manufactured using the 0.18 micron process.
|
||||
case 2: // Pentium 4 processor, Mobile Intel Pentium 4 processor - M,
|
||||
// Intel Xeon processor, Intel Xeon processor MP, Intel Celeron
|
||||
// processor, and Mobile Intel Celeron processor. All processors
|
||||
// are model 02h and manufactured using the 0.13 micron process.
|
||||
*Type =
|
||||
((Features & (1 << FEATURE_EM64T)) ? INTEL_X86_64 : INTEL_PENTIUM_IV);
|
||||
break;
|
||||
|
||||
case 3: // Pentium 4 processor, Intel Xeon processor, Intel Celeron D
|
||||
// processor. All processors are model 03h and manufactured using
|
||||
// the 90 nm process.
|
||||
case 4: // Pentium 4 processor, Pentium 4 processor Extreme Edition,
|
||||
// Pentium D processor, Intel Xeon processor, Intel Xeon
|
||||
// processor MP, Intel Celeron D processor. All processors are
|
||||
// model 04h and manufactured using the 90 nm process.
|
||||
case 6: // Pentium 4 processor, Pentium D processor, Pentium processor
|
||||
// Extreme Edition, Intel Xeon processor, Intel Xeon processor
|
||||
// MP, Intel Celeron D processor. All processors are model 06h
|
||||
// and manufactured using the 65 nm process.
|
||||
*Type =
|
||||
((Features & (1 << FEATURE_EM64T)) ? INTEL_NOCONA : INTEL_PRESCOTT);
|
||||
break;
|
||||
|
||||
default:
|
||||
*Type =
|
||||
((Features & (1 << FEATURE_EM64T)) ? INTEL_X86_64 : INTEL_PENTIUM_IV);
|
||||
break;
|
||||
}
|
||||
}
|
||||
default:
|
||||
break; /*"generic"*/
|
||||
break; // Unknown.
|
||||
}
|
||||
}
|
||||
|
||||
static void getAMDProcessorTypeAndSubtype(unsigned int Family,
|
||||
unsigned int Model,
|
||||
unsigned int Features, unsigned *Type,
|
||||
static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
|
||||
unsigned Features, unsigned *Type,
|
||||
unsigned *Subtype) {
|
||||
// FIXME: this poorly matches the generated SubtargetFeatureKV table. There
|
||||
// appears to be no way to generate the wide variety of AMD-specific targets
|
||||
// from the information returned from CPUID.
|
||||
switch (Family) {
|
||||
case 4:
|
||||
*Type = AMD_i486;
|
||||
case 5:
|
||||
*Type = AMDPENTIUM;
|
||||
switch (Model) {
|
||||
case 6:
|
||||
case 7:
|
||||
*Subtype = AMDPENTIUM_K6;
|
||||
break; // "k6"
|
||||
case 8:
|
||||
*Subtype = AMDPENTIUM_K62;
|
||||
break; // "k6-2"
|
||||
case 9:
|
||||
case 13:
|
||||
*Subtype = AMDPENTIUM_K63;
|
||||
break; // "k6-3"
|
||||
case 10:
|
||||
*Subtype = AMDPENTIUM_GEODE;
|
||||
break; // "geode"
|
||||
default:
|
||||
break;
|
||||
}
|
||||
case 6:
|
||||
*Type = AMDATHLON;
|
||||
switch (Model) {
|
||||
case 4:
|
||||
*Subtype = AMDATHLON_TBIRD;
|
||||
break; // "athlon-tbird"
|
||||
case 6:
|
||||
case 7:
|
||||
case 8:
|
||||
*Subtype = AMDATHLON_MP;
|
||||
break; // "athlon-mp"
|
||||
case 10:
|
||||
*Subtype = AMDATHLON_XP;
|
||||
break; // "athlon-xp"
|
||||
default:
|
||||
break;
|
||||
}
|
||||
case 15:
|
||||
*Type = AMDATHLON;
|
||||
if (Features & (1 << FEATURE_SSE3)) {
|
||||
*Subtype = AMDATHLON_K8SSE3;
|
||||
break; // "k8-sse3"
|
||||
}
|
||||
switch (Model) {
|
||||
case 1:
|
||||
*Subtype = AMDATHLON_OPTERON;
|
||||
break; // "opteron"
|
||||
case 5:
|
||||
*Subtype = AMDATHLON_FX;
|
||||
break; // "athlon-fx"; also opteron
|
||||
default:
|
||||
*Subtype = AMDATHLON_64;
|
||||
break; // "athlon64"
|
||||
}
|
||||
case 16:
|
||||
*Type = AMDFAM10H; // "amdfam10"
|
||||
switch (Model) {
|
||||
@ -643,23 +408,16 @@ static void getAMDProcessorTypeAndSubtype(unsigned int Family,
|
||||
case 8:
|
||||
*Subtype = AMDFAM10H_ISTANBUL;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case 20:
|
||||
*Type = AMDFAM14H;
|
||||
*Subtype = AMD_BTVER1;
|
||||
*Type = AMD_BTVER1;
|
||||
break; // "btver1";
|
||||
case 21:
|
||||
*Type = AMDFAM15H;
|
||||
if (!(Features &
|
||||
(1 << FEATURE_AVX))) { // If no AVX support, provide a sane fallback.
|
||||
*Subtype = AMD_BTVER1;
|
||||
break; // "btver1"
|
||||
}
|
||||
if (Model >= 0x50 && Model <= 0x6f) {
|
||||
if (Model >= 0x60 && Model <= 0x7f) {
|
||||
*Subtype = AMDFAM15H_BDVER4;
|
||||
break; // "bdver4"; 50h-6Fh: Excavator
|
||||
break; // "bdver4"; 60h-7Fh: Excavator
|
||||
}
|
||||
if (Model >= 0x30 && Model <= 0x3f) {
|
||||
*Subtype = AMDFAM15H_BDVER3;
|
||||
@ -675,31 +433,47 @@ static void getAMDProcessorTypeAndSubtype(unsigned int Family,
|
||||
}
|
||||
break;
|
||||
case 22:
|
||||
*Type = AMDFAM16H;
|
||||
if (!(Features &
|
||||
(1 << FEATURE_AVX))) { // If no AVX support provide a sane fallback.
|
||||
*Subtype = AMD_BTVER1;
|
||||
break; // "btver1";
|
||||
}
|
||||
*Subtype = AMD_BTVER2;
|
||||
*Type = AMD_BTVER2;
|
||||
break; // "btver2"
|
||||
case 23:
|
||||
*Type = AMDFAM17H;
|
||||
*Subtype = AMDFAM17H_ZNVER1;
|
||||
break;
|
||||
default:
|
||||
break; // "generic"
|
||||
}
|
||||
}
|
||||
|
||||
static unsigned getAvailableFeatures(unsigned int ECX, unsigned int EDX,
|
||||
unsigned MaxLeaf) {
|
||||
static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
|
||||
unsigned *FeaturesOut) {
|
||||
unsigned Features = 0;
|
||||
unsigned int EAX, EBX;
|
||||
Features |= (((EDX >> 23) & 1) << FEATURE_MMX);
|
||||
Features |= (((EDX >> 25) & 1) << FEATURE_SSE);
|
||||
Features |= (((EDX >> 26) & 1) << FEATURE_SSE2);
|
||||
Features |= (((ECX >> 0) & 1) << FEATURE_SSE3);
|
||||
Features |= (((ECX >> 9) & 1) << FEATURE_SSSE3);
|
||||
Features |= (((ECX >> 19) & 1) << FEATURE_SSE4_1);
|
||||
Features |= (((ECX >> 20) & 1) << FEATURE_SSE4_2);
|
||||
Features |= (((ECX >> 22) & 1) << FEATURE_MOVBE);
|
||||
unsigned EAX, EBX;
|
||||
|
||||
if ((EDX >> 15) & 1)
|
||||
Features |= 1 << FEATURE_CMOV;
|
||||
if ((EDX >> 23) & 1)
|
||||
Features |= 1 << FEATURE_MMX;
|
||||
if ((EDX >> 25) & 1)
|
||||
Features |= 1 << FEATURE_SSE;
|
||||
if ((EDX >> 26) & 1)
|
||||
Features |= 1 << FEATURE_SSE2;
|
||||
|
||||
if ((ECX >> 0) & 1)
|
||||
Features |= 1 << FEATURE_SSE3;
|
||||
if ((ECX >> 1) & 1)
|
||||
Features |= 1 << FEATURE_PCLMUL;
|
||||
if ((ECX >> 9) & 1)
|
||||
Features |= 1 << FEATURE_SSSE3;
|
||||
if ((ECX >> 12) & 1)
|
||||
Features |= 1 << FEATURE_FMA;
|
||||
if ((ECX >> 19) & 1)
|
||||
Features |= 1 << FEATURE_SSE4_1;
|
||||
if ((ECX >> 20) & 1)
|
||||
Features |= 1 << FEATURE_SSE4_2;
|
||||
if ((ECX >> 23) & 1)
|
||||
Features |= 1 << FEATURE_POPCNT;
|
||||
if ((ECX >> 25) & 1)
|
||||
Features |= 1 << FEATURE_AES;
|
||||
|
||||
// If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
|
||||
// indicates that the AVX registers will be saved and restored on context
|
||||
@ -708,20 +482,59 @@ static unsigned getAvailableFeatures(unsigned int ECX, unsigned int EDX,
|
||||
bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) &&
|
||||
((EAX & 0x6) == 0x6);
|
||||
bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0);
|
||||
bool HasLeaf7 = MaxLeaf >= 0x7;
|
||||
getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX);
|
||||
bool HasADX = HasLeaf7 && ((EBX >> 19) & 1);
|
||||
bool HasAVX2 = HasAVX && HasLeaf7 && (EBX & 0x20);
|
||||
bool HasAVX512 = HasLeaf7 && HasAVX512Save && ((EBX >> 16) & 1);
|
||||
Features |= (HasAVX << FEATURE_AVX);
|
||||
Features |= (HasAVX2 << FEATURE_AVX2);
|
||||
Features |= (HasAVX512 << FEATURE_AVX512);
|
||||
Features |= (HasAVX512Save << FEATURE_AVX512SAVE);
|
||||
Features |= (HasADX << FEATURE_ADX);
|
||||
|
||||
getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
|
||||
Features |= (((EDX >> 29) & 0x1) << FEATURE_EM64T);
|
||||
return Features;
|
||||
if (HasAVX)
|
||||
Features |= 1 << FEATURE_AVX;
|
||||
|
||||
bool HasLeaf7 =
|
||||
MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX);
|
||||
|
||||
if (HasLeaf7 && ((EBX >> 3) & 1))
|
||||
Features |= 1 << FEATURE_BMI;
|
||||
if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX)
|
||||
Features |= 1 << FEATURE_AVX2;
|
||||
if (HasLeaf7 && ((EBX >> 9) & 1))
|
||||
Features |= 1 << FEATURE_BMI2;
|
||||
if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512F;
|
||||
if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512DQ;
|
||||
if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512IFMA;
|
||||
if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512PF;
|
||||
if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512ER;
|
||||
if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512CD;
|
||||
if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512BW;
|
||||
if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512VL;
|
||||
|
||||
if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512VBMI;
|
||||
if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX512VPOPCNTDQ;
|
||||
|
||||
if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX5124VNNIW;
|
||||
if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save)
|
||||
Features |= 1 << FEATURE_AVX5124FMAPS;
|
||||
|
||||
unsigned MaxExtLevel;
|
||||
getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
|
||||
|
||||
bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
|
||||
!getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
|
||||
if (HasExtLeaf1 && ((ECX >> 6) & 1))
|
||||
Features |= 1 << FEATURE_SSE4_A;
|
||||
if (HasExtLeaf1 && ((ECX >> 11) & 1))
|
||||
Features |= 1 << FEATURE_XOP;
|
||||
if (HasExtLeaf1 && ((ECX >> 16) & 1))
|
||||
Features |= 1 << FEATURE_FMA4;
|
||||
|
||||
*FeaturesOut = Features;
|
||||
}
|
||||
|
||||
#if defined(HAVE_INIT_PRIORITY)
|
||||
@ -751,11 +564,11 @@ struct __processor_model {
|
||||
|
||||
int CONSTRUCTOR_ATTRIBUTE
|
||||
__cpu_indicator_init(void) {
|
||||
unsigned int EAX, EBX, ECX, EDX;
|
||||
unsigned int MaxLeaf = 5;
|
||||
unsigned int Vendor;
|
||||
unsigned int Model, Family, Brand_id;
|
||||
unsigned int Features = 0;
|
||||
unsigned EAX, EBX, ECX, EDX;
|
||||
unsigned MaxLeaf = 5;
|
||||
unsigned Vendor;
|
||||
unsigned Model, Family, Brand_id;
|
||||
unsigned Features = 0;
|
||||
|
||||
/* This function needs to run just once. */
|
||||
if (__cpu_model.__cpu_vendor)
|
||||
@ -765,9 +578,7 @@ __cpu_indicator_init(void) {
|
||||
return -1;
|
||||
|
||||
/* Assume cpuid insn present. Run in level 0 to get vendor id. */
|
||||
getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX);
|
||||
|
||||
if (MaxLeaf < 1) {
|
||||
if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) {
|
||||
__cpu_model.__cpu_vendor = VENDOR_OTHER;
|
||||
return -1;
|
||||
}
|
||||
@ -776,7 +587,7 @@ __cpu_indicator_init(void) {
|
||||
Brand_id = EBX & 0xff;
|
||||
|
||||
/* Find available features. */
|
||||
Features = getAvailableFeatures(ECX, EDX, MaxLeaf);
|
||||
getAvailableFeatures(ECX, EDX, MaxLeaf, &Features);
|
||||
__cpu_model.__cpu_features[0] = Features;
|
||||
|
||||
if (Vendor == SIG_INTEL) {
|
||||
|
@ -45,6 +45,16 @@ void compilerrt_abort_impl(const char *file, int line, const char *function) {
|
||||
__assert_rtn(function, file, line, "libcompiler_rt abort");
|
||||
}
|
||||
|
||||
#elif __Fuchsia__
|
||||
|
||||
#ifndef _WIN32
|
||||
__attribute__((weak))
|
||||
__attribute__((visibility("hidden")))
|
||||
#endif
|
||||
void compilerrt_abort_impl(const char *file, int line, const char *function) {
|
||||
__builtin_trap();
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
/* Get the system definition of abort() */
|
||||
|
@ -160,15 +160,16 @@ static u32 countAndClearShadowValues(u32 BitIdx, uptr ShadowStart,
|
||||
static u32 computeWorkingSizeAndReset(u32 BitIdx) {
|
||||
u32 WorkingSetSize = 0;
|
||||
MemoryMappingLayout MemIter(true/*cache*/);
|
||||
uptr Start, End, Prot;
|
||||
while (MemIter.Next(&Start, &End, nullptr/*offs*/, nullptr/*file*/,
|
||||
0/*file size*/, &Prot)) {
|
||||
VPrintf(4, "%s: considering %p-%p app=%d shadow=%d prot=%u\n",
|
||||
__FUNCTION__, Start, End, Prot, isAppMem(Start),
|
||||
isShadowMem(Start));
|
||||
if (isShadowMem(Start) && (Prot & MemoryMappingLayout::kProtectionWrite)) {
|
||||
VPrintf(3, "%s: walking %p-%p\n", __FUNCTION__, Start, End);
|
||||
WorkingSetSize += countAndClearShadowValues(BitIdx, Start, End);
|
||||
MemoryMappedSegment Segment;
|
||||
while (MemIter.Next(&Segment)) {
|
||||
VPrintf(4, "%s: considering %p-%p app=%d shadow=%d prot=%u\n", __FUNCTION__,
|
||||
Segment.start, Segment.end, Segment.protection,
|
||||
isAppMem(Segment.start), isShadowMem(Segment.start));
|
||||
if (isShadowMem(Segment.start) && Segment.IsWritable()) {
|
||||
VPrintf(3, "%s: walking %p-%p\n", __FUNCTION__, Segment.start,
|
||||
Segment.end);
|
||||
WorkingSetSize +=
|
||||
countAndClearShadowValues(BitIdx, Segment.start, Segment.end);
|
||||
}
|
||||
}
|
||||
return WorkingSetSize;
|
||||
|
@ -74,6 +74,10 @@ static const char kStdSuppressions[] =
|
||||
// definition.
|
||||
"leak:*pthread_exit*\n"
|
||||
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
|
||||
#if SANITIZER_MAC
|
||||
// For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
|
||||
"leak:*_os_trace*\n"
|
||||
#endif
|
||||
// TLS leak in some glibc versions, described in
|
||||
// https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
|
||||
"leak:*tls_get_addr*\n";
|
||||
@ -301,11 +305,10 @@ static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
|
||||
}
|
||||
|
||||
void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
|
||||
uptr region_begin, uptr region_end, uptr prot) {
|
||||
uptr region_begin, uptr region_end, bool is_readable) {
|
||||
uptr intersection_begin = Max(root_region.begin, region_begin);
|
||||
uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
|
||||
if (intersection_begin >= intersection_end) return;
|
||||
bool is_readable = prot & MemoryMappingLayout::kProtectionRead;
|
||||
LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
|
||||
root_region.begin, root_region.begin + root_region.size,
|
||||
region_begin, region_end,
|
||||
@ -318,11 +321,10 @@ void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
|
||||
static void ProcessRootRegion(Frontier *frontier,
|
||||
const RootRegion &root_region) {
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
|
||||
uptr begin, end, prot;
|
||||
while (proc_maps.Next(&begin, &end,
|
||||
/*offset*/ nullptr, /*filename*/ nullptr,
|
||||
/*filename_size*/ 0, &prot)) {
|
||||
ScanRootRegion(frontier, root_region, begin, end, prot);
|
||||
MemoryMappedSegment segment;
|
||||
while (proc_maps.Next(&segment)) {
|
||||
ScanRootRegion(frontier, root_region, segment.start, segment.end,
|
||||
segment.IsReadable());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -127,7 +127,7 @@ struct RootRegion {
|
||||
|
||||
InternalMmapVector<RootRegion> const *GetRootRegions();
|
||||
void ScanRootRegion(Frontier *frontier, RootRegion const ®ion,
|
||||
uptr region_begin, uptr region_end, uptr prot);
|
||||
uptr region_begin, uptr region_end, bool is_readable);
|
||||
// Run stoptheworld while holding any platform-specific locks.
|
||||
void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
|
||||
|
||||
|
@ -156,7 +156,7 @@ void ProcessPlatformSpecificAllocations(Frontier *frontier) {
|
||||
if (flags()->use_root_regions) {
|
||||
for (uptr i = 0; i < root_regions->size(); i++) {
|
||||
ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
|
||||
info.protection);
|
||||
info.protection & kProtectionRead);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -27,6 +27,7 @@
|
||||
#include "sanitizer_common/sanitizer_allocator_internal.h"
|
||||
#include "sanitizer_common/sanitizer_atomic.h"
|
||||
#include "sanitizer_common/sanitizer_common.h"
|
||||
#include "sanitizer_common/sanitizer_errno.h"
|
||||
#include "sanitizer_common/sanitizer_stackdepot.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
#include "sanitizer_common/sanitizer_linux.h"
|
||||
@ -48,15 +49,9 @@ DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen)
|
||||
DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
|
||||
DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
|
||||
|
||||
#if SANITIZER_FREEBSD
|
||||
#define __errno_location __error
|
||||
#endif
|
||||
|
||||
// True if this is a nested interceptor.
|
||||
static THREADLOCAL int in_interceptor_scope;
|
||||
|
||||
extern "C" int *__errno_location(void);
|
||||
|
||||
struct InterceptorScope {
|
||||
InterceptorScope() { ++in_interceptor_scope; }
|
||||
~InterceptorScope() { --in_interceptor_scope; }
|
||||
@ -915,7 +910,7 @@ INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
|
||||
ENSURE_MSAN_INITED();
|
||||
if (addr && !MEM_IS_APP(addr)) {
|
||||
if (flags & map_fixed) {
|
||||
*__errno_location() = errno_EINVAL;
|
||||
errno = errno_EINVAL;
|
||||
return (void *)-1;
|
||||
} else {
|
||||
addr = nullptr;
|
||||
@ -933,7 +928,7 @@ INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
|
||||
ENSURE_MSAN_INITED();
|
||||
if (addr && !MEM_IS_APP(addr)) {
|
||||
if (flags & map_fixed) {
|
||||
*__errno_location() = errno_EINVAL;
|
||||
errno = errno_EINVAL;
|
||||
return (void *)-1;
|
||||
} else {
|
||||
addr = nullptr;
|
||||
|
@ -107,7 +107,8 @@ bool MprotectNoAccess(uptr addr, uptr size);
|
||||
bool MprotectReadOnly(uptr addr, uptr size);
|
||||
|
||||
// Find an available address space.
|
||||
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);
|
||||
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
|
||||
uptr *largest_gap_found);
|
||||
|
||||
// Used to check if we can map shadow memory to a fixed location.
|
||||
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
|
||||
|
@ -40,6 +40,7 @@
|
||||
|
||||
#include "interception/interception.h"
|
||||
#include "sanitizer_addrhashmap.h"
|
||||
#include "sanitizer_errno.h"
|
||||
#include "sanitizer_placement_new.h"
|
||||
#include "sanitizer_platform_interceptors.h"
|
||||
#include "sanitizer_tls_get_addr.h"
|
||||
|
contrib/compiler-rt/lib/sanitizer_common/sanitizer_errno.cc (new file, 35 lines)
@@ -0,0 +1,35 @@
//===-- sanitizer_errno.cc --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers run-time libraries.
//
// Defines errno to avoid including errno.h and its dependencies into other
// files (e.g. interceptors are not supposed to include any system headers).
//
//===----------------------------------------------------------------------===//

#include "sanitizer_errno_codes.h"
#include "sanitizer_internal_defs.h"

#include <errno.h>

namespace __sanitizer {

COMPILER_CHECK(errno_ENOMEM == ENOMEM);
COMPILER_CHECK(errno_EBUSY == EBUSY);
COMPILER_CHECK(errno_EINVAL == EINVAL);

// EOWNERDEAD is not present in some older platforms.
#if defined(EOWNERDEAD)
extern const int errno_EOWNERDEAD = EOWNERDEAD;
#else
extern const int errno_EOWNERDEAD = -1;
#endif

} // namespace __sanitizer
contrib/compiler-rt/lib/sanitizer_common/sanitizer_errno.h (new file, 35 lines)
@@ -0,0 +1,35 @@
//===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers run-time libraries.
//
// Defines errno to avoid including errno.h and its dependencies into sensitive
// files (e.g. interceptors are not supposed to include any system headers).
// It's ok to use errno.h directly when your file already depend on other system
// includes though.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ERRNO_H
#define SANITIZER_ERRNO_H

#include "sanitizer_errno_codes.h"
#include "sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_MAC
# define __errno_location __error
#elif SANITIZER_ANDROID
# define __errno_location __errno
#endif

extern "C" int *__errno_location();

#define errno (*__errno_location())

#endif // SANITIZER_ERRNO_H
@@ -0,0 +1,34 @@
//===-- sanitizer_errno_codes.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizers run-time libraries.
//
// Defines errno codes to avoid including errno.h and its dependencies into
// sensitive files (e.g. interceptors are not supposed to include any system
// headers).
// It's ok to use errno.h directly when your file already depend on other system
// includes though.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ERRNO_CODES_H
#define SANITIZER_ERRNO_CODES_H

namespace __sanitizer {

#define errno_ENOMEM 12
#define errno_EBUSY 16
#define errno_EINVAL 22

// Those might not present or their value differ on different platforms.
extern const int errno_EOWNERDEAD;

} // namespace __sanitizer

#endif // SANITIZER_ERRNO_CODES_H
@ -59,6 +59,14 @@
|
||||
#include <ucontext.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#if SANITIZER_LINUX
|
||||
#include <sys/utsname.h>
|
||||
#endif
|
||||
|
||||
#if SANITIZER_LINUX && !SANITIZER_ANDROID
|
||||
#include <sys/personality.h>
|
||||
#endif
|
||||
|
||||
#if SANITIZER_FREEBSD
|
||||
#include <sys/exec.h>
|
||||
#include <sys/sysctl.h>
|
||||
@ -209,7 +217,6 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) {
|
||||
out->st_atime = in->st_atime;
|
||||
out->st_mtime = in->st_mtime;
|
||||
out->st_ctime = in->st_ctime;
|
||||
out->st_ino = in->st_ino;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -229,7 +236,6 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
|
||||
out->st_atime = in->st_atime_nsec;
|
||||
out->st_mtime = in->st_mtime_nsec;
|
||||
out->st_ctime = in->st_ctime_nsec;
|
||||
out->st_ino = in->st_ino;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -815,6 +821,72 @@ bool ThreadLister::GetDirectoryEntries() {
|
||||
return true;
|
||||
}
|
||||
|
||||
#if SANITIZER_WORDSIZE == 32
|
||||
// Take care of unusable kernel area in top gigabyte.
|
||||
static uptr GetKernelAreaSize() {
|
||||
#if SANITIZER_LINUX && !SANITIZER_X32
|
||||
const uptr gbyte = 1UL << 30;
|
||||
|
||||
// Firstly check if there are writable segments
|
||||
// mapped to top gigabyte (e.g. stack).
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
|
||||
MemoryMappedSegment segment;
|
||||
while (proc_maps.Next(&segment)) {
|
||||
if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;
|
||||
}
|
||||
|
||||
#if !SANITIZER_ANDROID
|
||||
// Even if nothing is mapped, top Gb may still be accessible
|
||||
// if we are running on 64-bit kernel.
|
||||
// Uname may report misleading results if personality type
|
||||
// is modified (e.g. under schroot) so check this as well.
|
||||
struct utsname uname_info;
|
||||
int pers = personality(0xffffffffUL);
|
||||
if (!(pers & PER_MASK)
|
||||
&& uname(&uname_info) == 0
|
||||
&& internal_strstr(uname_info.machine, "64"))
|
||||
return 0;
|
||||
#endif // SANITIZER_ANDROID
|
||||
|
||||
// Top gigabyte is reserved for kernel.
|
||||
return gbyte;
|
||||
#else
|
||||
return 0;
|
||||
#endif // SANITIZER_LINUX && !SANITIZER_X32
|
||||
}
|
||||
#endif // SANITIZER_WORDSIZE == 32
|
||||
|
||||
uptr GetMaxVirtualAddress() {
|
||||
#if SANITIZER_WORDSIZE == 64
|
||||
# if defined(__powerpc64__) || defined(__aarch64__)
|
||||
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
|
||||
// We somehow need to figure out which one we are using now and choose
|
||||
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
|
||||
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
|
||||
// of the address space, so simply checking the stack address is not enough.
|
||||
// This should (does) work for both PowerPC64 Endian modes.
|
||||
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
|
||||
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
|
||||
# elif defined(__mips64)
|
||||
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
|
||||
# elif defined(__s390x__)
|
||||
return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
|
||||
# else
|
||||
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
|
||||
# endif
|
||||
#else // SANITIZER_WORDSIZE == 32
|
||||
# if defined(__s390__)
|
||||
return (1ULL << 31) - 1; // 0x7fffffff;
|
||||
# else
|
||||
uptr res = (1ULL << 32) - 1; // 0xffffffff;
|
||||
if (!common_flags()->full_address_space)
|
||||
res -= GetKernelAreaSize();
|
||||
CHECK_LT(reinterpret_cast<uptr>(&res), res);
|
||||
return res;
|
||||
# endif
|
||||
#endif // SANITIZER_WORDSIZE
|
||||
}
|
||||
|
||||
uptr GetPageSize() {
|
||||
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
|
||||
#if SANITIZER_ANDROID
|
||||
@ -1599,7 +1671,8 @@ void CheckNoDeepBind(const char *filename, int flag) {
|
||||
#endif
|
||||
}
|
||||
|
||||
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
|
||||
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
|
||||
uptr *largest_gap_found) {
|
||||
UNREACHABLE("FindAvailableMemoryRange is not available");
|
||||
return 0;
|
||||
}
|
||||
|
@ -81,28 +81,25 @@ void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
|
||||
|
||||
// Find the mapping that contains a stack variable.
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
|
||||
uptr start, end, offset;
|
||||
MemoryMappedSegment segment;
|
||||
uptr prev_end = 0;
|
||||
while (proc_maps.Next(&start, &end, &offset, nullptr, 0,
|
||||
/* protection */nullptr)) {
|
||||
if ((uptr)&rl < end)
|
||||
break;
|
||||
prev_end = end;
|
||||
while (proc_maps.Next(&segment)) {
|
||||
if ((uptr)&rl < segment.end) break;
|
||||
prev_end = segment.end;
|
||||
}
|
||||
CHECK((uptr)&rl >= start && (uptr)&rl < end);
|
||||
CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
|
||||
|
||||
// Get stacksize from rlimit, but clip it so that it does not overlap
|
||||
// with other mappings.
|
||||
uptr stacksize = rl.rlim_cur;
|
||||
if (stacksize > end - prev_end)
|
||||
stacksize = end - prev_end;
|
||||
if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
|
||||
// When running with unlimited stack size, we still want to set some limit.
|
||||
// The unlimited stack size is caused by 'ulimit -s unlimited'.
|
||||
// Also, for some reason, GNU make spawns subprocesses with unlimited stack.
|
||||
if (stacksize > kMaxThreadStackSize)
|
||||
stacksize = kMaxThreadStackSize;
|
||||
*stack_top = end;
|
||||
*stack_bottom = end - stacksize;
|
||||
*stack_top = segment.end;
|
||||
*stack_bottom = segment.end - stacksize;
|
||||
return;
|
||||
}
|
||||
pthread_attr_t attr;
|
||||
|
@ -191,7 +191,8 @@ void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); }
|
||||
|
||||
uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
|
||||
__sanitizer_sigset_t *oldset) {
|
||||
return sigprocmask(how, set, oldset);
|
||||
// Don't use sigprocmask here, because it affects all threads.
|
||||
return pthread_sigmask(how, set, oldset);
|
||||
}
|
||||
|
||||
// Doesn't call pthread_atfork() handlers (but not available on 10.6).
|
||||
@ -799,9 +800,48 @@ char **GetArgv() {
|
||||
return *_NSGetArgv();
|
||||
}
|
||||
|
||||
#if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM
|
||||
// The task_vm_info struct is normally provided by the macOS SDK, but we need
|
||||
// fields only available in 10.12+. Declare the struct manually to be able to
|
||||
// build against older SDKs.
|
||||
struct __sanitizer_task_vm_info {
|
||||
uptr _unused[(SANITIZER_WORDSIZE == 32) ? 20 : 19];
|
||||
uptr min_address;
|
||||
uptr max_address;
|
||||
};
|
||||
|
||||
uptr GetTaskInfoMaxAddress() {
|
||||
__sanitizer_task_vm_info vm_info = {{0}, 0, 0};
|
||||
mach_msg_type_number_t count = sizeof(vm_info) / sizeof(int);
|
||||
int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);
|
||||
if (err == 0) {
|
||||
return vm_info.max_address - 1;
|
||||
} else {
|
||||
// xnu cannot provide vm address limit
|
||||
return 0x200000000 - 1;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
uptr GetMaxVirtualAddress() {
|
||||
#if SANITIZER_WORDSIZE == 64
|
||||
# if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM
|
||||
// Get the maximum VM address
|
||||
static uptr max_vm = GetTaskInfoMaxAddress();
|
||||
CHECK(max_vm);
|
||||
return max_vm;
|
||||
# else
|
||||
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
|
||||
# endif
|
||||
#else // SANITIZER_WORDSIZE == 32
|
||||
return (1ULL << 32) - 1; // 0xffffffff;
|
||||
#endif // SANITIZER_WORDSIZE
|
||||
}
|
||||
|
||||
uptr FindAvailableMemoryRange(uptr shadow_size,
|
||||
uptr alignment,
|
||||
uptr left_padding) {
|
||||
uptr left_padding,
|
||||
uptr *largest_gap_found) {
|
||||
typedef vm_region_submap_short_info_data_64_t RegionInfo;
|
||||
enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
|
||||
// Start searching for available memory region past PAGEZERO, which is
|
||||
@ -812,6 +852,7 @@ uptr FindAvailableMemoryRange(uptr shadow_size,
|
||||
mach_vm_address_t address = start_address;
|
||||
mach_vm_address_t free_begin = start_address;
|
||||
kern_return_t kr = KERN_SUCCESS;
|
||||
if (largest_gap_found) *largest_gap_found = 0;
|
||||
while (kr == KERN_SUCCESS) {
|
||||
mach_vm_size_t vmsize = 0;
|
||||
natural_t depth = 0;
|
||||
@ -821,10 +862,15 @@ uptr FindAvailableMemoryRange(uptr shadow_size,
|
||||
(vm_region_info_t)&vminfo, &count);
|
||||
if (free_begin != address) {
|
||||
// We found a free region [free_begin..address-1].
|
||||
uptr shadow_address = RoundUpTo((uptr)free_begin + left_padding,
|
||||
alignment);
|
||||
if (shadow_address + shadow_size < (uptr)address) {
|
||||
return shadow_address;
|
||||
uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
|
||||
uptr gap_end = RoundDownTo((uptr)address, alignment);
|
||||
uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
|
||||
if (shadow_size < gap_size) {
|
||||
return gap_start;
|
||||
}
|
||||
|
||||
if (largest_gap_found && *largest_gap_found < gap_size) {
|
||||
*largest_gap_found = gap_size;
|
||||
}
|
||||
}
|
||||
// Move to the next region.
|
||||
|
@ -36,6 +36,8 @@ MacosVersion GetMacosVersion();
|
||||
|
||||
char **GetEnviron();
|
||||
|
||||
void RestrictMemoryToMaxAddress(uptr max_address);
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
extern "C" {
|
||||
|
@ -0,0 +1,30 @@
|
||||
//===-- sanitizer_mac_libcdep.cc ------------------------------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file is shared between various sanitizers' runtime libraries and
|
||||
// implements OSX-specific functions.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_platform.h"
|
||||
#if SANITIZER_MAC
|
||||
#include "sanitizer_mac.h"
|
||||
|
||||
#include <sys/mman.h>
|
||||
|
||||
namespace __sanitizer {
|
||||
|
||||
void RestrictMemoryToMaxAddress(uptr max_address) {
|
||||
uptr size_to_mmap = GetMaxVirtualAddress() + 1 - max_address;
|
||||
void *res = MmapFixedNoAccess(max_address, size_to_mmap, "high gap");
|
||||
CHECK(res != MAP_FAILED);
|
||||
}
|
||||
|
||||
} // namespace __sanitizer
|
||||
|
||||
#endif // SANITIZER_MAC
|
@ -25,7 +25,6 @@
|
||||
#endif
|
||||
#include <arpa/inet.h>
|
||||
#include <dirent.h>
|
||||
#include <errno.h>
|
||||
#include <grp.h>
|
||||
#include <limits.h>
|
||||
#include <net/if.h>
|
||||
@ -931,14 +930,6 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
|
||||
unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
|
||||
#endif // (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
|
||||
|
||||
const int errno_EINVAL = EINVAL;
|
||||
// EOWNERDEAD is not present in some older platforms.
|
||||
#if defined(EOWNERDEAD)
|
||||
const int errno_EOWNERDEAD = EOWNERDEAD;
|
||||
#else
|
||||
const int errno_EOWNERDEAD = -1;
|
||||
#endif
|
||||
|
||||
const int si_SEGV_MAPERR = SEGV_MAPERR;
|
||||
const int si_SEGV_ACCERR = SEGV_ACCERR;
|
||||
} // namespace __sanitizer
|
||||
|
@ -1468,9 +1468,6 @@ struct __sanitizer_cookie_io_functions_t {
|
||||
extern unsigned IOCTL_PIO_SCRNMAP;
|
||||
#endif
|
||||
|
||||
extern const int errno_EINVAL;
|
||||
extern const int errno_EOWNERDEAD;
|
||||
|
||||
extern const int si_SEGV_MAPERR;
|
||||
extern const int si_SEGV_ACCERR;
|
||||
} // namespace __sanitizer
|
||||
|
@ -27,14 +27,6 @@
|
||||
#include <signal.h>
|
||||
#include <sys/mman.h>
|
||||
|
||||
#if SANITIZER_LINUX
|
||||
#include <sys/utsname.h>
|
||||
#endif
|
||||
|
||||
#if SANITIZER_LINUX && !SANITIZER_ANDROID
|
||||
#include <sys/personality.h>
|
||||
#endif
|
||||
|
||||
#if SANITIZER_FREEBSD
|
||||
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
|
||||
// that, it was never implemented. So just define it to zero.
|
||||
@ -49,80 +41,6 @@ uptr GetMmapGranularity() {
|
||||
return GetPageSize();
|
||||
}
|
||||
|
||||
#if SANITIZER_WORDSIZE == 32
|
||||
// Take care of unusable kernel area in top gigabyte.
|
||||
static uptr GetKernelAreaSize() {
|
||||
#if SANITIZER_LINUX && !SANITIZER_X32
|
||||
const uptr gbyte = 1UL << 30;
|
||||
|
||||
// Firstly check if there are writable segments
|
||||
// mapped to top gigabyte (e.g. stack).
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
|
||||
uptr end, prot;
|
||||
while (proc_maps.Next(/*start*/nullptr, &end,
|
||||
/*offset*/nullptr, /*filename*/nullptr,
|
||||
/*filename_size*/0, &prot)) {
|
||||
if ((end >= 3 * gbyte)
|
||||
&& (prot & MemoryMappingLayout::kProtectionWrite) != 0)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#if !SANITIZER_ANDROID
|
||||
// Even if nothing is mapped, top Gb may still be accessible
|
||||
// if we are running on 64-bit kernel.
|
||||
// Uname may report misleading results if personality type
|
||||
// is modified (e.g. under schroot) so check this as well.
|
||||
struct utsname uname_info;
|
||||
int pers = personality(0xffffffffUL);
|
||||
if (!(pers & PER_MASK)
|
||||
&& uname(&uname_info) == 0
|
||||
&& internal_strstr(uname_info.machine, "64"))
|
||||
return 0;
|
||||
#endif // SANITIZER_ANDROID
|
||||
|
||||
// Top gigabyte is reserved for kernel.
|
||||
return gbyte;
|
||||
#else
|
||||
return 0;
|
||||
#endif // SANITIZER_LINUX && !SANITIZER_X32
|
||||
}
|
||||
#endif // SANITIZER_WORDSIZE == 32
|
||||
|
||||
uptr GetMaxVirtualAddress() {
|
||||
#if SANITIZER_WORDSIZE == 64
|
||||
# if defined(__aarch64__) && SANITIZER_IOS && !SANITIZER_IOSSIM
|
||||
// Ideally, we would derive the upper bound from MACH_VM_MAX_ADDRESS. The
|
||||
// upper bound can change depending on the device.
|
||||
return 0x200000000 - 1;
|
||||
# elif defined(__powerpc64__) || defined(__aarch64__)
|
||||
// On PowerPC64 we have two different address space layouts: 44- and 46-bit.
|
||||
// We somehow need to figure out which one we are using now and choose
|
||||
// one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
|
||||
// Note that with 'ulimit -s unlimited' the stack is moved away from the top
|
||||
// of the address space, so simply checking the stack address is not enough.
|
||||
// This should (does) work for both PowerPC64 Endian modes.
|
||||
// Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
|
||||
return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
|
||||
# elif defined(__mips64)
|
||||
return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
|
||||
# elif defined(__s390x__)
|
||||
return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
|
||||
# else
|
||||
return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
|
||||
# endif
|
||||
#else // SANITIZER_WORDSIZE == 32
|
||||
# if defined(__s390__)
|
||||
return (1ULL << 31) - 1; // 0x7fffffff;
|
||||
# else
|
||||
uptr res = (1ULL << 32) - 1; // 0xffffffff;
|
||||
if (!common_flags()->full_address_space)
|
||||
res -= GetKernelAreaSize();
|
||||
CHECK_LT(reinterpret_cast<uptr>(&res), res);
|
||||
return res;
|
||||
# endif
|
||||
#endif // SANITIZER_WORDSIZE
|
||||
}
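The PowerPC64/AArch64 branch above infers the usable VA size from the address of the current stack frame. A standalone sketch of that bit trick with an invented frame address (MsbIndex below is a simplified stand-in for the sanitizer's MostSignificantSetBitIndex):

// Sketch only, not part of the commit.
#include <cassert>
#include <cstdint>

// Simplified stand-in for MostSignificantSetBitIndex(); x must be non-zero.
static unsigned MsbIndex(uint64_t x) {
  unsigned i = 63;
  while (!(x & (1ull << i))) i--;
  return i;
}

int main() {
  // Hypothetical stack address on a 46-bit VMA; the real code uses
  // GET_CURRENT_FRAME() instead of a constant.
  uint64_t frame = 0x00003ffffe8d4b20ull;
  uint64_t max_va = (1ull << (MsbIndex(frame) + 1)) - 1;
  assert(max_va == 0x00003fffffffffffull);  // 46-bit layout detected
  return 0;
}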
|
||||
|
||||
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
|
||||
size = RoundUpTo(size, GetPageSizeCached());
|
||||
uptr res = internal_mmap(nullptr, size,
|
||||
@ -162,7 +80,7 @@ void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
|
||||
}
|
||||
|
||||
// We want to map a chunk of address space aligned to 'alignment'.
|
||||
// We do it by maping a bit more and then unmaping redundant pieces.
|
||||
// We do it by mapping a bit more and then unmapping redundant pieces.
|
||||
// We probably can do it with fewer syscalls in some OS-dependent way.
|
||||
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
|
||||
const char *mem_type) {
|
||||
@ -313,13 +231,12 @@ static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
|
||||
// memory).
|
||||
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
|
||||
uptr start, end;
|
||||
while (proc_maps.Next(&start, &end,
|
||||
/*offset*/nullptr, /*filename*/nullptr,
|
||||
/*filename_size*/0, /*protection*/nullptr)) {
|
||||
if (start == end) continue; // Empty range.
|
||||
CHECK_NE(0, end);
|
||||
if (!IntervalsAreSeparate(start, end - 1, range_start, range_end))
|
||||
MemoryMappedSegment segment;
|
||||
while (proc_maps.Next(&segment)) {
|
||||
if (segment.start == segment.end) continue; // Empty range.
|
||||
CHECK_NE(0, segment.end);
|
||||
if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
|
||||
range_end))
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@ -327,13 +244,13 @@ bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
|
||||
|
||||
void DumpProcessMap() {
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
|
||||
uptr start, end;
|
||||
const sptr kBufSize = 4095;
|
||||
char *filename = (char*)MmapOrDie(kBufSize, __func__);
|
||||
MemoryMappedSegment segment(filename, kBufSize);
|
||||
Report("Process memory map follows:\n");
|
||||
while (proc_maps.Next(&start, &end, /* file_offset */nullptr,
|
||||
filename, kBufSize, /* protection */nullptr)) {
|
||||
Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
|
||||
while (proc_maps.Next(&segment)) {
|
||||
Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
|
||||
segment.filename);
|
||||
}
|
||||
Report("End of process memory map.\n");
|
||||
UnmapOrDie(filename, kBufSize);
|
||||
@ -363,14 +280,14 @@ void ReportFile::Write(const char *buffer, uptr length) {
|
||||
}
|
||||
|
||||
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
|
||||
uptr s, e, off, prot;
|
||||
InternalScopedString buff(kMaxPathLength);
|
||||
MemoryMappingLayout proc_maps(/*cache_enabled*/false);
|
||||
while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) {
|
||||
if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
|
||||
&& internal_strcmp(module, buff.data()) == 0) {
|
||||
*start = s;
|
||||
*end = e;
|
||||
InternalScopedString buff(kMaxPathLength);
|
||||
MemoryMappedSegment segment(buff.data(), kMaxPathLength);
|
||||
while (proc_maps.Next(&segment)) {
|
||||
if (segment.IsExecutable() &&
|
||||
internal_strcmp(module, segment.filename) == 0) {
|
||||
*start = segment.start;
|
||||
*end = segment.end;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -31,13 +31,37 @@ struct ProcSelfMapsBuff {
|
||||
void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
|
||||
#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
|
||||
|
||||
// Memory protection masks.
static const uptr kProtectionRead = 1;
static const uptr kProtectionWrite = 2;
static const uptr kProtectionExecute = 4;
static const uptr kProtectionShared = 8;

struct MemoryMappedSegment {
  MemoryMappedSegment(char *buff = nullptr, uptr size = 0)
      : filename(buff), filename_size(size) {}
  ~MemoryMappedSegment() {}

  bool IsReadable() { return protection & kProtectionRead; }
  bool IsWritable() { return protection & kProtectionWrite; }
  bool IsExecutable() { return protection & kProtectionExecute; }
  bool IsShared() { return protection & kProtectionShared; }

  uptr start;
  uptr end;
  uptr offset;
  char *filename;  // owned by caller
  uptr filename_size;
  uptr protection;
  ModuleArch arch;
  u8 uuid[kModuleUUIDSize];
};

class MemoryMappingLayout {
 public:
  explicit MemoryMappingLayout(bool cache_enabled);
  ~MemoryMappingLayout();
  bool Next(uptr *start, uptr *end, uptr *offset, char filename[],
            uptr filename_size, uptr *protection, ModuleArch *arch = nullptr,
            u8 *uuid = nullptr);
  bool Next(MemoryMappedSegment *segment);
  void Reset();
  // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
  // to obtain the memory mappings. It should fall back to pre-cached data
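A minimal usage sketch of the segment-based iteration introduced above, mirroring the updated callers elsewhere in this commit (e.g. GetCodeRangeForFile); the function name and buffer size are invented for the example, and the sanitizer-internal types are assumed to come from this header:

// Sketch only: find the executable mapping that contains a given PC using the
// new MemoryMappedSegment-based Next(). Not part of the commit itself.
#include "sanitizer_procmaps.h"

namespace __sanitizer {

static bool FindExecutableSegmentForPC(uptr pc, uptr *start, uptr *end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  char filename[256];
  // The segment only borrows the caller-owned filename buffer.
  MemoryMappedSegment segment(filename, sizeof(filename));
  while (proc_maps.Next(&segment)) {
    if (segment.IsExecutable() && pc >= segment.start && pc < segment.end) {
      *start = segment.start;
      *end = segment.end;
      return true;
    }
  }
  return false;
}

}  // namespace __sanitizer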
|
||||
@ -47,12 +71,6 @@ class MemoryMappingLayout {
|
||||
// Adds all mapped objects into a vector.
|
||||
void DumpListOfModules(InternalMmapVector<LoadedModule> *modules);
|
||||
|
||||
// Memory protection masks.
|
||||
static const uptr kProtectionRead = 1;
|
||||
static const uptr kProtectionWrite = 2;
|
||||
static const uptr kProtectionExecute = 4;
|
||||
static const uptr kProtectionShared = 8;
|
||||
|
||||
private:
|
||||
void LoadFromCache();
|
||||
|
||||
@ -67,10 +85,7 @@ class MemoryMappingLayout {
|
||||
static StaticSpinMutex cache_lock_; // protects cached_proc_self_maps_.
|
||||
# elif SANITIZER_MAC
|
||||
template <u32 kLCSegment, typename SegmentCommand>
|
||||
bool NextSegmentLoad(uptr *start, uptr *end, uptr *offset, char filename[],
|
||||
uptr filename_size, ModuleArch *arch, u8 *uuid,
|
||||
uptr *protection);
|
||||
void GetSegmentAddrRange(uptr *start, uptr *end, uptr vmaddr, uptr vmsize);
|
||||
bool NextSegmentLoad(MemoryMappedSegment *segment);
|
||||
int current_image_;
|
||||
u32 current_magic_;
|
||||
u32 current_filetype_;
|
||||
|
@ -119,12 +119,10 @@ void MemoryMappingLayout::LoadFromCache() {
|
||||
void MemoryMappingLayout::DumpListOfModules(
|
||||
InternalMmapVector<LoadedModule> *modules) {
|
||||
Reset();
|
||||
uptr cur_beg, cur_end, cur_offset, prot;
|
||||
InternalScopedString module_name(kMaxPathLength);
|
||||
for (uptr i = 0; Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
|
||||
module_name.size(), &prot);
|
||||
i++) {
|
||||
const char *cur_name = module_name.data();
|
||||
MemoryMappedSegment segment(module_name.data(), module_name.size());
|
||||
for (uptr i = 0; Next(&segment); i++) {
|
||||
const char *cur_name = segment.filename;
|
||||
if (cur_name[0] == '\0')
|
||||
continue;
|
||||
// Don't subtract 'cur_beg' from the first entry:
|
||||
@ -138,11 +136,11 @@ void MemoryMappingLayout::DumpListOfModules(
|
||||
// mapped high at address space (in particular, higher than
|
||||
// shadow memory of the tool), so the module can't be the
|
||||
// first entry.
|
||||
uptr base_address = (i ? cur_beg : 0) - cur_offset;
|
||||
uptr base_address = (i ? segment.start : 0) - segment.offset;
|
||||
LoadedModule cur_module;
|
||||
cur_module.set(cur_name, base_address);
|
||||
cur_module.addAddressRange(cur_beg, cur_end, prot & kProtectionExecute,
|
||||
prot & kProtectionWrite);
|
||||
cur_module.addAddressRange(segment.start, segment.end,
|
||||
segment.IsExecutable(), segment.IsWritable());
|
||||
modules->push_back(cur_module);
|
||||
}
|
||||
}
|
||||
|
@ -48,36 +48,27 @@ void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
|
||||
proc_maps->len = Size;
|
||||
}
|
||||
|
||||
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
|
||||
char filename[], uptr filename_size,
|
||||
uptr *protection, ModuleArch *arch, u8 *uuid) {
|
||||
CHECK(!arch && "not implemented");
|
||||
CHECK(!uuid && "not implemented");
|
||||
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
|
||||
char *last = proc_self_maps_.data + proc_self_maps_.len;
|
||||
if (current_ >= last) return false;
|
||||
uptr dummy;
|
||||
if (!start) start = &dummy;
|
||||
if (!end) end = &dummy;
|
||||
if (!offset) offset = &dummy;
|
||||
if (!protection) protection = &dummy;
|
||||
struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
|
||||
|
||||
*start = (uptr)VmEntry->kve_start;
|
||||
*end = (uptr)VmEntry->kve_end;
|
||||
*offset = (uptr)VmEntry->kve_offset;
|
||||
segment->start = (uptr)VmEntry->kve_start;
|
||||
segment->end = (uptr)VmEntry->kve_end;
|
||||
segment->offset = (uptr)VmEntry->kve_offset;
|
||||
|
||||
*protection = 0;
|
||||
segment->protection = 0;
|
||||
if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
|
||||
*protection |= kProtectionRead;
|
||||
segment->protection |= kProtectionRead;
|
||||
if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
|
||||
*protection |= kProtectionWrite;
|
||||
segment->protection |= kProtectionWrite;
|
||||
if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
|
||||
*protection |= kProtectionExecute;
|
||||
segment->protection |= kProtectionExecute;
|
||||
|
||||
if (filename != NULL && filename_size > 0) {
|
||||
internal_snprintf(filename,
|
||||
Min(filename_size, (uptr)PATH_MAX),
|
||||
"%s", VmEntry->kve_path);
|
||||
if (segment->filename != NULL && segment->filename_size > 0) {
|
||||
internal_snprintf(segment->filename,
|
||||
Min(segment->filename_size, (uptr)PATH_MAX), "%s",
|
||||
VmEntry->kve_path);
|
||||
}
|
||||
|
||||
current_ += VmEntry->kve_structsize;
|
||||
|
@ -26,41 +26,28 @@ static bool IsOneOf(char c, char c1, char c2) {
|
||||
return c == c1 || c == c2;
|
||||
}
|
||||
|
||||
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
|
||||
char filename[], uptr filename_size,
|
||||
uptr *protection, ModuleArch *arch, u8 *uuid) {
|
||||
CHECK(!arch && "not implemented");
|
||||
CHECK(!uuid && "not implemented");
|
||||
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
|
||||
char *last = proc_self_maps_.data + proc_self_maps_.len;
|
||||
if (current_ >= last) return false;
|
||||
uptr dummy;
|
||||
if (!start) start = &dummy;
|
||||
if (!end) end = &dummy;
|
||||
if (!offset) offset = &dummy;
|
||||
if (!protection) protection = &dummy;
|
||||
char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
|
||||
if (next_line == 0)
|
||||
next_line = last;
|
||||
// Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
|
||||
*start = ParseHex(&current_);
|
||||
segment->start = ParseHex(&current_);
|
||||
CHECK_EQ(*current_++, '-');
|
||||
*end = ParseHex(&current_);
|
||||
segment->end = ParseHex(&current_);
|
||||
CHECK_EQ(*current_++, ' ');
|
||||
CHECK(IsOneOf(*current_, '-', 'r'));
|
||||
*protection = 0;
|
||||
if (*current_++ == 'r')
|
||||
*protection |= kProtectionRead;
|
||||
segment->protection = 0;
|
||||
if (*current_++ == 'r') segment->protection |= kProtectionRead;
|
||||
CHECK(IsOneOf(*current_, '-', 'w'));
|
||||
if (*current_++ == 'w')
|
||||
*protection |= kProtectionWrite;
|
||||
if (*current_++ == 'w') segment->protection |= kProtectionWrite;
|
||||
CHECK(IsOneOf(*current_, '-', 'x'));
|
||||
if (*current_++ == 'x')
|
||||
*protection |= kProtectionExecute;
|
||||
if (*current_++ == 'x') segment->protection |= kProtectionExecute;
|
||||
CHECK(IsOneOf(*current_, 's', 'p'));
|
||||
if (*current_++ == 's')
|
||||
*protection |= kProtectionShared;
|
||||
if (*current_++ == 's') segment->protection |= kProtectionShared;
|
||||
CHECK_EQ(*current_++, ' ');
|
||||
*offset = ParseHex(&current_);
|
||||
segment->offset = ParseHex(&current_);
|
||||
CHECK_EQ(*current_++, ' ');
|
||||
ParseHex(&current_);
|
||||
CHECK_EQ(*current_++, ':');
|
||||
@ -75,14 +62,12 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
|
||||
while (current_ < next_line && *current_ == ' ')
|
||||
current_++;
|
||||
// Fill in the filename.
|
||||
uptr i = 0;
|
||||
while (current_ < next_line) {
|
||||
if (filename && i < filename_size - 1)
|
||||
filename[i++] = *current_;
|
||||
current_++;
|
||||
if (segment->filename) {
|
||||
uptr len = Min((uptr)(next_line - current_), segment->filename_size - 1);
|
||||
internal_strncpy(segment->filename, current_, len);
|
||||
segment->filename[len] = 0;
|
||||
}
|
||||
if (filename && i < filename_size)
|
||||
filename[i] = 0;
|
||||
|
||||
current_ = next_line + 1;
|
||||
return true;
|
||||
}
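To make the parser concrete: the example line from its comment, "08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar", should leave the segment as follows. A hand-written sketch assuming `segment` was just filled by a successful Next() call positioned on that line; it is not a test that ships with the commit.

// Expected field values for the example line (sketch, not committed code):
CHECK_EQ(segment.start, 0x08048000);
CHECK_EQ(segment.end, 0x08056000);
CHECK_EQ(segment.offset, 0);
CHECK(segment.IsReadable() && segment.IsExecutable());
CHECK(!segment.IsWritable());
CHECK(!segment.IsShared());  // the trailing 'p' marks a private mapping
CHECK_EQ(internal_strcmp(segment.filename, "/foo/bar"), 0);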
|
||||
|
@ -88,6 +88,48 @@ void MemoryMappingLayout::LoadFromCache() {
|
||||
// No-op on Mac for now.
|
||||
}
|
||||
|
||||
// _dyld_get_image_header() and related APIs don't report dyld itself.
|
||||
// We work around this by manually recursing through the memory map
|
||||
// until we hit a Mach header matching dyld instead. These recurse
|
||||
// calls are expensive, but the first memory map generation occurs
|
||||
// early in the process, when dyld is one of the only images loaded,
|
||||
// so it will be hit after only a few iterations.
|
||||
static mach_header *get_dyld_image_header() {
|
||||
mach_port_name_t port;
|
||||
if (task_for_pid(mach_task_self(), internal_getpid(), &port) !=
|
||||
KERN_SUCCESS) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
unsigned depth = 1;
|
||||
vm_size_t size = 0;
|
||||
vm_address_t address = 0;
|
||||
kern_return_t err = KERN_SUCCESS;
|
||||
mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
|
||||
|
||||
while (true) {
|
||||
struct vm_region_submap_info_64 info;
|
||||
err = vm_region_recurse_64(port, &address, &size, &depth,
|
||||
(vm_region_info_t)&info, &count);
|
||||
if (err != KERN_SUCCESS) return nullptr;
|
||||
|
||||
if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
|
||||
mach_header *hdr = (mach_header *)address;
|
||||
if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
|
||||
hdr->filetype == MH_DYLINKER) {
|
||||
return hdr;
|
||||
}
|
||||
}
|
||||
address += size;
|
||||
}
|
||||
}
|
||||
|
||||
const mach_header *get_dyld_hdr() {
|
||||
if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
|
||||
|
||||
return dyld_hdr;
|
||||
}
|
||||
|
||||
// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
|
||||
// Google Perftools, https://github.com/gperftools/gperftools.
|
||||
|
||||
@ -96,40 +138,39 @@ void MemoryMappingLayout::LoadFromCache() {
|
||||
// segment.
|
||||
// Note that the segment addresses are not necessarily sorted.
|
||||
template <u32 kLCSegment, typename SegmentCommand>
|
||||
bool MemoryMappingLayout::NextSegmentLoad(uptr *start, uptr *end, uptr *offset,
|
||||
char filename[], uptr filename_size,
|
||||
ModuleArch *arch, u8 *uuid,
|
||||
uptr *protection) {
|
||||
bool MemoryMappingLayout::NextSegmentLoad(MemoryMappedSegment *segment) {
|
||||
const char *lc = current_load_cmd_addr_;
|
||||
current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
|
||||
if (((const load_command *)lc)->cmd == kLCSegment) {
|
||||
const SegmentCommand* sc = (const SegmentCommand *)lc;
|
||||
GetSegmentAddrRange(start, end, sc->vmaddr, sc->vmsize);
|
||||
if (protection) {
|
||||
// Return the initial protection.
|
||||
*protection = sc->initprot;
|
||||
|
||||
if (current_image_ == kDyldImageIdx) {
|
||||
// vmaddr is masked with 0xfffff because on macOS versions < 10.12,
|
||||
// it contains an absolute address rather than an offset for dyld.
|
||||
// To make matters even more complicated, this absolute address
|
||||
// isn't actually the absolute segment address, but the offset portion
|
||||
// of the address is accurate when combined with the dyld base address,
|
||||
// and the mask will give just this offset.
|
||||
segment->start = (sc->vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
|
||||
segment->end = (sc->vmaddr & 0xfffff) + sc->vmsize + (uptr)get_dyld_hdr();
|
||||
} else {
|
||||
const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
|
||||
segment->start = sc->vmaddr + dlloff;
|
||||
segment->end = sc->vmaddr + sc->vmsize + dlloff;
|
||||
}
|
||||
if (offset) {
|
||||
if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
|
||||
*offset = sc->vmaddr;
|
||||
} else {
|
||||
*offset = sc->fileoff;
|
||||
}
|
||||
}
|
||||
if (filename) {
|
||||
if (current_image_ == kDyldImageIdx) {
|
||||
internal_strncpy(filename, kDyldPath, filename_size);
|
||||
} else {
|
||||
internal_strncpy(filename, _dyld_get_image_name(current_image_),
|
||||
filename_size);
|
||||
}
|
||||
}
|
||||
if (arch) {
|
||||
*arch = current_arch_;
|
||||
}
|
||||
if (uuid) {
|
||||
internal_memcpy(uuid, current_uuid_, kModuleUUIDSize);
|
||||
|
||||
// Return the initial protection.
|
||||
segment->protection = sc->initprot;
|
||||
segment->offset =
|
||||
(current_filetype_ == /*MH_EXECUTE*/ 0x2) ? sc->vmaddr : sc->fileoff;
|
||||
if (segment->filename) {
|
||||
const char *src = (current_image_ == kDyldImageIdx)
|
||||
? kDyldPath
|
||||
: _dyld_get_image_name(current_image_);
|
||||
internal_strncpy(segment->filename, src, segment->filename_size);
|
||||
}
|
||||
segment->arch = current_arch_;
|
||||
internal_memcpy(segment->uuid, current_uuid_, kModuleUUIDSize);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
@ -190,70 +231,7 @@ static bool IsModuleInstrumented(const load_command *first_lc) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// _dyld_get_image_header() and related APIs don't report dyld itself.
|
||||
// We work around this by manually recursing through the memory map
|
||||
// until we hit a Mach header matching dyld instead. These recurse
|
||||
// calls are expensive, but the first memory map generation occurs
|
||||
// early in the process, when dyld is one of the only images loaded,
|
||||
// so it will be hit after only a few iterations.
|
||||
static mach_header *get_dyld_image_header() {
|
||||
mach_port_name_t port;
|
||||
if (task_for_pid(mach_task_self(), internal_getpid(), &port) !=
|
||||
KERN_SUCCESS) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
unsigned depth = 1;
|
||||
vm_size_t size = 0;
|
||||
vm_address_t address = 0;
|
||||
kern_return_t err = KERN_SUCCESS;
|
||||
mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
|
||||
|
||||
while (true) {
|
||||
struct vm_region_submap_info_64 info;
|
||||
err = vm_region_recurse_64(port, &address, &size, &depth,
|
||||
(vm_region_info_t)&info, &count);
|
||||
if (err != KERN_SUCCESS) return nullptr;
|
||||
|
||||
if (size >= sizeof(mach_header) &&
|
||||
info.protection & MemoryMappingLayout::kProtectionRead) {
|
||||
mach_header *hdr = (mach_header *)address;
|
||||
if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
|
||||
hdr->filetype == MH_DYLINKER) {
|
||||
return hdr;
|
||||
}
|
||||
}
|
||||
address += size;
|
||||
}
|
||||
}
|
||||
|
||||
const mach_header *get_dyld_hdr() {
|
||||
if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
|
||||
|
||||
return dyld_hdr;
|
||||
}
|
||||
|
||||
void MemoryMappingLayout::GetSegmentAddrRange(uptr *start, uptr *end,
|
||||
uptr vmaddr, uptr vmsize) {
|
||||
if (current_image_ == kDyldImageIdx) {
|
||||
// vmaddr is masked with 0xfffff because on macOS versions < 10.12,
|
||||
// it contains an absolute address rather than an offset for dyld.
|
||||
// To make matters even more complicated, this absolute address
|
||||
// isn't actually the absolute segment address, but the offset portion
|
||||
// of the address is accurate when combined with the dyld base address,
|
||||
// and the mask will give just this offset.
|
||||
if (start) *start = (vmaddr & 0xfffff) + (uptr)get_dyld_hdr();
|
||||
if (end) *end = (vmaddr & 0xfffff) + vmsize + (uptr)get_dyld_hdr();
|
||||
} else {
|
||||
const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
|
||||
if (start) *start = vmaddr + dlloff;
|
||||
if (end) *end = vmaddr + vmsize + dlloff;
|
||||
}
|
||||
}
|
||||
|
||||
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
|
||||
char filename[], uptr filename_size,
|
||||
uptr *protection, ModuleArch *arch, u8 *uuid) {
|
||||
bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
|
||||
for (; current_image_ >= kDyldImageIdx; current_image_--) {
|
||||
const mach_header *hdr = (current_image_ == kDyldImageIdx)
|
||||
? get_dyld_hdr()
|
||||
@ -291,16 +269,13 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
|
||||
#ifdef MH_MAGIC_64
|
||||
case MH_MAGIC_64: {
|
||||
if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
|
||||
start, end, offset, filename, filename_size, arch, uuid,
|
||||
protection))
|
||||
segment))
|
||||
return true;
|
||||
break;
|
||||
}
|
||||
#endif
|
||||
case MH_MAGIC: {
|
||||
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
|
||||
start, end, offset, filename, filename_size, arch, uuid,
|
||||
protection))
|
||||
if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(segment))
|
||||
return true;
|
||||
break;
|
||||
}
|
||||
@ -315,28 +290,22 @@ bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
|
||||
void MemoryMappingLayout::DumpListOfModules(
|
||||
InternalMmapVector<LoadedModule> *modules) {
|
||||
Reset();
|
||||
uptr cur_beg, cur_end, prot;
|
||||
ModuleArch cur_arch;
|
||||
u8 cur_uuid[kModuleUUIDSize];
|
||||
InternalScopedString module_name(kMaxPathLength);
|
||||
for (uptr i = 0; Next(&cur_beg, &cur_end, 0, module_name.data(),
|
||||
module_name.size(), &prot, &cur_arch, &cur_uuid[0]);
|
||||
i++) {
|
||||
const char *cur_name = module_name.data();
|
||||
if (cur_name[0] == '\0')
|
||||
continue;
|
||||
MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
|
||||
for (uptr i = 0; Next(&segment); i++) {
|
||||
if (segment.filename[0] == '\0') continue;
|
||||
LoadedModule *cur_module = nullptr;
|
||||
if (!modules->empty() &&
|
||||
0 == internal_strcmp(cur_name, modules->back().full_name())) {
|
||||
0 == internal_strcmp(segment.filename, modules->back().full_name())) {
|
||||
cur_module = &modules->back();
|
||||
} else {
|
||||
modules->push_back(LoadedModule());
|
||||
cur_module = &modules->back();
|
||||
cur_module->set(cur_name, cur_beg, cur_arch, cur_uuid,
|
||||
current_instrumented_);
|
||||
cur_module->set(segment.filename, segment.start, segment.arch,
|
||||
segment.uuid, current_instrumented_);
|
||||
}
|
||||
cur_module->addAddressRange(cur_beg, cur_end, prot & kProtectionExecute,
|
||||
prot & kProtectionWrite);
|
||||
cur_module->addAddressRange(segment.start, segment.end,
|
||||
segment.IsExecutable(), segment.IsWritable());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,8 @@ void StackTrace::Print() const {
|
||||
if (dedup_frames-- > 0) {
|
||||
if (dedup_token.length())
|
||||
dedup_token.append("--");
|
||||
dedup_token.append(cur->info.function);
|
||||
if (cur->info.function != nullptr)
|
||||
dedup_token.append(cur->info.function);
|
||||
}
|
||||
}
|
||||
frames->ClearAll();
|
||||
|
@ -291,7 +291,8 @@ void DontDumpShadowMemory(uptr addr, uptr length) {
|
||||
// FIXME: add madvise-analog when we move to 64-bits.
|
||||
}
|
||||
|
||||
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
|
||||
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
|
||||
uptr *largest_gap_found) {
|
||||
uptr address = 0;
|
||||
while (true) {
|
||||
MEMORY_BASIC_INFORMATION info;
|
||||
|
@ -264,7 +264,7 @@ ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
|
||||
ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
|
||||
}
|
||||
|
||||
Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) {
|
||||
ScudoPrng *getPrng(ScudoThreadContext *ThreadContext) {
|
||||
return &ThreadContext->Prng;
|
||||
}
|
||||
|
||||
@ -283,7 +283,7 @@ struct ScudoAllocator {
|
||||
StaticSpinMutex FallbackMutex;
|
||||
AllocatorCache FallbackAllocatorCache;
|
||||
ScudoQuarantineCache FallbackQuarantineCache;
|
||||
Xorshift128Plus FallbackPrng;
|
||||
ScudoPrng FallbackPrng;
|
||||
|
||||
bool DeallocationTypeMismatch;
|
||||
bool ZeroContents;
|
||||
@ -333,8 +333,8 @@ struct ScudoAllocator {
|
||||
static_cast<uptr>(Options.QuarantineSizeMb) << 20,
|
||||
static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
|
||||
BackendAllocator.InitCache(&FallbackAllocatorCache);
|
||||
FallbackPrng.initFromURandom();
|
||||
Cookie = FallbackPrng.getNext();
|
||||
FallbackPrng.init();
|
||||
Cookie = FallbackPrng.getU64();
|
||||
}
|
||||
|
||||
// Helper function that checks for a valid Scudo chunk. nullptr isn't.
|
||||
@ -373,19 +373,19 @@ struct ScudoAllocator {
|
||||
bool FromPrimary = PrimaryAllocator::CanAllocate(AlignedSize, MinAlignment);
|
||||
|
||||
void *Ptr;
|
||||
uptr Salt;
|
||||
u8 Salt;
|
||||
uptr AllocationSize = FromPrimary ? AlignedSize : NeededSize;
|
||||
uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
|
||||
ScudoThreadContext *ThreadContext = getThreadContextAndLock();
|
||||
if (LIKELY(ThreadContext)) {
|
||||
Salt = getPrng(ThreadContext)->getNext();
|
||||
Salt = getPrng(ThreadContext)->getU8();
|
||||
Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
|
||||
AllocationSize, AllocationAlignment,
|
||||
FromPrimary);
|
||||
ThreadContext->unlock();
|
||||
} else {
|
||||
SpinMutexLock l(&FallbackMutex);
|
||||
Salt = FallbackPrng.getNext();
|
||||
Salt = FallbackPrng.getU8();
|
||||
Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, AllocationSize,
|
||||
AllocationAlignment, FromPrimary);
|
||||
}
|
||||
@ -612,7 +612,7 @@ static void initScudoInternal(const AllocatorOptions &Options) {
|
||||
|
||||
void ScudoThreadContext::init() {
|
||||
getBackendAllocator().InitCache(&Cache);
|
||||
Prng.initFromURandom();
|
||||
Prng.init();
|
||||
memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
|
||||
}
|
||||
|
||||
|
@ -30,7 +30,7 @@ namespace __scudo {
|
||||
|
||||
struct ALIGNED(64) ScudoThreadContext : public ScudoThreadContextPlatform {
|
||||
AllocatorCache Cache;
|
||||
Xorshift128Plus Prng;
|
||||
ScudoPrng Prng;
|
||||
uptr QuarantineCachePlaceHolder[4];
|
||||
void init();
|
||||
void commitBack();
|
||||
|
@ -123,40 +123,4 @@ bool testCPUFeature(CPUFeature Feature) {
|
||||
}
|
||||
#endif // defined(__x86_64__) || defined(__i386__)
|
||||
|
||||
// readRetry will attempt to read Count bytes from the Fd specified, and if
|
||||
// interrupted will retry to read additional bytes to reach Count.
|
||||
static ssize_t readRetry(int Fd, u8 *Buffer, size_t Count) {
|
||||
ssize_t AmountRead = 0;
|
||||
while (static_cast<size_t>(AmountRead) < Count) {
|
||||
ssize_t Result = read(Fd, Buffer + AmountRead, Count - AmountRead);
|
||||
if (Result > 0)
|
||||
AmountRead += Result;
|
||||
else if (!Result)
|
||||
break;
|
||||
else if (errno != EINTR) {
|
||||
AmountRead = -1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return AmountRead;
|
||||
}
|
||||
|
||||
static void fillRandom(u8 *Data, ssize_t Size) {
|
||||
int Fd = open("/dev/urandom", O_RDONLY);
|
||||
if (Fd < 0) {
|
||||
dieWithMessage("ERROR: failed to open /dev/urandom.\n");
|
||||
}
|
||||
bool Success = readRetry(Fd, Data, Size) == Size;
|
||||
close(Fd);
|
||||
if (!Success) {
|
||||
dieWithMessage("ERROR: failed to read enough data from /dev/urandom.\n");
|
||||
}
|
||||
}
|
||||
|
||||
// Seeds the xorshift state with /dev/urandom.
|
||||
// TODO(kostyak): investigate using getrandom() if available.
|
||||
void Xorshift128Plus::initFromURandom() {
|
||||
fillRandom(reinterpret_cast<u8 *>(State), sizeof(State));
|
||||
}
|
||||
|
||||
} // namespace __scudo
|
||||
|
@ -36,23 +36,58 @@ enum CPUFeature {
|
||||
};
|
||||
bool testCPUFeature(CPUFeature feature);
|
||||
|
||||
// Tiny PRNG based on https://en.wikipedia.org/wiki/Xorshift#xorshift.2B
|
||||
// The state (128 bits) will be stored in thread local storage.
|
||||
struct Xorshift128Plus {
|
||||
INLINE u64 rotl(const u64 X, int K) {
|
||||
return (X << K) | (X >> (64 - K));
|
||||
}
|
||||
|
||||
// XoRoShiRo128+ PRNG (http://xoroshiro.di.unimi.it/).
|
||||
struct XoRoShiRo128Plus {
|
||||
public:
|
||||
void initFromURandom();
|
||||
u64 getNext() {
|
||||
u64 x = State[0];
|
||||
const u64 y = State[1];
|
||||
State[0] = y;
|
||||
x ^= x << 23;
|
||||
State[1] = x ^ y ^ (x >> 17) ^ (y >> 26);
|
||||
return State[1] + y;
|
||||
void init() {
|
||||
if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(State), sizeof(State)))) {
|
||||
// Early processes (eg: init) do not have /dev/urandom yet, but we still
|
||||
// have to provide them with some degree of entropy. Not having a secure
|
||||
// seed is not as problematic for them, as they are less likely to be
|
||||
// the target of heap based vulnerabilities exploitation attempts.
|
||||
State[0] = NanoTime();
|
||||
State[1] = 0;
|
||||
}
|
||||
fillCache();
|
||||
}
|
||||
u8 getU8() {
|
||||
if (UNLIKELY(isCacheEmpty()))
|
||||
fillCache();
|
||||
const u8 Result = static_cast<u8>(CachedBytes & 0xff);
|
||||
CachedBytes >>= 8;
|
||||
CachedBytesAvailable--;
|
||||
return Result;
|
||||
}
|
||||
u64 getU64() { return next(); }
|
||||
|
||||
private:
|
||||
u8 CachedBytesAvailable;
|
||||
u64 CachedBytes;
|
||||
u64 State[2];
|
||||
u64 next() {
|
||||
const u64 S0 = State[0];
|
||||
u64 S1 = State[1];
|
||||
const u64 Result = S0 + S1;
|
||||
S1 ^= S0;
|
||||
State[0] = rotl(S0, 55) ^ S1 ^ (S1 << 14);
|
||||
State[1] = rotl(S1, 36);
|
||||
return Result;
|
||||
}
|
||||
bool isCacheEmpty() {
|
||||
return CachedBytesAvailable == 0;
|
||||
}
|
||||
void fillCache() {
|
||||
CachedBytes = next();
|
||||
CachedBytesAvailable = sizeof(CachedBytes);
|
||||
}
|
||||
};
|
||||
|
||||
typedef XoRoShiRo128Plus ScudoPrng;
|
||||
|
||||
} // namespace __scudo
|
||||
|
||||
#endif // SCUDO_UTILS_H_
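A short usage sketch of the new ScudoPrng alias, mirroring how the allocator changes above consume it (init() once per context, getU64() for the cookie, getU8() for per-chunk salts); the function name is invented:

// Sketch only, not committed code.
#include "scudo_utils.h"

namespace __scudo {

static void PrngUsageSketch() {
  ScudoPrng Prng;
  Prng.init();                 // seeds from GetRandom(), falls back to NanoTime()
  u64 Cookie = Prng.getU64();  // one full 64-bit draw
  u8 Salt = Prng.getU8();      // served from the internal byte cache
  (void)Cookie;
  (void)Salt;
}

}  // namespace __scudo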
|
||||
|
@ -270,20 +270,19 @@ namespace __dsan {
|
||||
|
||||
static void InitDataSeg() {
|
||||
MemoryMappingLayout proc_maps(true);
|
||||
uptr start, end, offset;
|
||||
char name[128];
|
||||
MemoryMappedSegment segment(name, ARRAY_SIZE(name));
|
||||
bool prev_is_data = false;
|
||||
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
|
||||
/*protection*/ 0)) {
|
||||
bool is_data = offset != 0 && name[0] != 0;
|
||||
while (proc_maps.Next(&segment)) {
|
||||
bool is_data = segment.offset != 0 && segment.filename[0] != 0;
|
||||
// BSS may get merged with [heap] in /proc/self/maps. This is not very
|
||||
// reliable.
|
||||
bool is_bss = offset == 0 &&
|
||||
(name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
|
||||
if (g_data_start == 0 && is_data)
|
||||
g_data_start = start;
|
||||
if (is_bss)
|
||||
g_data_end = end;
|
||||
bool is_bss = segment.offset == 0 &&
|
||||
(segment.filename[0] == 0 ||
|
||||
internal_strcmp(segment.filename, "[heap]") == 0) &&
|
||||
prev_is_data;
|
||||
if (g_data_start == 0 && is_data) g_data_start = segment.start;
|
||||
if (is_bss) g_data_end = segment.end;
|
||||
prev_is_data = is_data;
|
||||
}
|
||||
VPrintf(1, "guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
|
||||
|
@ -101,6 +101,9 @@ ThreadClock::ThreadClock(unsigned tid, unsigned reused)
|
||||
clk_[tid_].reused = reused_;
|
||||
}
|
||||
|
||||
void ThreadClock::ResetCached(ClockCache *c) {
|
||||
}
|
||||
|
||||
void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
|
||||
DCHECK_LE(nclk_, kMaxTid);
|
||||
DCHECK_LE(src->size_, kMaxTid);
|
||||
@ -116,9 +119,7 @@ void ThreadClock::acquire(ClockCache *c, const SyncClock *src) {
|
||||
// Check if we've already acquired src after the last release operation on src
|
||||
bool acquired = false;
|
||||
if (nclk > tid_) {
|
||||
CPP_STAT_INC(StatClockAcquireLarge);
|
||||
if (src->elem(tid_).reused == reused_) {
|
||||
CPP_STAT_INC(StatClockAcquireRepeat);
|
||||
for (unsigned i = 0; i < kDirtyTids; i++) {
|
||||
unsigned tid = src->dirty_tids_[i];
|
||||
if (tid != kInvalidTid) {
|
||||
@ -266,11 +267,11 @@ void ThreadClock::UpdateCurrentThread(SyncClock *dst) const {
|
||||
|
||||
for (unsigned i = 0; i < kDirtyTids; i++) {
|
||||
if (dst->dirty_tids_[i] == tid_) {
|
||||
CPP_STAT_INC(StatClockReleaseFast1);
|
||||
CPP_STAT_INC(StatClockReleaseFast);
|
||||
return;
|
||||
}
|
||||
if (dst->dirty_tids_[i] == kInvalidTid) {
|
||||
CPP_STAT_INC(StatClockReleaseFast2);
|
||||
CPP_STAT_INC(StatClockReleaseFast);
|
||||
dst->dirty_tids_[i] = tid_;
|
||||
return;
|
||||
}
|
||||
@ -297,6 +298,64 @@ bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Sets a single element in the vector clock.
|
||||
// This function is called only from weird places like AcquireGlobal.
|
||||
void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
|
||||
DCHECK_LT(tid, kMaxTid);
|
||||
DCHECK_GE(v, clk_[tid].epoch);
|
||||
clk_[tid].epoch = v;
|
||||
if (nclk_ <= tid)
|
||||
nclk_ = tid + 1;
|
||||
last_acquire_ = clk_[tid_].epoch;
|
||||
}
|
||||
|
||||
void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
|
||||
printf("clock=[");
|
||||
for (uptr i = 0; i < nclk_; i++)
|
||||
printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
|
||||
printf("] reused=[");
|
||||
for (uptr i = 0; i < nclk_; i++)
|
||||
printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
|
||||
printf("] tid=%u/%u last_acq=%llu",
|
||||
tid_, reused_, last_acquire_);
|
||||
}
|
||||
|
||||
SyncClock::SyncClock() {
|
||||
ResetImpl();
|
||||
}
|
||||
|
||||
SyncClock::~SyncClock() {
|
||||
// Reset must be called before dtor.
|
||||
CHECK_EQ(size_, 0);
|
||||
CHECK_EQ(tab_, 0);
|
||||
CHECK_EQ(tab_idx_, 0);
|
||||
}
|
||||
|
||||
void SyncClock::Reset(ClockCache *c) {
|
||||
if (size_ == 0) {
|
||||
// nothing
|
||||
} else if (size_ <= ClockBlock::kClockCount) {
|
||||
// One-level table.
|
||||
ctx->clock_alloc.Free(c, tab_idx_);
|
||||
} else {
|
||||
// Two-level table.
|
||||
for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
|
||||
ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
|
||||
ctx->clock_alloc.Free(c, tab_idx_);
|
||||
}
|
||||
ResetImpl();
|
||||
}
|
||||
|
||||
void SyncClock::ResetImpl() {
|
||||
tab_ = 0;
|
||||
tab_idx_ = 0;
|
||||
size_ = 0;
|
||||
release_store_tid_ = kInvalidTid;
|
||||
release_store_reused_ = 0;
|
||||
for (uptr i = 0; i < kDirtyTids; i++)
|
||||
dirty_tids_[i] = kInvalidTid;
|
||||
}
|
||||
|
||||
void SyncClock::Resize(ClockCache *c, uptr nclk) {
|
||||
CPP_STAT_INC(StatClockReleaseResize);
|
||||
if (RoundUpTo(nclk, ClockBlock::kClockCount) <=
|
||||
@ -344,66 +403,6 @@ void SyncClock::Resize(ClockCache *c, uptr nclk) {
|
||||
size_ = nclk;
|
||||
}
|
||||
|
||||
// Sets a single element in the vector clock.
|
||||
// This function is called only from weird places like AcquireGlobal.
|
||||
void ThreadClock::set(unsigned tid, u64 v) {
|
||||
DCHECK_LT(tid, kMaxTid);
|
||||
DCHECK_GE(v, clk_[tid].epoch);
|
||||
clk_[tid].epoch = v;
|
||||
if (nclk_ <= tid)
|
||||
nclk_ = tid + 1;
|
||||
last_acquire_ = clk_[tid_].epoch;
|
||||
}
|
||||
|
||||
void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
|
||||
printf("clock=[");
|
||||
for (uptr i = 0; i < nclk_; i++)
|
||||
printf("%s%llu", i == 0 ? "" : ",", clk_[i].epoch);
|
||||
printf("] reused=[");
|
||||
for (uptr i = 0; i < nclk_; i++)
|
||||
printf("%s%llu", i == 0 ? "" : ",", clk_[i].reused);
|
||||
printf("] tid=%u/%u last_acq=%llu",
|
||||
tid_, reused_, last_acquire_);
|
||||
}
|
||||
|
||||
SyncClock::SyncClock()
|
||||
: release_store_tid_(kInvalidTid)
|
||||
, release_store_reused_()
|
||||
, tab_()
|
||||
, tab_idx_()
|
||||
, size_() {
|
||||
for (uptr i = 0; i < kDirtyTids; i++)
|
||||
dirty_tids_[i] = kInvalidTid;
|
||||
}
|
||||
|
||||
SyncClock::~SyncClock() {
|
||||
// Reset must be called before dtor.
|
||||
CHECK_EQ(size_, 0);
|
||||
CHECK_EQ(tab_, 0);
|
||||
CHECK_EQ(tab_idx_, 0);
|
||||
}
|
||||
|
||||
void SyncClock::Reset(ClockCache *c) {
|
||||
if (size_ == 0) {
|
||||
// nothing
|
||||
} else if (size_ <= ClockBlock::kClockCount) {
|
||||
// One-level table.
|
||||
ctx->clock_alloc.Free(c, tab_idx_);
|
||||
} else {
|
||||
// Two-level table.
|
||||
for (uptr i = 0; i < size_; i += ClockBlock::kClockCount)
|
||||
ctx->clock_alloc.Free(c, tab_->table[i / ClockBlock::kClockCount]);
|
||||
ctx->clock_alloc.Free(c, tab_idx_);
|
||||
}
|
||||
tab_ = 0;
|
||||
tab_idx_ = 0;
|
||||
size_ = 0;
|
||||
release_store_tid_ = kInvalidTid;
|
||||
release_store_reused_ = 0;
|
||||
for (uptr i = 0; i < kDirtyTids; i++)
|
||||
dirty_tids_[i] = kInvalidTid;
|
||||
}
|
||||
|
||||
ClockElem &SyncClock::elem(unsigned tid) const {
|
||||
DCHECK_LT(tid, size_);
|
||||
if (size_ <= ClockBlock::kClockCount)
|
||||
|
@ -74,6 +74,7 @@ class SyncClock {
|
||||
u32 tab_idx_;
|
||||
u32 size_;
|
||||
|
||||
void ResetImpl();
|
||||
ClockElem &elem(unsigned tid) const;
|
||||
};
|
||||
|
||||
@ -89,7 +90,7 @@ struct ThreadClock {
|
||||
return clk_[tid].epoch;
|
||||
}
|
||||
|
||||
void set(unsigned tid, u64 v);
|
||||
void set(ClockCache *c, unsigned tid, u64 v);
|
||||
|
||||
void set(u64 v) {
|
||||
DCHECK_GE(v, clk_[tid_].epoch);
|
||||
@ -108,6 +109,7 @@ struct ThreadClock {
|
||||
void release(ClockCache *c, SyncClock *dst) const;
|
||||
void acq_rel(ClockCache *c, SyncClock *dst);
|
||||
void ReleaseStore(ClockCache *c, SyncClock *dst) const;
|
||||
void ResetCached(ClockCache *c);
|
||||
|
||||
void DebugReset();
|
||||
void DebugDump(int(*printf)(const char *s, ...));
|
||||
|
@ -39,7 +39,7 @@ class DenseSlabAlloc {
|
||||
typedef DenseSlabAllocCache Cache;
|
||||
typedef typename Cache::IndexT IndexT;
|
||||
|
||||
DenseSlabAlloc() {
|
||||
explicit DenseSlabAlloc(const char *name) {
|
||||
// Check that kL1Size and kL2Size are sane.
|
||||
CHECK_EQ(kL1Size & (kL1Size - 1), 0);
|
||||
CHECK_EQ(kL2Size & (kL2Size - 1), 0);
|
||||
@ -49,6 +49,7 @@ class DenseSlabAlloc {
|
||||
internal_memset(map_, 0, sizeof(map_));
|
||||
freelist_ = 0;
|
||||
fillpos_ = 0;
|
||||
name_ = name;
|
||||
}
|
||||
|
||||
~DenseSlabAlloc() {
|
||||
@ -96,15 +97,19 @@ class DenseSlabAlloc {
|
||||
SpinMutex mtx_;
|
||||
IndexT freelist_;
|
||||
uptr fillpos_;
|
||||
const char *name_;
|
||||
|
||||
void Refill(Cache *c) {
|
||||
SpinMutexLock lock(&mtx_);
|
||||
if (freelist_ == 0) {
|
||||
if (fillpos_ == kL1Size) {
|
||||
Printf("ThreadSanitizer: DenseSlabAllocator overflow. Dying.\n");
|
||||
Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
|
||||
name_, kL1Size, kL2Size);
|
||||
Die();
|
||||
}
|
||||
T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), "DenseSlabAllocator");
|
||||
VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
|
||||
name_, fillpos_, kL1Size, kL2Size);
|
||||
T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
|
||||
// Reserve 0 as invalid index.
|
||||
IndexT start = fillpos_ == 0 ? 1 : 0;
|
||||
for (IndexT i = start; i < kL2Size; i++) {
|
||||
|
@ -14,6 +14,7 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "sanitizer_common/sanitizer_atomic.h"
|
||||
#include "sanitizer_common/sanitizer_errno.h"
|
||||
#include "sanitizer_common/sanitizer_libc.h"
|
||||
#include "sanitizer_common/sanitizer_linux.h"
|
||||
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
|
||||
@ -34,13 +35,11 @@
|
||||
using namespace __tsan; // NOLINT
|
||||
|
||||
#if SANITIZER_FREEBSD || SANITIZER_MAC
|
||||
#define __errno_location __error
|
||||
#define stdout __stdoutp
|
||||
#define stderr __stderrp
|
||||
#endif
|
||||
|
||||
#if SANITIZER_ANDROID
|
||||
#define __errno_location __errno
|
||||
#define mallopt(a, b)
|
||||
#endif
|
||||
|
||||
@ -84,7 +83,6 @@ DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
|
||||
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
|
||||
extern "C" void *pthread_self();
|
||||
extern "C" void _exit(int status);
|
||||
extern "C" int *__errno_location();
|
||||
extern "C" int fileno_unlocked(void *stream);
|
||||
extern "C" int dirfd(void *dirp);
|
||||
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID
|
||||
@ -98,9 +96,6 @@ const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
|
||||
const int PTHREAD_MUTEX_RECURSIVE = 2;
|
||||
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
|
||||
#endif
|
||||
const int EINVAL = 22;
|
||||
const int EBUSY = 16;
|
||||
const int EOWNERDEAD = 130;
|
||||
#if !SANITIZER_FREEBSD && !SANITIZER_MAC
|
||||
const int EPOLL_CTL_ADD = 1;
|
||||
#endif
|
||||
@ -130,8 +125,6 @@ typedef long long_t; // NOLINT
|
||||
# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
|
||||
# define F_TEST 3 /* Test a region for other processes locks. */
|
||||
|
||||
#define errno (*__errno_location())
|
||||
|
||||
typedef void (*sighandler_t)(int sig);
|
||||
typedef void (*sigactionhandler_t)(int sig, my_siginfo_t *siginfo, void *uctx);
|
||||
|
||||
@ -268,7 +261,7 @@ ScopedInterceptor::~ScopedInterceptor() {
|
||||
|
||||
void ScopedInterceptor::EnableIgnores() {
|
||||
if (ignoring_) {
|
||||
ThreadIgnoreBegin(thr_, pc_, false);
|
||||
ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
|
||||
if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
|
||||
if (in_ignored_lib_) {
|
||||
DCHECK(!thr_->in_ignored_lib);
|
||||
@ -466,8 +459,14 @@ static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
|
||||
static void LongJmp(ThreadState *thr, uptr *env) {
|
||||
#ifdef __powerpc__
|
||||
uptr mangled_sp = env[0];
|
||||
#elif SANITIZER_FREEBSD || SANITIZER_MAC
|
||||
#elif SANITIZER_FREEBSD
|
||||
uptr mangled_sp = env[2];
|
||||
#elif SANITIZER_MAC
|
||||
# ifdef __aarch64__
|
||||
uptr mangled_sp = env[13];
|
||||
# else
|
||||
uptr mangled_sp = env[2];
|
||||
# endif
|
||||
#elif defined(SANITIZER_LINUX)
|
||||
# ifdef __aarch64__
|
||||
uptr mangled_sp = env[13];
|
||||
@ -665,7 +664,7 @@ static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
|
||||
if (*addr) {
|
||||
if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
|
||||
if (flags & MAP_FIXED) {
|
||||
errno = EINVAL;
|
||||
errno = errno_EINVAL;
|
||||
return false;
|
||||
} else {
|
||||
*addr = 0;
|
||||
@ -1122,7 +1121,7 @@ TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
|
||||
TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
|
||||
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
|
||||
int res = REAL(pthread_mutex_destroy)(m);
|
||||
if (res == 0 || res == EBUSY) {
|
||||
if (res == 0 || res == errno_EBUSY) {
|
||||
MutexDestroy(thr, pc, (uptr)m);
|
||||
}
|
||||
return res;
|
||||
@ -1131,9 +1130,9 @@ TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
|
||||
TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
|
||||
SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
|
||||
int res = REAL(pthread_mutex_trylock)(m);
|
||||
if (res == EOWNERDEAD)
|
||||
if (res == errno_EOWNERDEAD)
|
||||
MutexRepair(thr, pc, (uptr)m);
|
||||
if (res == 0 || res == EOWNERDEAD)
|
||||
if (res == 0 || res == errno_EOWNERDEAD)
|
||||
MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
|
||||
return res;
|
||||
}
|
||||
@ -1311,7 +1310,7 @@ TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
|
||||
TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
|
||||
SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
|
||||
if (o == 0 || f == 0)
|
||||
return EINVAL;
|
||||
return errno_EINVAL;
|
||||
atomic_uint32_t *a;
|
||||
if (!SANITIZER_MAC)
|
||||
a = static_cast<atomic_uint32_t*>(o);
|
||||
|
@ -21,7 +21,10 @@
|
||||
#include "tsan_interface_ann.h"
|
||||
|
||||
#include <libkern/OSAtomic.h>
|
||||
|
||||
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
|
||||
#include <xpc/xpc.h>
|
||||
#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
|
||||
|
||||
typedef long long_t; // NOLINT
|
||||
|
||||
@ -235,6 +238,8 @@ TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
|
||||
REAL(os_lock_unlock)(lock);
|
||||
}
|
||||
|
||||
#if defined(__has_include) && __has_include(<xpc/xpc.h>)
|
||||
|
||||
TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
|
||||
xpc_connection_t connection, xpc_handler_t handler) {
|
||||
SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
|
||||
@ -287,6 +292,8 @@ TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
|
||||
REAL(xpc_connection_cancel)(connection);
|
||||
}
|
||||
|
||||
#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
|
||||
|
||||
// On macOS, libc++ is always linked dynamically, so intercepting works the
|
||||
// usual way.
|
||||
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
|
||||
|
@ -483,8 +483,8 @@ void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
|
||||
else
|
||||
MutexPreLock(thr, pc, (uptr)m);
|
||||
}
|
||||
ThreadIgnoreBegin(thr, pc, false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, false);
|
||||
ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
|
||||
}
|
||||
|
||||
INTERFACE_ATTRIBUTE
|
||||
@ -510,8 +510,8 @@ int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
|
||||
} else {
|
||||
ret = MutexUnlock(thr, pc, (uptr)m, flagz);
|
||||
}
|
||||
ThreadIgnoreBegin(thr, pc, false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, false);
|
||||
ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -525,8 +525,8 @@ void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
|
||||
INTERFACE_ATTRIBUTE
|
||||
void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
|
||||
SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
|
||||
ThreadIgnoreBegin(thr, pc, false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, false);
|
||||
ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
|
||||
}
|
||||
|
||||
INTERFACE_ATTRIBUTE
|
||||
@ -547,7 +547,7 @@ void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
|
||||
INTERFACE_ATTRIBUTE
|
||||
void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
|
||||
SCOPED_ANNOTATION(__tsan_mutex_post_divert);
|
||||
ThreadIgnoreBegin(thr, pc, false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, false);
|
||||
ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
|
||||
ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
|
||||
}
|
||||
} // extern "C"
|
||||
|
@ -220,8 +220,7 @@ static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
|
||||
#endif
|
||||
|
||||
template<typename T>
|
||||
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
|
||||
morder mo) {
|
||||
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
|
||||
CHECK(IsLoadOrder(mo));
|
||||
// This fast-path is critical for performance.
|
||||
// Assume the access is atomic.
|
||||
@ -229,10 +228,17 @@ static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a,
|
||||
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
|
||||
return NoTsanAtomicLoad(a, mo);
|
||||
}
|
||||
SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, false);
|
||||
AcquireImpl(thr, pc, &s->clock);
|
||||
// Don't create sync object if it does not exist yet. For example, an atomic
|
||||
// pointer is initialized to nullptr and then periodically acquire-loaded.
|
||||
T v = NoTsanAtomicLoad(a, mo);
|
||||
s->mtx.ReadUnlock();
|
||||
SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
|
||||
if (s) {
|
||||
AcquireImpl(thr, pc, &s->clock);
|
||||
// Re-read under sync mutex because we need a consistent snapshot
|
||||
// of the value and the clock we acquire.
|
||||
v = NoTsanAtomicLoad(a, mo);
|
||||
s->mtx.ReadUnlock();
|
||||
}
|
||||
MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
|
||||
return v;
|
||||
}
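The comment above describes the access pattern that motivated this change. A small user-level illustration of that pattern in plain C++11 atomics (not sanitizer code):

// An atomic pointer that starts out null and is acquire-loaded in a loop.
// With the change above, these acquire loads no longer create a SyncVar for
// 'ptr'; one only gets attached once a release operation publishes the object.
#include <atomic>

struct Payload { int value; };
std::atomic<Payload *> ptr{nullptr};

int ConsumeWhenReady() {
  Payload *p = nullptr;
  while (!(p = ptr.load(std::memory_order_acquire))) {
    // spin (or sleep) until a producer stores with std::memory_order_release
  }
  return p->value;
}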
|
||||
|
@ -294,6 +294,8 @@ uptr __sanitizer_get_allocated_size(const void *p) {
|
||||
|
||||
void __tsan_on_thread_idle() {
|
||||
ThreadState *thr = cur_thread();
|
||||
thr->clock.ResetCached(&thr->proc()->clock_cache);
|
||||
thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
|
||||
allocator()->SwallowCache(&thr->proc()->alloc_cache);
|
||||
internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
|
||||
ctx->metamap.OnProcIdle(thr->proc());
|
||||
|
@ -100,6 +100,37 @@ struct Mapping {
|
||||
};
|
||||
|
||||
#define TSAN_MID_APP_RANGE 1
|
||||
#elif defined(__aarch64__) && defined(__APPLE__)
|
||||
/*
|
||||
C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
|
||||
0000 0000 00 - 0100 0000 00: - (4 GB)
|
||||
0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
|
||||
0200 0000 00 - 0300 0000 00: heap (4 GB)
|
||||
0300 0000 00 - 0400 0000 00: - (4 GB)
|
||||
0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
|
||||
0c00 0000 00 - 0d00 0000 00: - (4 GB)
|
||||
0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
|
||||
0e00 0000 00 - 0f00 0000 00: - (4 GB)
|
||||
0f00 0000 00 - 1000 0000 00: traces (4 GB)
|
||||
*/
|
||||
struct Mapping {
|
||||
static const uptr kLoAppMemBeg = 0x0100000000ull;
|
||||
static const uptr kLoAppMemEnd = 0x0200000000ull;
|
||||
static const uptr kHeapMemBeg = 0x0200000000ull;
|
||||
static const uptr kHeapMemEnd = 0x0300000000ull;
|
||||
static const uptr kShadowBeg = 0x0400000000ull;
|
||||
static const uptr kShadowEnd = 0x0c00000000ull;
|
||||
static const uptr kMetaShadowBeg = 0x0d00000000ull;
|
||||
static const uptr kMetaShadowEnd = 0x0e00000000ull;
|
||||
static const uptr kTraceMemBeg = 0x0f00000000ull;
|
||||
static const uptr kTraceMemEnd = 0x1000000000ull;
|
||||
static const uptr kHiAppMemBeg = 0x1000000000ull;
|
||||
static const uptr kHiAppMemEnd = 0x1000000000ull;
|
||||
static const uptr kAppMemMsk = 0x0ull;
|
||||
static const uptr kAppMemXor = 0x0ull;
|
||||
static const uptr kVdsoBeg = 0x7000000000000000ull;
|
||||
};
|
||||
|
||||
#elif defined(__aarch64__)
|
||||
// AArch64 supports multiple VMA which leads to multiple address transformation
|
||||
// functions. To support these multiple VMAS transformations and mappings TSAN
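Back in the Darwin/iOS/ARM64 block above, the region sizes line up with TSan's usual scaling. A compile-time consistency sketch, assuming the conventional 4 bytes of shadow and half a byte of metainfo per application byte (those factors come from elsewhere in TSan, not from this hunk):

// Sketch only, not part of the commit.
#include <cstdint>

constexpr uint64_t kGB = 1ull << 30;
// Instrumented app memory in this layout: low app range (4 GB) plus heap (4 GB).
constexpr uint64_t kAppBytes = 8 * kGB;
static_assert(0x0c00000000ull - 0x0400000000ull == 4 * kAppBytes,
              "shadow region (32 GB) is 4x the instrumented app memory");
static_assert(0x0e00000000ull - 0x0d00000000ull == kAppBytes / 2,
              "metainfo region (4 GB) is half the instrumented app memory");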
|
||||
@ -389,7 +420,7 @@ uptr MappingImpl(void) {
|
||||
|
||||
template<int Type>
|
||||
uptr MappingArchImpl(void) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return MappingImpl<Mapping39, Type>();
|
||||
case 42: return MappingImpl<Mapping42, Type>();
|
||||
@ -542,7 +573,7 @@ bool IsAppMemImpl(uptr mem) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
bool IsAppMem(uptr mem) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return IsAppMemImpl<Mapping39>(mem);
|
||||
case 42: return IsAppMemImpl<Mapping42>(mem);
|
||||
@ -569,7 +600,7 @@ bool IsShadowMemImpl(uptr mem) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
bool IsShadowMem(uptr mem) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return IsShadowMemImpl<Mapping39>(mem);
|
||||
case 42: return IsShadowMemImpl<Mapping42>(mem);
|
||||
@ -596,7 +627,7 @@ bool IsMetaMemImpl(uptr mem) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
bool IsMetaMem(uptr mem) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return IsMetaMemImpl<Mapping39>(mem);
|
||||
case 42: return IsMetaMemImpl<Mapping42>(mem);
|
||||
@ -633,7 +664,7 @@ uptr MemToShadowImpl(uptr x) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
uptr MemToShadow(uptr x) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return MemToShadowImpl<Mapping39>(x);
|
||||
case 42: return MemToShadowImpl<Mapping42>(x);
|
||||
@ -672,7 +703,7 @@ u32 *MemToMetaImpl(uptr x) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
u32 *MemToMeta(uptr x) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return MemToMetaImpl<Mapping39>(x);
|
||||
case 42: return MemToMetaImpl<Mapping42>(x);
|
||||
@ -724,7 +755,7 @@ uptr ShadowToMemImpl(uptr s) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
uptr ShadowToMem(uptr s) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return ShadowToMemImpl<Mapping39>(s);
|
||||
case 42: return ShadowToMemImpl<Mapping42>(s);
|
||||
@ -759,7 +790,7 @@ uptr GetThreadTraceImpl(int tid) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
uptr GetThreadTrace(int tid) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return GetThreadTraceImpl<Mapping39>(tid);
|
||||
case 42: return GetThreadTraceImpl<Mapping42>(tid);
|
||||
@ -789,7 +820,7 @@ uptr GetThreadTraceHeaderImpl(int tid) {
|
||||
|
||||
ALWAYS_INLINE
|
||||
uptr GetThreadTraceHeader(int tid) {
|
||||
#ifdef __aarch64__
|
||||
#if defined(__aarch64__) && !defined(__APPLE__)
|
||||
switch (vmaSize) {
|
||||
case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
|
||||
case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
|
||||
|
@ -47,7 +47,6 @@
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
@ -182,17 +181,15 @@ static void MapRodata() {
}
// Map the file into shadow of .rodata sections.
MemoryMappingLayout proc_maps(/*cache_enabled*/true);
uptr start, end, offset, prot;
// Reusing the buffer 'name'.
while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
if (name[0] != 0 && name[0] != '['
&& (prot & MemoryMappingLayout::kProtectionRead)
&& (prot & MemoryMappingLayout::kProtectionExecute)
&& !(prot & MemoryMappingLayout::kProtectionWrite)
&& IsAppMem(start)) {
MemoryMappedSegment segment(name, ARRAY_SIZE(name));
while (proc_maps.Next(&segment)) {
if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
segment.IsReadable() && segment.IsExecutable() &&
!segment.IsWritable() && IsAppMem(segment.start)) {
// Assume it's .rodata
char *shadow_start = (char*)MemToShadow(start);
char *shadow_end = (char*)MemToShadow(end);
char *shadow_start = (char *)MemToShadow(segment.start);
char *shadow_end = (char *)MemToShadow(segment.end);
for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
|
@ -230,6 +230,14 @@ static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
#endif

void InitializePlatformEarly() {
#if defined(__aarch64__)
uptr max_vm = GetMaxVirtualAddress() + 1;
if (max_vm != Mapping::kHiAppMemEnd) {
Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
max_vm, Mapping::kHiAppMemEnd);
Die();
}
#endif
}

void InitializePlatform() {
|
@ -46,6 +46,9 @@ void InitializeShadowMemory() {
#elif defined(__mips64)
const uptr kMadviseRangeBeg = 0xff00000000ull;
const uptr kMadviseRangeSize = 0x0100000000ull;
#elif defined(__aarch64__) && defined(__APPLE__)
uptr kMadviseRangeBeg = LoAppMemBeg();
uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
#elif defined(__aarch64__)
uptr kMadviseRangeBeg = 0;
uptr kMadviseRangeSize = 0;
@ -115,21 +118,24 @@ static void ProtectRange(uptr beg, uptr end) {
void CheckAndProtect() {
// Ensure that the binary is indeed compiled with -pie.
MemoryMappingLayout proc_maps(true);
uptr p, end, prot;
while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
if (IsAppMem(p))
MemoryMappedSegment segment;
while (proc_maps.Next(&segment)) {
if (IsAppMem(segment.start)) continue;
if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
if (segment.protection == 0) // Zero page or mprotected.
continue;
if (p >= HeapMemEnd() &&
p < HeapEnd())
continue;
if (prot == 0) // Zero page or mprotected.
continue;
if (p >= VdsoBeg()) // vdso
if (segment.start >= VdsoBeg()) // vdso
break;
Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
segment.start, segment.end);
Die();
}

#if defined(__aarch64__) && defined(__APPLE__)
ProtectRange(HeapMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
ProtectRange(MetaShadowEnd(), TraceMemBeg());
#else
ProtectRange(LoAppMemEnd(), ShadowBeg());
ProtectRange(ShadowEnd(), MetaShadowBeg());
#ifdef TSAN_MID_APP_RANGE
@ -143,6 +149,7 @@ void CheckAndProtect() {
ProtectRange(TraceMemBeg(), TraceMemEnd());
ProtectRange(TraceMemEnd(), HeapMemBeg());
ProtectRange(HeapEnd(), HiAppMemBeg());
#endif
}
#endif

|
@ -104,7 +104,8 @@ Context::Context()
, racy_stacks(MBlockRacyStacks)
, racy_addresses(MBlockRacyAddresses)
, fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
, fired_suppressions(8) {
, fired_suppressions(8)
, clock_alloc("clock allocator") {
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
|
@ -1,13 +1,46 @@
// The content of this file is AArch64-only:
#if defined(__aarch64__)

#include "sanitizer_common/sanitizer_asm.h"

#if !defined(__APPLE__)
.section .bss
.type __tsan_pointer_chk_guard, %object
.size __tsan_pointer_chk_guard, 8
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__tsan_pointer_chk_guard))
__tsan_pointer_chk_guard:
.zero 8
#endif

#if defined(__APPLE__)
.align 2

.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
.long _setjmp$non_lazy_ptr
_setjmp$non_lazy_ptr:
.indirect_symbol _setjmp
.long 0

.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
.long __setjmp$non_lazy_ptr
__setjmp$non_lazy_ptr:
.indirect_symbol __setjmp
.long 0

.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
.long _sigsetjmp$non_lazy_ptr
_sigsetjmp$non_lazy_ptr:
.indirect_symbol _sigsetjmp
.long 0
#endif

#if !defined(__APPLE__)
.section .text
#else
.section __TEXT,__text
.align 3
#endif

#if !defined(__APPLE__)
// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
// functions) by XORing them with a random guard pointer. For AArch64 it is a
// global variable rather than a TCB one (as for x86_64/powerpc) and although
@ -16,9 +49,9 @@ __tsan_pointer_chk_guard:
// not stable). So InitializeGuardPtr obtains the pointer guard value by
// issuing a setjmp and checking the resulting pointers values against the
// original ones.
.hidden _Z18InitializeGuardPtrv
ASM_HIDDEN(_Z18InitializeGuardPtrv)
.global _Z18InitializeGuardPtrv
.type _Z18InitializeGuardPtrv, @function
ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
_Z18InitializeGuardPtrv:
CFI_STARTPROC
// Allocates a jmp_buf for the setjmp call.
@ -55,12 +88,14 @@ _Z18InitializeGuardPtrv:
CFI_DEF_CFA (31, 0)
ret
CFI_ENDPROC
.size _Z18InitializeGuardPtrv, .-_Z18InitializeGuardPtrv
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_Z18InitializeGuardPtrv))
#endif

.hidden __tsan_setjmp
ASM_HIDDEN(__tsan_setjmp)
.comm _ZN14__interception11real_setjmpE,8,8
.type setjmp, @function
setjmp:
.globl ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp)
ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))
ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp):
CFI_STARTPROC

// save env parameters for function call
@ -78,14 +113,19 @@ setjmp:
CFI_OFFSET (19, -16)
mov x19, x0

#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
#else
add x0, x29, 32
mov x1, x0
#endif

// call tsan interceptor
bl __tsan_setjmp
bl ASM_TSAN_SYMBOL(__tsan_setjmp)

// restore env parameter
mov x0, x19
@ -96,18 +136,24 @@ setjmp:
CFI_DEF_CFA (31, 0)

// tail jump to libc setjmp
#if !defined(__APPLE__)
adrp x1, :got:_ZN14__interception11real_setjmpE
ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
ldr x1, [x1]
#else
adrp x1, _setjmp$non_lazy_ptr@page
add x1, x1, _setjmp$non_lazy_ptr@pageoff
ldr x1, [x1]
#endif
br x1

CFI_ENDPROC
.size setjmp, .-setjmp
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(setjmp))

.comm _ZN14__interception12real__setjmpE,8,8
.globl _setjmp
.type _setjmp, @function
_setjmp:
.globl ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp)
ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))
ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp):
CFI_STARTPROC

// save env parameters for function call
@ -125,14 +171,19 @@ _setjmp:
CFI_OFFSET (19, -16)
mov x19, x0

#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
#else
add x0, x29, 32
mov x1, x0
#endif

// call tsan interceptor
bl __tsan_setjmp
bl ASM_TSAN_SYMBOL(__tsan_setjmp)

// Restore jmp_buf parameter
mov x0, x19
@ -143,18 +194,24 @@ _setjmp:
CFI_DEF_CFA (31, 0)

// tail jump to libc setjmp
#if !defined(__APPLE__)
adrp x1, :got:_ZN14__interception12real__setjmpE
ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
ldr x1, [x1]
#else
adrp x1, __setjmp$non_lazy_ptr@page
add x1, x1, __setjmp$non_lazy_ptr@pageoff
ldr x1, [x1]
#endif
br x1

CFI_ENDPROC
.size _setjmp, .-_setjmp
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(_setjmp))

.comm _ZN14__interception14real_sigsetjmpE,8,8
.globl sigsetjmp
.type sigsetjmp, @function
sigsetjmp:
.globl ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp)
ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))
ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp):
CFI_STARTPROC

// save env parameters for function call
@ -174,14 +231,19 @@ sigsetjmp:
mov w20, w1
mov x19, x0

#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
#else
add x0, x29, 32
mov x1, x0
#endif

// call tsan interceptor
bl __tsan_setjmp
bl ASM_TSAN_SYMBOL(__tsan_setjmp)

// restore env parameter
mov w1, w20
@ -195,17 +257,24 @@ sigsetjmp:
CFI_DEF_CFA (31, 0)

// tail jump to libc sigsetjmp
#if !defined(__APPLE__)
adrp x2, :got:_ZN14__interception14real_sigsetjmpE
ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
ldr x2, [x2]
#else
adrp x2, _sigsetjmp$non_lazy_ptr@page
add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
ldr x2, [x2]
#endif
br x2
CFI_ENDPROC
.size sigsetjmp, .-sigsetjmp
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(sigsetjmp))

#if !defined(__APPLE__)
.comm _ZN14__interception16real___sigsetjmpE,8,8
.globl __sigsetjmp
.type __sigsetjmp, @function
__sigsetjmp:
.globl ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp)
ASM_TYPE_FUNCTION(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp):
CFI_STARTPROC

// save env parameters for function call
@ -225,14 +294,16 @@ __sigsetjmp:
mov w20, w1
mov x19, x0

#if !defined(__APPLE__)
// SP pointer mangling (see glibc setjmp)
adrp x2, __tsan_pointer_chk_guard
ldr x2, [x2, #:lo12:__tsan_pointer_chk_guard]
add x0, x29, 32
eor x1, x2, x0
#endif

// call tsan interceptor
bl __tsan_setjmp
bl ASM_TSAN_SYMBOL(__tsan_setjmp)

mov w1, w20
mov x0, x19
@ -245,14 +316,22 @@ __sigsetjmp:
CFI_DEF_CFA (31, 0)

// tail jump to libc __sigsetjmp
#if !defined(__APPLE__)
adrp x2, :got:_ZN14__interception16real___sigsetjmpE
ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE]
ldr x2, [x2]
#else
adrp x2, ASM_TSAN_SYMBOL(__sigsetjmp)@page
add x2, x2, ASM_TSAN_SYMBOL(__sigsetjmp)@pageoff
#endif
br x2
CFI_ENDPROC
.size __sigsetjmp, .-__sigsetjmp
ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
#endif

#if defined(__FreeBSD__) || defined(__linux__)
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif

#endif
|
@ -1,4 +1,8 @@
// The content of this file is x86_64-only:
#if defined(__x86_64__)

#include "sanitizer_common/sanitizer_asm.h"

#if !defined(__APPLE__)
.section .text
#else
@ -357,3 +361,5 @@ ASM_SIZE(ASM_TSAN_SYMBOL_INTERCEPTOR(__sigsetjmp))
/* We do not need executable stack. */
.section .note.GNU-stack,"",@progbits
#endif

#endif
|
@ -413,10 +413,10 @@ void Acquire(ThreadState *thr, uptr pc, uptr addr) {
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
u64 epoch = tctx->epoch1;
if (tctx->status == ThreadStatusRunning)
thr->clock.set(tctx->tid, tctx->thr->fast_state.epoch());
else
thr->clock.set(tctx->tid, tctx->epoch1);
epoch = tctx->thr->fast_state.epoch();
thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
@ -456,10 +456,10 @@ void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
u64 epoch = tctx->epoch1;
if (tctx->status == ThreadStatusRunning)
thr->last_sleep_clock.set(tctx->tid, tctx->thr->fast_state.epoch());
else
thr->last_sleep_clock.set(tctx->tid, tctx->epoch1);
epoch = tctx->thr->fast_state.epoch();
thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AfterSleep(ThreadState *thr, uptr pc) {
|
@ -314,7 +314,7 @@ void ScopedReport::AddLocation(uptr addr, uptr size) {
return;
#if !SANITIZER_GO
int fd = -1;
int creat_tid = -1;
int creat_tid = kInvalidTid;
u32 creat_stack = 0;
if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
ReportLocation *loc = ReportLocation::New(ReportLocationFD);
|
@ -142,6 +142,10 @@ void ThreadContext::OnFinished() {

if (common_flags()->detect_deadlocks)
ctx->dd->DestroyLogicalThread(thr->dd_lt);
thr->clock.ResetCached(&thr->proc()->clock_cache);
#if !SANITIZER_GO
thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
#endif
thr->~ThreadState();
#if TSAN_COLLECT_STATS
StatAggregate(ctx->stat, thr->stat);
|
@ -75,14 +75,11 @@ void StatOutput(u64 *stat) {
name[StatClockAcquire] = "Clock acquire ";
name[StatClockAcquireEmpty] = " empty clock ";
name[StatClockAcquireFastRelease] = " fast from release-store ";
name[StatClockAcquireLarge] = " contains my tid ";
name[StatClockAcquireRepeat] = " repeated (fast) ";
name[StatClockAcquireFull] = " full (slow) ";
name[StatClockAcquiredSomething] = " acquired something ";
name[StatClockRelease] = "Clock release ";
name[StatClockReleaseResize] = " resize ";
name[StatClockReleaseFast1] = " fast1 ";
name[StatClockReleaseFast2] = " fast2 ";
name[StatClockReleaseFast] = " fast ";
name[StatClockReleaseSlow] = " dirty overflow (slow) ";
name[StatClockReleaseFull] = " full (slow) ";
name[StatClockReleaseAcquired] = " was acquired ";
|
@ -74,15 +74,12 @@ enum StatType {
StatClockAcquire,
StatClockAcquireEmpty,
StatClockAcquireFastRelease,
StatClockAcquireLarge,
StatClockAcquireRepeat,
StatClockAcquireFull,
StatClockAcquiredSomething,
// Clocks - release.
StatClockRelease,
StatClockReleaseResize,
StatClockReleaseFast1,
StatClockReleaseFast2,
StatClockReleaseFast,
StatClockReleaseSlow,
StatClockReleaseFull,
StatClockReleaseAcquired,
|
@ -53,7 +53,9 @@ void SyncVar::Reset(Processor *proc) {
}
}

MetaMap::MetaMap() {
MetaMap::MetaMap()
: block_alloc_("heap block allocator")
, sync_alloc_("sync allocator") {
atomic_store(&uid_gen_, 0, memory_order_relaxed);
}

|
@ -549,7 +549,7 @@ hypot(_A1 __lcpp_x, _A2 __lcpp_y, _A3 __lcpp_z) _NOEXCEPT
template <class _A1>
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR typename enable_if<is_floating_point<_A1>::value, bool>::type
__libcpp_isnan(_A1 __lcpp_x) _NOEXCEPT
__libcpp_isnan_or_builtin(_A1 __lcpp_x) _NOEXCEPT
{
#if __has_builtin(__builtin_isnan)
return __builtin_isnan(__lcpp_x);
@ -561,7 +561,7 @@ __libcpp_isnan(_A1 __lcpp_x) _NOEXCEPT
template <class _A1>
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR typename enable_if<!is_floating_point<_A1>::value, bool>::type
__libcpp_isnan(_A1 __lcpp_x) _NOEXCEPT
__libcpp_isnan_or_builtin(_A1 __lcpp_x) _NOEXCEPT
{
return isnan(__lcpp_x);
}
@ -569,7 +569,7 @@ __libcpp_isnan(_A1 __lcpp_x) _NOEXCEPT
template <class _A1>
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR typename enable_if<is_floating_point<_A1>::value, bool>::type
__libcpp_isinf(_A1 __lcpp_x) _NOEXCEPT
__libcpp_isinf_or_builtin(_A1 __lcpp_x) _NOEXCEPT
{
#if __has_builtin(__builtin_isinf)
return __builtin_isinf(__lcpp_x);
@ -581,7 +581,7 @@ __libcpp_isinf(_A1 __lcpp_x) _NOEXCEPT
template <class _A1>
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR typename enable_if<!is_floating_point<_A1>::value, bool>::type
__libcpp_isinf(_A1 __lcpp_x) _NOEXCEPT
__libcpp_isinf_or_builtin(_A1 __lcpp_x) _NOEXCEPT
{
return isinf(__lcpp_x);
}
@ -589,7 +589,7 @@ __libcpp_isinf(_A1 __lcpp_x) _NOEXCEPT
template <class _A1>
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR typename enable_if<is_floating_point<_A1>::value, bool>::type
__libcpp_isfinite(_A1 __lcpp_x) _NOEXCEPT
__libcpp_isfinite_or_builtin(_A1 __lcpp_x) _NOEXCEPT
{
#if __has_builtin(__builtin_isfinite)
return __builtin_isfinite(__lcpp_x);
@ -601,7 +601,7 @@ __libcpp_isfinite(_A1 __lcpp_x) _NOEXCEPT
template <class _A1>
_LIBCPP_ALWAYS_INLINE
_LIBCPP_CONSTEXPR typename enable_if<!is_floating_point<_A1>::value, bool>::type
__libcpp_isfinite(_A1 __lcpp_x) _NOEXCEPT
__libcpp_isfinite_or_builtin(_A1 __lcpp_x) _NOEXCEPT
{
return isfinite(__lcpp_x);
}
|
@ -599,39 +599,39 @@ operator*(const complex<_Tp>& __z, const complex<_Tp>& __w)
_Tp __bc = __b * __c;
_Tp __x = __ac - __bd;
_Tp __y = __ad + __bc;
if (__libcpp_isnan(__x) && __libcpp_isnan(__y))
if (__libcpp_isnan_or_builtin(__x) && __libcpp_isnan_or_builtin(__y))
{
bool __recalc = false;
if (__libcpp_isinf(__a) || __libcpp_isinf(__b))
if (__libcpp_isinf_or_builtin(__a) || __libcpp_isinf_or_builtin(__b))
{
__a = copysign(__libcpp_isinf(__a) ? _Tp(1) : _Tp(0), __a);
__b = copysign(__libcpp_isinf(__b) ? _Tp(1) : _Tp(0), __b);
if (__libcpp_isnan(__c))
__a = copysign(__libcpp_isinf_or_builtin(__a) ? _Tp(1) : _Tp(0), __a);
__b = copysign(__libcpp_isinf_or_builtin(__b) ? _Tp(1) : _Tp(0), __b);
if (__libcpp_isnan_or_builtin(__c))
__c = copysign(_Tp(0), __c);
if (__libcpp_isnan(__d))
if (__libcpp_isnan_or_builtin(__d))
__d = copysign(_Tp(0), __d);
__recalc = true;
}
if (__libcpp_isinf(__c) || __libcpp_isinf(__d))
if (__libcpp_isinf_or_builtin(__c) || __libcpp_isinf_or_builtin(__d))
{
__c = copysign(__libcpp_isinf(__c) ? _Tp(1) : _Tp(0), __c);
__d = copysign(__libcpp_isinf(__d) ? _Tp(1) : _Tp(0), __d);
if (__libcpp_isnan(__a))
__c = copysign(__libcpp_isinf_or_builtin(__c) ? _Tp(1) : _Tp(0), __c);
__d = copysign(__libcpp_isinf_or_builtin(__d) ? _Tp(1) : _Tp(0), __d);
if (__libcpp_isnan_or_builtin(__a))
__a = copysign(_Tp(0), __a);
if (__libcpp_isnan(__b))
if (__libcpp_isnan_or_builtin(__b))
__b = copysign(_Tp(0), __b);
__recalc = true;
}
if (!__recalc && (__libcpp_isinf(__ac) || __libcpp_isinf(__bd) ||
__libcpp_isinf(__ad) || __libcpp_isinf(__bc)))
if (!__recalc && (__libcpp_isinf_or_builtin(__ac) || __libcpp_isinf_or_builtin(__bd) ||
__libcpp_isinf_or_builtin(__ad) || __libcpp_isinf_or_builtin(__bc)))
{
if (__libcpp_isnan(__a))
if (__libcpp_isnan_or_builtin(__a))
__a = copysign(_Tp(0), __a);
if (__libcpp_isnan(__b))
if (__libcpp_isnan_or_builtin(__b))
__b = copysign(_Tp(0), __b);
if (__libcpp_isnan(__c))
if (__libcpp_isnan_or_builtin(__c))
__c = copysign(_Tp(0), __c);
if (__libcpp_isnan(__d))
if (__libcpp_isnan_or_builtin(__d))
__d = copysign(_Tp(0), __d);
__recalc = true;
}
@ -674,7 +674,7 @@ operator/(const complex<_Tp>& __z, const complex<_Tp>& __w)
_Tp __c = __w.real();
_Tp __d = __w.imag();
_Tp __logbw = logb(fmax(fabs(__c), fabs(__d)));
if (__libcpp_isfinite(__logbw))
if (__libcpp_isfinite_or_builtin(__logbw))
{
__ilogbw = static_cast<int>(__logbw);
__c = scalbn(__c, -__ilogbw);
@ -683,24 +683,24 @@ operator/(const complex<_Tp>& __z, const complex<_Tp>& __w)
_Tp __denom = __c * __c + __d * __d;
_Tp __x = scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
_Tp __y = scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
if (__libcpp_isnan(__x) && __libcpp_isnan(__y))
if (__libcpp_isnan_or_builtin(__x) && __libcpp_isnan_or_builtin(__y))
{
if ((__denom == _Tp(0)) && (!__libcpp_isnan(__a) || !__libcpp_isnan(__b)))
if ((__denom == _Tp(0)) && (!__libcpp_isnan_or_builtin(__a) || !__libcpp_isnan_or_builtin(__b)))
{
__x = copysign(_Tp(INFINITY), __c) * __a;
__y = copysign(_Tp(INFINITY), __c) * __b;
}
else if ((__libcpp_isinf(__a) || __libcpp_isinf(__b)) && __libcpp_isfinite(__c) && __libcpp_isfinite(__d))
else if ((__libcpp_isinf_or_builtin(__a) || __libcpp_isinf_or_builtin(__b)) && __libcpp_isfinite_or_builtin(__c) && __libcpp_isfinite_or_builtin(__d))
{
__a = copysign(__libcpp_isinf(__a) ? _Tp(1) : _Tp(0), __a);
__b = copysign(__libcpp_isinf(__b) ? _Tp(1) : _Tp(0), __b);
__a = copysign(__libcpp_isinf_or_builtin(__a) ? _Tp(1) : _Tp(0), __a);
__b = copysign(__libcpp_isinf_or_builtin(__b) ? _Tp(1) : _Tp(0), __b);
__x = _Tp(INFINITY) * (__a * __c + __b * __d);
__y = _Tp(INFINITY) * (__b * __c - __a * __d);
}
else if (__libcpp_isinf(__logbw) && __logbw > _Tp(0) && __libcpp_isfinite(__a) && __libcpp_isfinite(__b))
else if (__libcpp_isinf_or_builtin(__logbw) && __logbw > _Tp(0) && __libcpp_isfinite_or_builtin(__a) && __libcpp_isfinite_or_builtin(__b))
{
__c = copysign(__libcpp_isinf(__c) ? _Tp(1) : _Tp(0), __c);
__d = copysign(__libcpp_isinf(__d) ? _Tp(1) : _Tp(0), __d);
__c = copysign(__libcpp_isinf_or_builtin(__c) ? _Tp(1) : _Tp(0), __c);
__d = copysign(__libcpp_isinf_or_builtin(__d) ? _Tp(1) : _Tp(0), __d);
__x = _Tp(0) * (__a * __c + __b * __d);
__y = _Tp(0) * (__b * __c - __a * __d);
}
@ -910,9 +910,9 @@ inline _LIBCPP_INLINE_VISIBILITY
_Tp
norm(const complex<_Tp>& __c)
{
if (__libcpp_isinf(__c.real()))
if (__libcpp_isinf_or_builtin(__c.real()))
return abs(__c.real());
if (__libcpp_isinf(__c.imag()))
if (__libcpp_isinf_or_builtin(__c.imag()))
return abs(__c.imag());
return __c.real() * __c.real() + __c.imag() * __c.imag();
}
@ -955,7 +955,7 @@ complex<_Tp>
proj(const complex<_Tp>& __c)
{
std::complex<_Tp> __r = __c;
if (__libcpp_isinf(__c.real()) || __libcpp_isinf(__c.imag()))
if (__libcpp_isinf_or_builtin(__c.real()) || __libcpp_isinf_or_builtin(__c.imag()))
__r = complex<_Tp>(INFINITY, copysign(_Tp(0), __c.imag()));
return __r;
}
@ -969,7 +969,7 @@ typename enable_if
>::type
proj(_Tp __re)
{
if (__libcpp_isinf(__re))
if (__libcpp_isinf_or_builtin(__re))
__re = abs(__re);
return complex<_Tp>(__re);
}
@ -993,25 +993,25 @@ template<class _Tp>
complex<_Tp>
polar(const _Tp& __rho, const _Tp& __theta = _Tp(0))
{
if (__libcpp_isnan(__rho) || signbit(__rho))
if (__libcpp_isnan_or_builtin(__rho) || signbit(__rho))
return complex<_Tp>(_Tp(NAN), _Tp(NAN));
if (__libcpp_isnan(__theta))
if (__libcpp_isnan_or_builtin(__theta))
{
if (__libcpp_isinf(__rho))
if (__libcpp_isinf_or_builtin(__rho))
return complex<_Tp>(__rho, __theta);
return complex<_Tp>(__theta, __theta);
}
if (__libcpp_isinf(__theta))
if (__libcpp_isinf_or_builtin(__theta))
{
if (__libcpp_isinf(__rho))
if (__libcpp_isinf_or_builtin(__rho))
return complex<_Tp>(__rho, _Tp(NAN));
return complex<_Tp>(_Tp(NAN), _Tp(NAN));
}
_Tp __x = __rho * cos(__theta);
if (__libcpp_isnan(__x))
if (__libcpp_isnan_or_builtin(__x))
__x = 0;
_Tp __y = __rho * sin(__theta);
if (__libcpp_isnan(__y))
if (__libcpp_isnan_or_builtin(__y))
__y = 0;
return complex<_Tp>(__x, __y);
}
@ -1042,13 +1042,13 @@ template<class _Tp>
complex<_Tp>
sqrt(const complex<_Tp>& __x)
{
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(_Tp(INFINITY), __x.imag());
if (__libcpp_isinf(__x.real()))
if (__libcpp_isinf_or_builtin(__x.real()))
{
if (__x.real() > _Tp(0))
return complex<_Tp>(__x.real(), __libcpp_isnan(__x.imag()) ? __x.imag() : copysign(_Tp(0), __x.imag()));
return complex<_Tp>(__libcpp_isnan(__x.imag()) ? __x.imag() : _Tp(0), copysign(__x.real(), __x.imag()));
return complex<_Tp>(__x.real(), __libcpp_isnan_or_builtin(__x.imag()) ? __x.imag() : copysign(_Tp(0), __x.imag()));
return complex<_Tp>(__libcpp_isnan_or_builtin(__x.imag()) ? __x.imag() : _Tp(0), copysign(__x.real(), __x.imag()));
}
return polar(sqrt(abs(__x)), arg(__x) / _Tp(2));
}
@ -1060,21 +1060,21 @@ complex<_Tp>
exp(const complex<_Tp>& __x)
{
_Tp __i = __x.imag();
if (__libcpp_isinf(__x.real()))
if (__libcpp_isinf_or_builtin(__x.real()))
{
if (__x.real() < _Tp(0))
{
if (!__libcpp_isfinite(__i))
if (!__libcpp_isfinite_or_builtin(__i))
__i = _Tp(1);
}
else if (__i == 0 || !__libcpp_isfinite(__i))
else if (__i == 0 || !__libcpp_isfinite_or_builtin(__i))
{
if (__libcpp_isinf(__i))
if (__libcpp_isinf_or_builtin(__i))
__i = _Tp(NAN);
return complex<_Tp>(__x.real(), __i);
}
}
else if (__libcpp_isnan(__x.real()) && __x.imag() == 0)
else if (__libcpp_isnan_or_builtin(__x.real()) && __x.imag() == 0)
return __x;
_Tp __e = exp(__x.real());
return complex<_Tp>(__e * cos(__i), __e * sin(__i));
@ -1132,23 +1132,23 @@ complex<_Tp>
asinh(const complex<_Tp>& __x)
{
const _Tp __pi(atan2(+0., -0.));
if (__libcpp_isinf(__x.real()))
if (__libcpp_isinf_or_builtin(__x.real()))
{
if (__libcpp_isnan(__x.imag()))
if (__libcpp_isnan_or_builtin(__x.imag()))
return __x;
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag()));
return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
}
if (__libcpp_isnan(__x.real()))
if (__libcpp_isnan_or_builtin(__x.real()))
{
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(__x.imag(), __x.real());
if (__x.imag() == 0)
return __x;
return complex<_Tp>(__x.real(), __x.real());
}
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(copysign(__x.imag(), __x.real()), copysign(__pi/_Tp(2), __x.imag()));
complex<_Tp> __z = log(__x + sqrt(pow(__x, _Tp(2)) + _Tp(1)));
return complex<_Tp>(copysign(__z.real(), __x.real()), copysign(__z.imag(), __x.imag()));
@ -1161,11 +1161,11 @@ complex<_Tp>
acosh(const complex<_Tp>& __x)
{
const _Tp __pi(atan2(+0., -0.));
if (__libcpp_isinf(__x.real()))
if (__libcpp_isinf_or_builtin(__x.real()))
{
if (__libcpp_isnan(__x.imag()))
if (__libcpp_isnan_or_builtin(__x.imag()))
return complex<_Tp>(abs(__x.real()), __x.imag());
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
{
if (__x.real() > 0)
return complex<_Tp>(__x.real(), copysign(__pi * _Tp(0.25), __x.imag()));
@ -1176,13 +1176,13 @@ acosh(const complex<_Tp>& __x)
return complex<_Tp>(-__x.real(), copysign(__pi, __x.imag()));
return complex<_Tp>(__x.real(), copysign(_Tp(0), __x.imag()));
}
if (__libcpp_isnan(__x.real()))
if (__libcpp_isnan_or_builtin(__x.real()))
{
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(abs(__x.imag()), __x.real());
return complex<_Tp>(__x.real(), __x.real());
}
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(abs(__x.imag()), copysign(__pi/_Tp(2), __x.imag()));
complex<_Tp> __z = log(__x + sqrt(pow(__x, _Tp(2)) - _Tp(1)));
return complex<_Tp>(copysign(__z.real(), _Tp(0)), copysign(__z.imag(), __x.imag()));
@ -1195,21 +1195,21 @@ complex<_Tp>
atanh(const complex<_Tp>& __x)
{
const _Tp __pi(atan2(+0., -0.));
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
{
return complex<_Tp>(copysign(_Tp(0), __x.real()), copysign(__pi/_Tp(2), __x.imag()));
}
if (__libcpp_isnan(__x.imag()))
if (__libcpp_isnan_or_builtin(__x.imag()))
{
if (__libcpp_isinf(__x.real()) || __x.real() == 0)
if (__libcpp_isinf_or_builtin(__x.real()) || __x.real() == 0)
return complex<_Tp>(copysign(_Tp(0), __x.real()), __x.imag());
return complex<_Tp>(__x.imag(), __x.imag());
}
if (__libcpp_isnan(__x.real()))
if (__libcpp_isnan_or_builtin(__x.real()))
{
return complex<_Tp>(__x.real(), __x.real());
}
if (__libcpp_isinf(__x.real()))
if (__libcpp_isinf_or_builtin(__x.real()))
{
return complex<_Tp>(copysign(_Tp(0), __x.real()), copysign(__pi/_Tp(2), __x.imag()));
}
@ -1227,11 +1227,11 @@ template<class _Tp>
complex<_Tp>
sinh(const complex<_Tp>& __x)
{
if (__libcpp_isinf(__x.real()) && !__libcpp_isfinite(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.real()) && !__libcpp_isfinite_or_builtin(__x.imag()))
return complex<_Tp>(__x.real(), _Tp(NAN));
if (__x.real() == 0 && !__libcpp_isfinite(__x.imag()))
if (__x.real() == 0 && !__libcpp_isfinite_or_builtin(__x.imag()))
return complex<_Tp>(__x.real(), _Tp(NAN));
if (__x.imag() == 0 && !__libcpp_isfinite(__x.real()))
if (__x.imag() == 0 && !__libcpp_isfinite_or_builtin(__x.real()))
return __x;
return complex<_Tp>(sinh(__x.real()) * cos(__x.imag()), cosh(__x.real()) * sin(__x.imag()));
}
@ -1242,13 +1242,13 @@ template<class _Tp>
complex<_Tp>
cosh(const complex<_Tp>& __x)
{
if (__libcpp_isinf(__x.real()) && !__libcpp_isfinite(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.real()) && !__libcpp_isfinite_or_builtin(__x.imag()))
return complex<_Tp>(abs(__x.real()), _Tp(NAN));
if (__x.real() == 0 && !__libcpp_isfinite(__x.imag()))
if (__x.real() == 0 && !__libcpp_isfinite_or_builtin(__x.imag()))
return complex<_Tp>(_Tp(NAN), __x.real());
if (__x.real() == 0 && __x.imag() == 0)
return complex<_Tp>(_Tp(1), __x.imag());
if (__x.imag() == 0 && !__libcpp_isfinite(__x.real()))
if (__x.imag() == 0 && !__libcpp_isfinite_or_builtin(__x.real()))
return complex<_Tp>(abs(__x.real()), __x.imag());
return complex<_Tp>(cosh(__x.real()) * cos(__x.imag()), sinh(__x.real()) * sin(__x.imag()));
}
@ -1259,19 +1259,19 @@ template<class _Tp>
complex<_Tp>
tanh(const complex<_Tp>& __x)
{
if (__libcpp_isinf(__x.real()))
if (__libcpp_isinf_or_builtin(__x.real()))
{
if (!__libcpp_isfinite(__x.imag()))
if (!__libcpp_isfinite_or_builtin(__x.imag()))
return complex<_Tp>(_Tp(1), _Tp(0));
return complex<_Tp>(_Tp(1), copysign(_Tp(0), sin(_Tp(2) * __x.imag())));
}
if (__libcpp_isnan(__x.real()) && __x.imag() == 0)
if (__libcpp_isnan_or_builtin(__x.real()) && __x.imag() == 0)
return __x;
_Tp __2r(_Tp(2) * __x.real());
_Tp __2i(_Tp(2) * __x.imag());
_Tp __d(cosh(__2r) + cos(__2i));
_Tp __2rsh(sinh(__2r));
if (__libcpp_isinf(__2rsh) && __libcpp_isinf(__d))
if (__libcpp_isinf_or_builtin(__2rsh) && __libcpp_isinf_or_builtin(__d))
return complex<_Tp>(__2rsh > _Tp(0) ? _Tp(1) : _Tp(-1),
__2i > _Tp(0) ? _Tp(0) : _Tp(-0.));
return complex<_Tp>(__2rsh/__d, sin(__2i)/__d);
@ -1294,11 +1294,11 @@ complex<_Tp>
acos(const complex<_Tp>& __x)
{
const _Tp __pi(atan2(+0., -0.));
if (__libcpp_isinf(__x.real()))
if (__libcpp_isinf_or_builtin(__x.real()))
{
if (__libcpp_isnan(__x.imag()))
if (__libcpp_isnan_or_builtin(__x.imag()))
return complex<_Tp>(__x.imag(), __x.real());
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
{
if (__x.real() < _Tp(0))
return complex<_Tp>(_Tp(0.75) * __pi, -__x.imag());
@ -1308,13 +1308,13 @@ acos(const complex<_Tp>& __x)
return complex<_Tp>(__pi, signbit(__x.imag()) ? -__x.real() : __x.real());
return complex<_Tp>(_Tp(0), signbit(__x.imag()) ? __x.real() : -__x.real());
}
if (__libcpp_isnan(__x.real()))
if (__libcpp_isnan_or_builtin(__x.real()))
{
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(__x.real(), -__x.imag());
return complex<_Tp>(__x.real(), __x.real());
}
if (__libcpp_isinf(__x.imag()))
if (__libcpp_isinf_or_builtin(__x.imag()))
return complex<_Tp>(__pi/_Tp(2), -__x.imag());
if (__x.real() == 0 && (__x.imag() == 0 || isnan(__x.imag())))
return complex<_Tp>(__pi/_Tp(2), -__x.imag());
|
@ -439,46 +439,122 @@ struct __optional_storage_base<_Tp, true>
}
};

template <class _Tp, bool = is_trivially_copyable<_Tp>::value>
struct __optional_storage;

template <class _Tp>
struct __optional_storage<_Tp, true> : __optional_storage_base<_Tp>
template <class _Tp, bool = is_trivially_copy_constructible<_Tp>::value>
struct __optional_copy_base : __optional_storage_base<_Tp>
{
using __optional_storage_base<_Tp>::__optional_storage_base;
};

template <class _Tp>
struct __optional_storage<_Tp, false> : __optional_storage_base<_Tp>
struct __optional_copy_base<_Tp, false> : __optional_storage_base<_Tp>
{
using value_type = _Tp;
using __optional_storage_base<_Tp>::__optional_storage_base;

_LIBCPP_INLINE_VISIBILITY
__optional_storage() = default;
__optional_copy_base() = default;

_LIBCPP_INLINE_VISIBILITY
__optional_storage(const __optional_storage& __opt)
__optional_copy_base(const __optional_copy_base& __opt)
{
this->__construct_from(__opt);
}

_LIBCPP_INLINE_VISIBILITY
__optional_storage(__optional_storage&& __opt)
__optional_copy_base(__optional_copy_base&&) = default;
_LIBCPP_INLINE_VISIBILITY
__optional_copy_base& operator=(const __optional_copy_base&) = default;
_LIBCPP_INLINE_VISIBILITY
__optional_copy_base& operator=(__optional_copy_base&&) = default;
};

template <class _Tp, bool = is_trivially_move_constructible<_Tp>::value>
struct __optional_move_base : __optional_copy_base<_Tp>
{
using __optional_copy_base<_Tp>::__optional_copy_base;
};

template <class _Tp>
struct __optional_move_base<_Tp, false> : __optional_copy_base<_Tp>
{
using value_type = _Tp;
using __optional_copy_base<_Tp>::__optional_copy_base;

_LIBCPP_INLINE_VISIBILITY
__optional_move_base() = default;
_LIBCPP_INLINE_VISIBILITY
__optional_move_base(const __optional_move_base&) = default;

_LIBCPP_INLINE_VISIBILITY
__optional_move_base(__optional_move_base&& __opt)
noexcept(is_nothrow_move_constructible_v<value_type>)
{
this->__construct_from(_VSTD::move(__opt));
}

_LIBCPP_INLINE_VISIBILITY
__optional_storage& operator=(const __optional_storage& __opt)
__optional_move_base& operator=(const __optional_move_base&) = default;
_LIBCPP_INLINE_VISIBILITY
__optional_move_base& operator=(__optional_move_base&&) = default;
};

template <class _Tp, bool =
is_trivially_destructible<_Tp>::value &&
is_trivially_copy_constructible<_Tp>::value &&
is_trivially_copy_assignable<_Tp>::value>
struct __optional_copy_assign_base : __optional_move_base<_Tp>
{
using __optional_move_base<_Tp>::__optional_move_base;
};

template <class _Tp>
struct __optional_copy_assign_base<_Tp, false> : __optional_move_base<_Tp>
{
using __optional_move_base<_Tp>::__optional_move_base;

_LIBCPP_INLINE_VISIBILITY
__optional_copy_assign_base() = default;
_LIBCPP_INLINE_VISIBILITY
__optional_copy_assign_base(const __optional_copy_assign_base&) = default;
_LIBCPP_INLINE_VISIBILITY
__optional_copy_assign_base(__optional_copy_assign_base&&) = default;

_LIBCPP_INLINE_VISIBILITY
__optional_copy_assign_base& operator=(const __optional_copy_assign_base& __opt)
{
this->__assign_from(__opt);
return *this;
}

_LIBCPP_INLINE_VISIBILITY
__optional_storage& operator=(__optional_storage&& __opt)
__optional_copy_assign_base& operator=(__optional_copy_assign_base&&) = default;
};

template <class _Tp, bool =
is_trivially_destructible<_Tp>::value &&
is_trivially_move_constructible<_Tp>::value &&
is_trivially_move_assignable<_Tp>::value>
struct __optional_move_assign_base : __optional_copy_assign_base<_Tp>
{
using __optional_copy_assign_base<_Tp>::__optional_copy_assign_base;
};

template <class _Tp>
struct __optional_move_assign_base<_Tp, false> : __optional_copy_assign_base<_Tp>
{
using value_type = _Tp;
using __optional_copy_assign_base<_Tp>::__optional_copy_assign_base;

_LIBCPP_INLINE_VISIBILITY
__optional_move_assign_base() = default;
_LIBCPP_INLINE_VISIBILITY
__optional_move_assign_base(const __optional_move_assign_base& __opt) = default;
_LIBCPP_INLINE_VISIBILITY
__optional_move_assign_base(__optional_move_assign_base&&) = default;
_LIBCPP_INLINE_VISIBILITY
__optional_move_assign_base& operator=(const __optional_move_assign_base&) = default;

_LIBCPP_INLINE_VISIBILITY
__optional_move_assign_base& operator=(__optional_move_assign_base&& __opt)
noexcept(is_nothrow_move_assignable_v<value_type> &&
is_nothrow_move_constructible_v<value_type>)
{
@ -501,11 +577,11 @@ using __optional_sfinae_assign_base_t = __sfinae_assign_base<

template <class _Tp>
class optional
: private __optional_storage<_Tp>
: private __optional_move_assign_base<_Tp>
, private __optional_sfinae_ctor_base_t<_Tp>
, private __optional_sfinae_assign_base_t<_Tp>
{
using __base = __optional_storage<_Tp>;
using __base = __optional_move_assign_base<_Tp>;
public:
using value_type = _Tp;

|
@ -6142,7 +6142,7 @@ regex_iterator<_BidirectionalIterator, _CharT, _Traits>::operator++()
{
__flags_ |= regex_constants::__no_update_pos;
_BidirectionalIterator __start = __match_[0].second;
if (__match_.empty())
if (__match_[0].first == __match_[0].second)
{
if (__start == __end_)
{
|
@ -676,11 +676,11 @@ private:
};

#if _LIBCPP_BIG_ENDIAN
enum {__short_mask = 0x01};
enum {__long_mask = 0x1ul};
static const size_type __short_mask = 0x01;
static const size_type __long_mask = 0x1ul;
#else // _LIBCPP_BIG_ENDIAN
enum {__short_mask = 0x80};
enum {__long_mask = ~(size_type(~0) >> 1)};
static const size_type __short_mask = 0x80;
static const size_type __long_mask = ~(size_type(~0) >> 1);
#endif // _LIBCPP_BIG_ENDIAN

enum {__min_cap = (sizeof(__long) - 1)/sizeof(value_type) > 2 ?
@ -706,11 +706,11 @@ private:
};

#if _LIBCPP_BIG_ENDIAN
enum {__short_mask = 0x80};
enum {__long_mask = ~(size_type(~0) >> 1)};
static const size_type __short_mask = 0x80;
static const size_type __long_mask = ~(size_type(~0) >> 1);
#else // _LIBCPP_BIG_ENDIAN
enum {__short_mask = 0x01};
enum {__long_mask = 0x1ul};
static const size_type __short_mask = 0x01;
static const size_type __long_mask = 0x1ul;
#endif // _LIBCPP_BIG_ENDIAN

enum {__min_cap = (sizeof(__long) - 1)/sizeof(value_type) > 2 ?
|
@ -0,0 +1,173 @@
//===----------------------------------------------------------------------===////
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===////

#ifndef FILESYSTEM_TIME_HELPER_H
#define FILESYSTEM_TIME_HELPER_H

#include "experimental/__config"
#include "chrono"
#include "cstdlib"
#include "climits"

#include <unistd.h>
#include <sys/stat.h>
#if !defined(UTIME_OMIT)
#include <sys/time.h> // for ::utimes as used in __last_write_time
#endif

_LIBCPP_BEGIN_NAMESPACE_EXPERIMENTAL_FILESYSTEM

namespace time_detail { namespace {

using namespace chrono;

template <class FileTimeT,
bool IsFloat = is_floating_point<typename FileTimeT::rep>::value>
struct fs_time_util_base {
static constexpr auto max_seconds =
duration_cast<seconds>(FileTimeT::duration::max()).count();

static constexpr auto max_nsec =
duration_cast<nanoseconds>(FileTimeT::duration::max() -
seconds(max_seconds))
.count();

static constexpr auto min_seconds =
duration_cast<seconds>(FileTimeT::duration::min()).count();

static constexpr auto min_nsec_timespec =
duration_cast<nanoseconds>(
(FileTimeT::duration::min() - seconds(min_seconds)) + seconds(1))
.count();

// Static assert that these values properly round trip.
static_assert((seconds(min_seconds) +
duration_cast<microseconds>(nanoseconds(min_nsec_timespec))) -
duration_cast<microseconds>(seconds(1)) ==
FileTimeT::duration::min(),
"");
};

template <class FileTimeT>
struct fs_time_util_base<FileTimeT, true> {
static const long long max_seconds;
static const long long max_nsec;
static const long long min_seconds;
static const long long min_nsec_timespec;
};

template <class FileTimeT>
const long long fs_time_util_base<FileTimeT, true>::max_seconds =
duration_cast<seconds>(FileTimeT::duration::max()).count();

template <class FileTimeT>
const long long fs_time_util_base<FileTimeT, true>::max_nsec =
duration_cast<nanoseconds>(FileTimeT::duration::max() -
seconds(max_seconds))
.count();

template <class FileTimeT>
const long long fs_time_util_base<FileTimeT, true>::min_seconds =
duration_cast<seconds>(FileTimeT::duration::min()).count();

template <class FileTimeT>
const long long fs_time_util_base<FileTimeT, true>::min_nsec_timespec =
duration_cast<nanoseconds>((FileTimeT::duration::min() -
seconds(min_seconds)) +
seconds(1))
.count();

template <class FileTimeT, class TimeT, class TimeSpecT>
struct fs_time_util : fs_time_util_base<FileTimeT> {
using Base = fs_time_util_base<FileTimeT>;
using Base::max_nsec;
using Base::max_seconds;
using Base::min_nsec_timespec;
using Base::min_seconds;

public:
template <class CType, class ChronoType>
static bool checked_set(CType* out, ChronoType time) {
using Lim = numeric_limits<CType>;
if (time > Lim::max() || time < Lim::min())
return false;
*out = static_cast<CType>(time);
return true;
}

static _LIBCPP_CONSTEXPR_AFTER_CXX11 bool is_representable(TimeSpecT tm) {
if (tm.tv_sec >= 0) {
return (tm.tv_sec < max_seconds) ||
(tm.tv_sec == max_seconds && tm.tv_nsec <= max_nsec);
} else if (tm.tv_sec == (min_seconds - 1)) {
return tm.tv_nsec >= min_nsec_timespec;
} else {
return (tm.tv_sec >= min_seconds);
}
}

static _LIBCPP_CONSTEXPR_AFTER_CXX11 bool is_representable(FileTimeT tm) {
auto secs = duration_cast<seconds>(tm.time_since_epoch());
auto nsecs = duration_cast<nanoseconds>(tm.time_since_epoch() - secs);
if (nsecs.count() < 0) {
secs = secs + seconds(1);
nsecs = nsecs + seconds(1);
}
using TLim = numeric_limits<TimeT>;
if (secs.count() >= 0)
return secs.count() <= TLim::max();
return secs.count() >= TLim::min();
}

static _LIBCPP_CONSTEXPR_AFTER_CXX11 FileTimeT
convert_timespec(TimeSpecT tm) {
auto adj_msec = duration_cast<microseconds>(nanoseconds(tm.tv_nsec));
if (tm.tv_sec >= 0) {
auto Dur = seconds(tm.tv_sec) + microseconds(adj_msec);
return FileTimeT(Dur);
} else if (duration_cast<microseconds>(nanoseconds(tm.tv_nsec)).count() ==
0) {
return FileTimeT(seconds(tm.tv_sec));
} else { // tm.tv_sec < 0
auto adj_subsec =
duration_cast<microseconds>(seconds(1) - nanoseconds(tm.tv_nsec));
auto Dur = seconds(tm.tv_sec + 1) - adj_subsec;
return FileTimeT(Dur);
}
}

template <class SubSecDurT, class SubSecT>
static bool set_times_checked(TimeT* sec_out, SubSecT* subsec_out,
FileTimeT tp) {
using namespace chrono;
auto dur = tp.time_since_epoch();
auto sec_dur = duration_cast<seconds>(dur);
auto subsec_dur = duration_cast<SubSecDurT>(dur - sec_dur);
// The tv_nsec and tv_usec fields must not be negative so adjust accordingly
if (subsec_dur.count() < 0) {
if (sec_dur.count() > min_seconds) {
sec_dur -= seconds(1);
subsec_dur += seconds(1);
} else {
subsec_dur = SubSecDurT::zero();
}
}
return checked_set(sec_out, sec_dur.count()) &&
checked_set(subsec_out, subsec_dur.count());
}
};

} // end namespace
} // end namespace time_detail

using time_detail::fs_time_util;

_LIBCPP_END_NAMESPACE_EXPERIMENTAL_FILESYSTEM

#endif // FILESYSTEM_TIME_HELPER_H
|
@ -15,6 +15,8 @@
#include "cstdlib"
#include "climits"

#include "filesystem_time_helper.h"

#include <unistd.h>
#include <sys/stat.h>
#include <sys/statvfs.h>
@ -426,17 +428,20 @@ void __current_path(const path& p, std::error_code *ec) {

bool __equivalent(const path& p1, const path& p2, std::error_code *ec)
{
auto make_unsupported_error = [&]() {
set_or_throw(make_error_code(errc::not_supported), ec,
"equivalent", p1, p2);
return false;
};
std::error_code ec1, ec2;
struct ::stat st1 = {};
struct ::stat st2 = {};
auto s1 = detail::posix_stat(p1.native(), st1, &ec1);
if (!exists(s1))
return make_unsupported_error();
auto s2 = detail::posix_stat(p2.native(), st2, &ec2);

if ((!exists(s1) && !exists(s2)) || (is_other(s1) && is_other(s2))) {
set_or_throw(make_error_code(errc::not_supported), ec,
"equivalent", p1, p2);
return false;
}
if (!exists(s2))
return make_unsupported_error();
if (ec) ec->clear();
return (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino);
}
@ -502,17 +507,6 @@ bool __fs_is_empty(const path& p, std::error_code *ec)

namespace detail { namespace {

using namespace std::chrono;

template <class CType, class ChronoType>
bool checked_set(CType* out, ChronoType time) {
using Lim = numeric_limits<CType>;
if (time > Lim::max() || time < Lim::min())
return false;
*out = static_cast<CType>(time);
return true;
}

using TimeSpec = struct timespec;
using StatT = struct stat;

@ -525,137 +519,10 @@ __attribute__((unused)) // Suppress warning
TimeSpec extract_atime(StatT const& st) { return st.st_atim; }
#endif

constexpr auto max_seconds = duration_cast<seconds>(
file_time_type::duration::max()).count();

constexpr auto max_nsec = duration_cast<nanoseconds>(
file_time_type::duration::max() - seconds(max_seconds)).count();

constexpr auto min_seconds = duration_cast<seconds>(
file_time_type::duration::min()).count();

constexpr auto min_nsec_timespec = duration_cast<nanoseconds>(
(file_time_type::duration::min() - seconds(min_seconds)) + seconds(1)).count();

// Static assert that these values properly round trip.
static_assert((seconds(min_seconds) + duration_cast<microseconds>(nanoseconds(min_nsec_timespec)))
- duration_cast<microseconds>(seconds(1))
== file_time_type::duration::min(), "");

constexpr auto max_time_t = numeric_limits<time_t>::max();
constexpr auto min_time_t = numeric_limits<time_t>::min();

#if !defined(__LP64__) && defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wtautological-constant-out-of-range-compare"
#endif

_LIBCPP_CONSTEXPR_AFTER_CXX11
bool is_representable(TimeSpec const& tm) {
if (tm.tv_sec >= 0) {
return (tm.tv_sec < max_seconds) ||
(tm.tv_sec == max_seconds && tm.tv_nsec <= max_nsec);
} else if (tm.tv_sec == (min_seconds - 1)) {
return tm.tv_nsec >= min_nsec_timespec;
} else {
return (tm.tv_sec >= min_seconds);
}
}
#ifndef _LIBCPP_HAS_NO_CXX14_CONSTEXPR
#if defined(__LP64__)
static_assert(is_representable({max_seconds, max_nsec}), "");
static_assert(!is_representable({max_seconds + 1, 0}), "");
static_assert(!is_representable({max_seconds, max_nsec + 1}), "");
static_assert(!is_representable({max_time_t, 0}), "");
static_assert(is_representable({min_seconds, 0}), "");
static_assert(is_representable({min_seconds - 1, min_nsec_timespec}), "");
static_assert(is_representable({min_seconds - 1, min_nsec_timespec + 1}), "");
static_assert(!is_representable({min_seconds - 1, min_nsec_timespec - 1}), "");
static_assert(!is_representable({min_time_t, 999999999}), "");
#else
static_assert(is_representable({max_time_t, 999999999}), "");
static_assert(is_representable({max_time_t, 1000000000}), "");
static_assert(is_representable({min_time_t, 0}), "");
#endif
#endif

_LIBCPP_CONSTEXPR_AFTER_CXX11
bool is_representable(file_time_type const& tm) {
auto secs = duration_cast<seconds>(tm.time_since_epoch());
auto nsecs = duration_cast<nanoseconds>(tm.time_since_epoch() - secs);
if (nsecs.count() < 0) {
secs = secs + seconds(1);
nsecs = nsecs + seconds(1);
}
using TLim = numeric_limits<time_t>;
if (secs.count() >= 0)
return secs.count() <= TLim::max();
return secs.count() >= TLim::min();
}
#ifndef _LIBCPP_HAS_NO_CXX14_CONSTEXPR
#if defined(__LP64__)
static_assert(is_representable(file_time_type::max()), "");
static_assert(is_representable(file_time_type::min()), "");
#else
static_assert(!is_representable(file_time_type::max()), "");
static_assert(!is_representable(file_time_type::min()), "");
static_assert(is_representable(file_time_type(seconds(max_time_t))), "");
static_assert(is_representable(file_time_type(seconds(min_time_t))), "");
#endif
#endif

_LIBCPP_CONSTEXPR_AFTER_CXX11
file_time_type convert_timespec(TimeSpec const& tm) {
auto adj_msec = duration_cast<microseconds>(nanoseconds(tm.tv_nsec));
if (tm.tv_sec >= 0) {
auto Dur = seconds(tm.tv_sec) + microseconds(adj_msec);
return file_time_type(Dur);
} else if (duration_cast<microseconds>(nanoseconds(tm.tv_nsec)).count() == 0) {
return file_time_type(seconds(tm.tv_sec));
} else { // tm.tv_sec < 0
auto adj_subsec = duration_cast<microseconds>(seconds(1) - nanoseconds(tm.tv_nsec));
auto Dur = seconds(tm.tv_sec + 1) - adj_subsec;
return file_time_type(Dur);
}
}
#ifndef _LIBCPP_HAS_NO_CXX14_CONSTEXPR
#if defined(__LP64__)
static_assert(convert_timespec({max_seconds, max_nsec}) == file_time_type::max(), "");
static_assert(convert_timespec({max_seconds, max_nsec - 1}) < file_time_type::max(), "");
static_assert(convert_timespec({max_seconds - 1, 999999999}) < file_time_type::max(), "");
static_assert(convert_timespec({min_seconds - 1, min_nsec_timespec}) == file_time_type::min(), "");
static_assert(convert_timespec({min_seconds - 1, min_nsec_timespec + 1}) > file_time_type::min(), "");
static_assert(convert_timespec({min_seconds , 0}) > file_time_type::min(), "");
#else
// FIXME add tests for 32 bit builds
#endif
#endif

#if !defined(__LP64__) && defined(__clang__)
#pragma clang diagnostic pop
#endif

template <class SubSecDurT, class SubSecT>
bool set_times_checked(time_t* sec_out, SubSecT* subsec_out, file_time_type tp) {
using namespace chrono;
auto dur = tp.time_since_epoch();
auto sec_dur = duration_cast<seconds>(dur);
auto subsec_dur = duration_cast<SubSecDurT>(dur - sec_dur);
// The tv_nsec and tv_usec fields must not be negative so adjust accordingly
if (subsec_dur.count() < 0) {
if (sec_dur.count() > min_seconds) {
sec_dur -= seconds(1);
subsec_dur += seconds(1);
} else {
subsec_dur = SubSecDurT::zero();
}
}
return checked_set(sec_out, sec_dur.count())
&& checked_set(subsec_out, subsec_dur.count());
}

}} // end namespace detail

using FSTime = fs_time_util<file_time_type, time_t, struct timespec>;

file_time_type __last_write_time(const path& p, std::error_code *ec)
{
using namespace ::std::chrono;
@ -668,12 +535,12 @@ file_time_type __last_write_time(const path& p, std::error_code *ec)
}
if (ec) ec->clear();
auto ts = detail::extract_mtime(st);
if (!detail::is_representable(ts)) {
if (!FSTime::is_representable(ts)) {
set_or_throw(error_code(EOVERFLOW, generic_category()), ec,
"last_write_time", p);
return file_time_type::min();
}
return detail::convert_timespec(ts);
return FSTime::convert_timespec(ts);
}

void __last_write_time(const path& p, file_time_type new_time,
@ -698,7 +565,7 @@ void __last_write_time(const path& p, file_time_type new_time,
struct ::timeval tbuf[2];
tbuf[0].tv_sec = atime.tv_sec;
tbuf[0].tv_usec = duration_cast<microseconds>(nanoseconds(atime.tv_nsec)).count();
const bool overflowed = !detail::set_times_checked<microseconds>(
const bool overflowed = !FSTime::set_times_checked<microseconds>(
&tbuf[1].tv_sec, &tbuf[1].tv_usec, new_time);

if (overflowed) {
@ -714,7 +581,7 @@ void __last_write_time(const path& p, file_time_type new_time,
tbuf[0].tv_sec = 0;
tbuf[0].tv_nsec = UTIME_OMIT;

const bool overflowed = !detail::set_times_checked<nanoseconds>(
const bool overflowed = !FSTime::set_times_checked<nanoseconds>(
&tbuf[1].tv_sec, &tbuf[1].tv_nsec, new_time);
if (overflowed) {
set_or_throw(make_error_code(errc::invalid_argument),
|
@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//

#ifndef _LIBCPP___REFSTRING
#define _LIBCPP___REFSTRING
#ifndef _LIBCPP_REFSTRING_H
#define _LIBCPP_REFSTRING_H

#include <__config>
#include <stdexcept>
@ -124,4 +124,4 @@ bool __libcpp_refstring::__uses_refcount() const {

_LIBCPP_END_NAMESPACE_STD

#endif //_LIBCPP___REFSTRING
#endif //_LIBCPP_REFSTRING_H

@ -11,7 +11,7 @@
#include "new"
#include "string"
#include "system_error"
#include "__refstring"
#include "include/refstring.h"

/* For _LIBCPPABI_VERSION */
#if !defined(_LIBCPP_BUILDING_HAS_NO_ABI_LIBRARY) && \

@ -113,8 +113,9 @@ void LLVMOrcDisposeMangledSymbol(char *MangledSymbol);
/**
 * Create a lazy compile callback.
 */
LLVMOrcTargetAddress
LLVMOrcErrorCode
LLVMOrcCreateLazyCompileCallback(LLVMOrcJITStackRef JITStack,
                                 LLVMOrcTargetAddress *RetAddr,
                                 LLVMOrcLazyCompileCallbackFn Callback,
                                 void *CallbackCtx);

@ -135,8 +136,9 @@ LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
/**
 * Add module to be eagerly compiled.
 */
LLVMOrcModuleHandle
LLVMOrcErrorCode
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
                            LLVMOrcModuleHandle *RetHandle,
                            LLVMSharedModuleRef Mod,
                            LLVMOrcSymbolResolverFn SymbolResolver,
                            void *SymbolResolverCtx);
@ -144,8 +146,9 @@ LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
/**
 * Add module to be lazily compiled one function at a time.
 */
LLVMOrcModuleHandle
LLVMOrcErrorCode
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
                           LLVMOrcModuleHandle *RetHandle,
                           LLVMSharedModuleRef Mod,
                           LLVMOrcSymbolResolverFn SymbolResolver,
                           void *SymbolResolverCtx);
@ -153,10 +156,11 @@ LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
/**
 * Add an object file.
 */
LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
                                         LLVMSharedObjectBufferRef Obj,
                                         LLVMOrcSymbolResolverFn SymbolResolver,
                                         void *SymbolResolverCtx);
LLVMOrcErrorCode LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
                                      LLVMOrcModuleHandle *RetHandle,
                                      LLVMSharedObjectBufferRef Obj,
                                      LLVMOrcSymbolResolverFn SymbolResolver,
                                      void *SymbolResolverCtx);

/**
 * Remove a module set from the JIT.
@ -164,18 +168,20 @@ LLVMOrcModuleHandle LLVMOrcAddObjectFile(LLVMOrcJITStackRef JITStack,
 * This works for all modules that can be added via OrcAdd*, including object
 * files.
 */
void LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack, LLVMOrcModuleHandle H);
LLVMOrcErrorCode LLVMOrcRemoveModule(LLVMOrcJITStackRef JITStack,
                                     LLVMOrcModuleHandle H);

/**
 * Get symbol address from JIT instance.
 */
LLVMOrcTargetAddress LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
                                             const char *SymbolName);
LLVMOrcErrorCode LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
                                         LLVMOrcTargetAddress *RetAddr,
                                         const char *SymbolName);

/**
 * Dispose of an ORC JIT stack.
 */
void LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
LLVMOrcErrorCode LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);

#ifdef __cplusplus
}

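Aside (not part of the diff): these hunks switch the ORC C API from returning handles and addresses directly to returning an LLVMOrcErrorCode, with the old return values moved into out-parameters. A minimal sketch of the new calling pattern, assuming the usual llvm-c/OrcBindings.h header, a JIT stack, module and resolver created elsewhere, and that a zero error code means success:

#include "llvm-c/OrcBindings.h"

static bool addAndLookup(LLVMOrcJITStackRef JITStack, LLVMSharedModuleRef Mod,
                         LLVMOrcSymbolResolverFn Resolver, void *Ctx) {
  LLVMOrcModuleHandle Handle;
  if (LLVMOrcAddEagerlyCompiledIR(JITStack, &Handle, Mod, Resolver, Ctx))
    return false;                        // handle is now an out-parameter

  LLVMOrcTargetAddress MainAddr;
  if (LLVMOrcGetSymbolAddress(JITStack, &MainAddr, "main"))
    return false;                        // address is now an out-parameter

  // ... call through MainAddr ...

  LLVMOrcRemoveModule(JITStack, Handle); // now also reports errors via LLVMOrcErrorCode
  return true;
}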
@ -401,7 +401,11 @@ class LLVM_NODISCARD APInt {
  /// \brief Determine if this is a value of 1.
  ///
  /// This checks to see if the value of this APInt is one.
  bool isOneValue() const { return getActiveBits() == 1; }
  bool isOneValue() const {
    if (isSingleWord())
      return U.VAL == 1;
    return countLeadingZerosSlowCase() == BitWidth - 1;
  }

  /// \brief Determine if this is the largest unsigned value.
  ///

@ -100,6 +100,8 @@ class function_ref<Ret(Params...)> {
  }

public:
  function_ref() : callback(nullptr) {}

  template <typename Callable>
  function_ref(Callable &&callable,
               typename std::enable_if<
@ -110,6 +112,8 @@ class function_ref<Ret(Params...)> {
  Ret operator()(Params ...params) const {
    return callback(callable, std::forward<Params>(params)...);
  }

  operator bool() const { return callback; }
};

// deleter - Very very very simple method that is used to invoke operator

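Aside (not part of the diff): with the default constructor and bool conversion added above, a function_ref parameter can be left unset and tested before use. A hypothetical helper, for illustration only:

#include "llvm/ADT/STLExtras.h"   // llvm::function_ref
#include "llvm/ADT/StringRef.h"

static void visitIfSet(llvm::StringRef S,
                       llvm::function_ref<void(llvm::StringRef)> Visitor = {}) {
  if (!Visitor)   // new operator bool(): false when default-constructed
    return;
  Visitor(S);
}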
@ -15,9 +15,9 @@
#ifndef LLVM_ADT_SMALLPTRSET_H
#define LLVM_ADT_SMALLPTRSET_H

#include "llvm/Config/abi-breaking.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/ReverseIteration.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
@ -29,15 +29,6 @@

namespace llvm {

#if LLVM_ENABLE_ABI_BREAKING_CHECKS
template <class T = void> struct ReverseIterate { static bool value; };
#if LLVM_ENABLE_REVERSE_ITERATION
template <class T> bool ReverseIterate<T>::value = true;
#else
template <class T> bool ReverseIterate<T>::value = false;
#endif
#endif

/// SmallPtrSetImplBase - This is the common code shared among all the
/// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one
/// for small and one for large sets.

@ -1353,4 +1353,4 @@ struct BFIDOTGraphTraitsBase : public DefaultDOTGraphTraits {

#undef DEBUG_TYPE

#endif
#endif // LLVM_ANALYSIS_BLOCKFREQUENCYINFOIMPL_H

@ -577,12 +577,17 @@ class CGSCCToFunctionPassAdaptor
|
||||
// analyses will eventually occur when the module pass completes.
|
||||
PA.intersect(std::move(PassPA));
|
||||
|
||||
// Update the call graph based on this function pass. This may also
|
||||
// update the current SCC to point to a smaller, more refined SCC.
|
||||
CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
|
||||
CG, *CurrentC, *N, AM, UR, DebugLogging);
|
||||
assert(CG.lookupSCC(*N) == CurrentC &&
|
||||
"Current SCC not updated to the SCC containing the current node!");
|
||||
// If the call graph hasn't been preserved, update it based on this
|
||||
// function pass. This may also update the current SCC to point to
|
||||
// a smaller, more refined SCC.
|
||||
auto PAC = PA.getChecker<LazyCallGraphAnalysis>();
|
||||
if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Module>>()) {
|
||||
CurrentC = &updateCGAndAnalysisManagerForFunctionPass(
|
||||
CG, *CurrentC, *N, AM, UR, DebugLogging);
|
||||
assert(
|
||||
CG.lookupSCC(*N) == CurrentC &&
|
||||
"Current SCC not updated to the SCC containing the current node!");
|
||||
}
|
||||
}
|
||||
|
||||
// By definition we preserve the proxy. And we preserve all analyses on
|
||||
|
@ -160,7 +160,7 @@ InlineParams getInlineParams(int Threshold);
|
||||
/// the -Oz flag.
|
||||
InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
|
||||
|
||||
/// Return the cost associated with a callsite, including paramater passing
|
||||
/// Return the cost associated with a callsite, including parameter passing
|
||||
/// and the call/return instruction.
|
||||
int getCallsiteCost(CallSite CS, const DataLayout &DL);
|
||||
|
||||
|
@ -652,17 +652,23 @@ class LazyCallGraph {
|
||||
/// Make an existing internal ref edge into a call edge.
|
||||
///
|
||||
/// This may form a larger cycle and thus collapse SCCs into TargetN's SCC.
|
||||
/// If that happens, the deleted SCC pointers are returned. These SCCs are
|
||||
/// not in a valid state any longer but the pointers will remain valid
|
||||
/// until destruction of the parent graph instance for the purpose of
|
||||
/// clearing cached information.
|
||||
/// If that happens, the optional callback \p MergedCB will be invoked (if
|
||||
/// provided) on the SCCs being merged away prior to actually performing
|
||||
/// the merge. Note that this will never include the target SCC as that
|
||||
/// will be the SCC functions are merged into to resolve the cycle. Once
|
||||
/// this function returns, these merged SCCs are not in a valid state but
|
||||
/// the pointers will remain valid until destruction of the parent graph
|
||||
/// instance for the purpose of clearing cached information. This function
|
||||
/// also returns 'true' if a cycle was formed and some SCCs merged away as
|
||||
/// a convenience.
|
||||
///
|
||||
/// After this operation, both SourceN's SCC and TargetN's SCC may move
|
||||
/// position within this RefSCC's postorder list. Any SCCs merged are
|
||||
/// merged into the TargetN's SCC in order to preserve reachability analyses
|
||||
/// which took place on that SCC.
|
||||
SmallVector<SCC *, 1> switchInternalEdgeToCall(Node &SourceN,
|
||||
Node &TargetN);
|
||||
bool switchInternalEdgeToCall(
|
||||
Node &SourceN, Node &TargetN,
|
||||
function_ref<void(ArrayRef<SCC *> MergedSCCs)> MergeCB = {});
|
||||
|
||||
/// Make an existing internal call edge between separate SCCs into a ref
|
||||
/// edge.
|
||||
|
@ -224,6 +224,9 @@ class ObjectSizeOffsetVisitor
|
||||
SizeOffsetType visitSelectInst(SelectInst &I);
|
||||
SizeOffsetType visitUndefValue(UndefValue&);
|
||||
SizeOffsetType visitInstruction(Instruction &I);
|
||||
|
||||
private:
|
||||
bool CheckedZextOrTrunc(APInt &I);
|
||||
};
|
||||
|
||||
typedef std::pair<Value*, Value*> SizeOffsetEvalType;
|
||||
|
@ -34,10 +34,10 @@
|
||||
#include <type_traits>
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
#define DEBUG_TYPE "region"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
/// RegionBase Implementation
|
||||
template <class Tr>
|
||||
@ -901,8 +901,8 @@ void RegionInfoBase<Tr>::calculate(FuncT &F) {
|
||||
buildRegionsTree(DT->getNode(BB), TopLevelRegion);
|
||||
}
|
||||
|
||||
#undef DEBUG_TYPE
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#undef DEBUG_TYPE
|
||||
|
||||
#endif // LLVM_ANALYSIS_REGIONINFOIMPL_H
|
||||
|
@ -753,6 +753,28 @@ class TargetTransformInfo {
|
||||
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
|
||||
Type *ExpectedType) const;
|
||||
|
||||
/// \returns The type to use in a loop expansion of a memcpy call.
|
||||
Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
|
||||
unsigned SrcAlign, unsigned DestAlign) const;
|
||||
|
||||
/// \param[out] OpsOut The operand types to copy RemainingBytes of memory.
|
||||
/// \param RemainingBytes The number of bytes to copy.
|
||||
///
|
||||
/// Calculates the operand types to use when copying \p RemainingBytes of
|
||||
/// memory, where source and destination alignments are \p SrcAlign and
|
||||
/// \p DestAlign respectively.
|
||||
void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
|
||||
LLVMContext &Context,
|
||||
unsigned RemainingBytes,
|
||||
unsigned SrcAlign,
|
||||
unsigned DestAlign) const;
|
||||
|
||||
/// \returns True if we want to test the new memcpy lowering functionality in
|
||||
/// Transform/Utils.
|
||||
/// Temporary. Will be removed once we move to the new functionality and
|
||||
/// remove the old.
|
||||
bool useWideIRMemcpyLoopLowering() const;
|
||||
|
||||
/// \returns True if the two functions have compatible attributes for inlining
|
||||
/// purposes.
|
||||
bool areInlineCompatible(const Function *Caller,
|
||||
@ -953,6 +975,12 @@ class TargetTransformInfo::Concept {
|
||||
virtual unsigned getAtomicMemIntrinsicMaxElementSize() const = 0;
|
||||
virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
|
||||
Type *ExpectedType) = 0;
|
||||
virtual Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
|
||||
unsigned SrcAlign,
|
||||
unsigned DestAlign) const = 0;
|
||||
virtual void getMemcpyLoopResidualLoweringType(
|
||||
SmallVectorImpl<Type *> &OpsOut, LLVMContext &Context,
|
||||
unsigned RemainingBytes, unsigned SrcAlign, unsigned DestAlign) const = 0;
|
||||
virtual bool areInlineCompatible(const Function *Caller,
|
||||
const Function *Callee) const = 0;
|
||||
virtual unsigned getLoadStoreVecRegBitWidth(unsigned AddrSpace) const = 0;
|
||||
@ -1266,6 +1294,19 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
|
||||
Type *ExpectedType) override {
|
||||
return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
|
||||
}
|
||||
Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
|
||||
unsigned SrcAlign,
|
||||
unsigned DestAlign) const override {
|
||||
return Impl.getMemcpyLoopLoweringType(Context, Length, SrcAlign, DestAlign);
|
||||
}
|
||||
void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
|
||||
LLVMContext &Context,
|
||||
unsigned RemainingBytes,
|
||||
unsigned SrcAlign,
|
||||
unsigned DestAlign) const override {
|
||||
Impl.getMemcpyLoopResidualLoweringType(OpsOut, Context, RemainingBytes,
|
||||
SrcAlign, DestAlign);
|
||||
}
|
||||
bool areInlineCompatible(const Function *Caller,
|
||||
const Function *Callee) const override {
|
||||
return Impl.areInlineCompatible(Caller, Callee);
|
||||
|
@ -444,6 +444,20 @@ class TargetTransformInfoImplBase {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
Type *getMemcpyLoopLoweringType(LLVMContext &Context, Value *Length,
|
||||
unsigned SrcAlign, unsigned DestAlign) const {
|
||||
return Type::getInt8Ty(Context);
|
||||
}
|
||||
|
||||
void getMemcpyLoopResidualLoweringType(SmallVectorImpl<Type *> &OpsOut,
|
||||
LLVMContext &Context,
|
||||
unsigned RemainingBytes,
|
||||
unsigned SrcAlign,
|
||||
unsigned DestAlign) const {
|
||||
for (unsigned i = 0; i != RemainingBytes; ++i)
|
||||
OpsOut.push_back(Type::getInt8Ty(Context));
|
||||
}
|
||||
|
||||
bool areInlineCompatible(const Function *Caller,
|
||||
const Function *Callee) const {
|
||||
return (Caller->getFnAttribute("target-cpu") ==
|
||||
|
@ -523,8 +523,7 @@ template <typename T> class ArrayRef;
|
||||
/// (A)
|
||||
Optional<bool> isImpliedCondition(const Value *LHS, const Value *RHS,
|
||||
const DataLayout &DL,
|
||||
bool InvertAPred = false,
|
||||
unsigned Depth = 0,
|
||||
bool LHSIsFalse = false, unsigned Depth = 0,
|
||||
AssumptionCache *AC = nullptr,
|
||||
const Instruction *CxtI = nullptr,
|
||||
const DominatorTree *DT = nullptr);
|
||||
|
@ -94,7 +94,7 @@ struct WasmFunction {
|
||||
};
|
||||
|
||||
struct WasmDataSegment {
|
||||
uint32_t Index;
|
||||
uint32_t MemoryIndex;
|
||||
WasmInitExpr Offset;
|
||||
ArrayRef<uint8_t> Content;
|
||||
};
|
||||
@ -107,7 +107,7 @@ struct WasmElemSegment {
|
||||
|
||||
struct WasmRelocation {
|
||||
uint32_t Type; // The type of the relocation.
|
||||
int32_t Index; // Index into function to global index space.
|
||||
uint32_t Index; // Index into function to global index space.
|
||||
uint64_t Offset; // Offset from the start of the section.
|
||||
int64_t Addend; // A value to add to the symbol.
|
||||
};
|
||||
|
@ -59,6 +59,8 @@ enum BlockIDs {
|
||||
FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID,
|
||||
|
||||
SYMTAB_BLOCK_ID,
|
||||
|
||||
SYNC_SCOPE_NAMES_BLOCK_ID,
|
||||
};
|
||||
|
||||
/// Identification block contains a string that describes the producer details,
|
||||
@ -172,6 +174,10 @@ enum OperandBundleTagCode {
|
||||
OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
|
||||
};
|
||||
|
||||
enum SyncScopeNameCode {
|
||||
SYNC_SCOPE_NAME = 1,
|
||||
};
|
||||
|
||||
// Value symbol table codes.
|
||||
enum ValueSymtabCodes {
|
||||
VST_CODE_ENTRY = 1, // VST_ENTRY: [valueid, namechar x N]
|
||||
@ -404,12 +410,6 @@ enum AtomicOrderingCodes {
|
||||
ORDERING_SEQCST = 6
|
||||
};
|
||||
|
||||
/// Encoded SynchronizationScope values.
|
||||
enum AtomicSynchScopeCodes {
|
||||
SYNCHSCOPE_SINGLETHREAD = 0,
|
||||
SYNCHSCOPE_CROSSTHREAD = 1
|
||||
};
|
||||
|
||||
/// Markers and flags for call instruction.
|
||||
enum CallMarkersFlags {
|
||||
CALL_TAIL = 0,
|
||||
|
@ -608,8 +608,8 @@ class AsmPrinter : public MachineFunctionPass {
|
||||
// Internal Implementation Details
|
||||
//===------------------------------------------------------------------===//
|
||||
|
||||
/// This emits visibility information about symbol, if this is suported by the
|
||||
/// target.
|
||||
/// This emits visibility information about symbol, if this is supported by
|
||||
/// the target.
|
||||
void EmitVisibility(MCSymbol *Sym, unsigned Visibility,
|
||||
bool IsDefinition = true) const;
|
||||
|
||||
|
@ -428,7 +428,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
|
||||
|
||||
std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
|
||||
|
||||
bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
|
||||
bool IsFloat = Ty->isFPOrFPVectorTy();
|
||||
// Assume that floating point arithmetic operations cost twice as much as
|
||||
// integer operations.
|
||||
unsigned OpCost = (IsFloat ? 2 : 1);
|
||||
|
@ -16,14 +16,17 @@
|
||||
#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
|
||||
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTOR_H
|
||||
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include <bitset>
|
||||
#include <cstddef>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <initializer_list>
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class LLT;
|
||||
class MachineInstr;
|
||||
class MachineInstrBuilder;
|
||||
class MachineOperand;
|
||||
@ -58,6 +61,131 @@ class PredicateBitsetImpl : public std::bitset<MaxPredicates> {
|
||||
}
|
||||
};
|
||||
|
||||
enum {
|
||||
/// Record the specified instruction
|
||||
/// - NewInsnID - Instruction ID to define
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
GIM_RecordInsn,
|
||||
|
||||
/// Check the feature bits
|
||||
/// - Expected features
|
||||
GIM_CheckFeatures,
|
||||
|
||||
/// Check the opcode on the specified instruction
|
||||
/// - InsnID - Instruction ID
|
||||
/// - Expected opcode
|
||||
GIM_CheckOpcode,
|
||||
/// Check the instruction has the right number of operands
|
||||
/// - InsnID - Instruction ID
|
||||
/// - Expected number of operands
|
||||
GIM_CheckNumOperands,
|
||||
|
||||
/// Check the type for the specified operand
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
/// - Expected type
|
||||
GIM_CheckType,
|
||||
/// Check the register bank for the specified operand
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
/// - Expected register bank (specified as a register class)
|
||||
GIM_CheckRegBankForClass,
|
||||
/// Check the operand matches a complex predicate
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
/// - RendererID - The renderer to hold the result
|
||||
/// - Complex predicate ID
|
||||
GIM_CheckComplexPattern,
|
||||
/// Check the operand is a specific integer
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
/// - Expected integer
|
||||
GIM_CheckConstantInt,
|
||||
/// Check the operand is a specific literal integer (i.e. MO.isImm() or MO.isCImm() is true).
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
/// - Expected integer
|
||||
GIM_CheckLiteralInt,
|
||||
/// Check the operand is a specific intrinsic ID
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
/// - Expected Intrinsic ID
|
||||
GIM_CheckIntrinsicID,
|
||||
/// Check the specified operand is an MBB
|
||||
/// - InsnID - Instruction ID
|
||||
/// - OpIdx - Operand index
|
||||
GIM_CheckIsMBB,
|
||||
|
||||
/// Check if the specified operand is safe to fold into the current
|
||||
/// instruction.
|
||||
/// - InsnID - Instruction ID
|
||||
GIM_CheckIsSafeToFold,
|
||||
|
||||
//=== Renderers ===
|
||||
|
||||
/// Mutate an instruction
|
||||
/// - NewInsnID - Instruction ID to define
|
||||
/// - OldInsnID - Instruction ID to mutate
|
||||
/// - NewOpcode - The new opcode to use
|
||||
GIR_MutateOpcode,
|
||||
/// Build a new instruction
|
||||
/// - InsnID - Instruction ID to define
|
||||
/// - Opcode - The new opcode to use
|
||||
GIR_BuildMI,
|
||||
|
||||
/// Copy an operand to the specified instruction
|
||||
/// - NewInsnID - Instruction ID to modify
|
||||
/// - OldInsnID - Instruction ID to copy from
|
||||
/// - OpIdx - The operand to copy
|
||||
GIR_Copy,
|
||||
/// Copy an operand to the specified instruction
|
||||
/// - NewInsnID - Instruction ID to modify
|
||||
/// - OldInsnID - Instruction ID to copy from
|
||||
/// - OpIdx - The operand to copy
|
||||
/// - SubRegIdx - The subregister to copy
|
||||
GIR_CopySubReg,
|
||||
/// Add an implicit register def to the specified instruction
|
||||
/// - InsnID - Instruction ID to modify
|
||||
/// - RegNum - The register to add
|
||||
GIR_AddImplicitDef,
|
||||
/// Add an implicit register use to the specified instruction
|
||||
/// - InsnID - Instruction ID to modify
|
||||
/// - RegNum - The register to add
|
||||
GIR_AddImplicitUse,
|
||||
/// Add an register to the specified instruction
|
||||
/// - InsnID - Instruction ID to modify
|
||||
/// - RegNum - The register to add
|
||||
GIR_AddRegister,
|
||||
/// Add an immediate to the specified instruction
|
||||
/// - InsnID - Instruction ID to modify
|
||||
/// - Imm - The immediate to add
|
||||
GIR_AddImm,
|
||||
/// Render complex operands to the specified instruction
|
||||
/// - InsnID - Instruction ID to modify
|
||||
/// - RendererID - The renderer to call
|
||||
GIR_ComplexRenderer,
|
||||
|
||||
/// Constrain an instruction operand to a register class.
|
||||
/// - InsnID - Instruction ID to modify
|
||||
/// - OpIdx - Operand index
|
||||
/// - RCEnum - Register class enumeration value
|
||||
GIR_ConstrainOperandRC,
|
||||
/// Constrain an instructions operands according to the instruction
|
||||
/// description.
|
||||
/// - InsnID - Instruction ID to modify
|
||||
GIR_ConstrainSelectedInstOperands,
|
||||
/// Merge all memory operands into instruction.
|
||||
/// - InsnID - Instruction ID to modify
|
||||
GIR_MergeMemOperands,
|
||||
/// Erase from parent.
|
||||
/// - InsnID - Instruction ID to erase
|
||||
GIR_EraseFromParent,
|
||||
|
||||
/// A successful emission
|
||||
GIR_Done,
|
||||
};
|
||||
|
||||
/// Provides the logic to select generic machine instructions.
|
||||
class InstructionSelector {
|
||||
public:
|
||||
@ -78,9 +206,39 @@ class InstructionSelector {
|
||||
|
||||
protected:
|
||||
using ComplexRendererFn = std::function<void(MachineInstrBuilder &)>;
|
||||
using RecordedMIVector = SmallVector<MachineInstr *, 4>;
|
||||
using NewMIVector = SmallVector<MachineInstrBuilder, 4>;
|
||||
|
||||
struct MatcherState {
|
||||
std::vector<ComplexRendererFn> Renderers;
|
||||
RecordedMIVector MIs;
|
||||
|
||||
MatcherState(unsigned MaxRenderers);
|
||||
};
|
||||
|
||||
public:
|
||||
template <class PredicateBitset, class ComplexMatcherMemFn>
|
||||
struct MatcherInfoTy {
|
||||
const LLT *TypeObjects;
|
||||
const PredicateBitset *FeatureBitsets;
|
||||
const std::vector<ComplexMatcherMemFn> ComplexPredicates;
|
||||
};
|
||||
|
||||
protected:
|
||||
InstructionSelector();
|
||||
|
||||
/// Execute a given matcher table and return true if the match was successful
|
||||
/// and false otherwise.
|
||||
template <class TgtInstructionSelector, class PredicateBitset,
|
||||
class ComplexMatcherMemFn>
|
||||
bool executeMatchTable(
|
||||
TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
|
||||
const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
|
||||
const int64_t *MatchTable, const TargetInstrInfo &TII,
|
||||
MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
|
||||
const RegisterBankInfo &RBI,
|
||||
const PredicateBitset &AvailableFeatures) const;
|
||||
|
||||
/// Constrain a register operand of an instruction \p I to a specified
|
||||
/// register class. This could involve inserting COPYs before (for uses) or
|
||||
/// after (for defs) and may replace the operand of \p I.
|
||||
|
@ -0,0 +1,337 @@
|
||||
//==-- llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h ---------*- C++ -*-==//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
/// \file This file declares the API for the instruction selector.
|
||||
/// This class is responsible for selecting machine instructions.
|
||||
/// It's implemented by the target. It's used by the InstructionSelect pass.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
|
||||
#define LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
|
||||
|
||||
namespace llvm {
|
||||
template <class TgtInstructionSelector, class PredicateBitset,
|
||||
class ComplexMatcherMemFn>
|
||||
bool InstructionSelector::executeMatchTable(
|
||||
TgtInstructionSelector &ISel, NewMIVector &OutMIs, MatcherState &State,
|
||||
const MatcherInfoTy<PredicateBitset, ComplexMatcherMemFn> &MatcherInfo,
|
||||
const int64_t *MatchTable, const TargetInstrInfo &TII,
|
||||
MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
|
||||
const RegisterBankInfo &RBI,
|
||||
const PredicateBitset &AvailableFeatures) const {
|
||||
const int64_t *Command = MatchTable;
|
||||
while (true) {
|
||||
switch (*Command++) {
|
||||
case GIM_RecordInsn: {
|
||||
int64_t NewInsnID = *Command++;
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
|
||||
// As an optimisation we require that MIs[0] is always the root. Refuse
|
||||
// any attempt to modify it.
|
||||
assert(NewInsnID != 0 && "Refusing to modify MIs[0]");
|
||||
(void)NewInsnID;
|
||||
|
||||
MachineOperand &MO = State.MIs[InsnID]->getOperand(OpIdx);
|
||||
if (!MO.isReg()) {
|
||||
DEBUG(dbgs() << "Rejected (not a register)\n");
|
||||
return false;
|
||||
}
|
||||
if (TRI.isPhysicalRegister(MO.getReg())) {
|
||||
DEBUG(dbgs() << "Rejected (is a physical register)\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
assert((size_t)NewInsnID == State.MIs.size() &&
|
||||
"Expected to store MIs in order");
|
||||
State.MIs.push_back(MRI.getVRegDef(MO.getReg()));
|
||||
DEBUG(dbgs() << "MIs[" << NewInsnID << "] = GIM_RecordInsn(" << InsnID
|
||||
<< ", " << OpIdx << ")\n");
|
||||
break;
|
||||
}
|
||||
|
||||
case GIM_CheckFeatures: {
|
||||
int64_t ExpectedBitsetID = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckFeatures(ExpectedBitsetID=" << ExpectedBitsetID
|
||||
<< ")\n");
|
||||
if ((AvailableFeatures & MatcherInfo.FeatureBitsets[ExpectedBitsetID]) !=
|
||||
MatcherInfo.FeatureBitsets[ExpectedBitsetID]) {
|
||||
DEBUG(dbgs() << "Rejected\n");
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case GIM_CheckOpcode: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t Expected = *Command++;
|
||||
|
||||
unsigned Opcode = State.MIs[InsnID]->getOpcode();
|
||||
DEBUG(dbgs() << "GIM_CheckOpcode(MIs[" << InsnID << "], ExpectedOpcode="
|
||||
<< Expected << ") // Got=" << Opcode << "\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
if (Opcode != Expected)
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
case GIM_CheckNumOperands: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t Expected = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckNumOperands(MIs[" << InsnID
|
||||
<< "], Expected=" << Expected << ")\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
if (State.MIs[InsnID]->getNumOperands() != Expected)
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
|
||||
case GIM_CheckType: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t TypeID = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckType(MIs[" << InsnID << "]->getOperand("
|
||||
<< OpIdx << "), TypeID=" << TypeID << ")\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
if (MRI.getType(State.MIs[InsnID]->getOperand(OpIdx).getReg()) !=
|
||||
MatcherInfo.TypeObjects[TypeID])
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
case GIM_CheckRegBankForClass: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t RCEnum = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckRegBankForClass(MIs[" << InsnID
|
||||
<< "]->getOperand(" << OpIdx << "), RCEnum=" << RCEnum
|
||||
<< ")\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
if (&RBI.getRegBankFromRegClass(*TRI.getRegClass(RCEnum)) !=
|
||||
RBI.getRegBank(State.MIs[InsnID]->getOperand(OpIdx).getReg(), MRI, TRI))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
case GIM_CheckComplexPattern: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t RendererID = *Command++;
|
||||
int64_t ComplexPredicateID = *Command++;
|
||||
DEBUG(dbgs() << "State.Renderers[" << RendererID
|
||||
<< "] = GIM_CheckComplexPattern(MIs[" << InsnID
|
||||
<< "]->getOperand(" << OpIdx
|
||||
<< "), ComplexPredicateID=" << ComplexPredicateID << ")\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
// FIXME: Use std::invoke() when it's available.
|
||||
if (!(State.Renderers[RendererID] =
|
||||
(ISel.*MatcherInfo.ComplexPredicates[ComplexPredicateID])(
|
||||
State.MIs[InsnID]->getOperand(OpIdx))))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
case GIM_CheckConstantInt: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t Value = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckConstantInt(MIs[" << InsnID << "]->getOperand("
|
||||
<< OpIdx << "), Value=" << Value << ")\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
if (!isOperandImmEqual(State.MIs[InsnID]->getOperand(OpIdx), Value, MRI))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
case GIM_CheckLiteralInt: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t Value = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckLiteralInt(MIs[" << InsnID << "]->getOperand(" << OpIdx
|
||||
<< "), Value=" << Value << ")\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
|
||||
if (!OM.isCImm() || !OM.getCImm()->equalsInt(Value))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
case GIM_CheckIntrinsicID: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t Value = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckIntrinsicID(MIs[" << InsnID << "]->getOperand(" << OpIdx
|
||||
<< "), Value=" << Value << ")\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
MachineOperand &OM = State.MIs[InsnID]->getOperand(OpIdx);
|
||||
if (!OM.isIntrinsicID() || OM.getIntrinsicID() != Value)
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
case GIM_CheckIsMBB: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckIsMBB(MIs[" << InsnID << "]->getOperand("
|
||||
<< OpIdx << "))\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
if (!State.MIs[InsnID]->getOperand(OpIdx).isMBB())
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
|
||||
case GIM_CheckIsSafeToFold: {
|
||||
int64_t InsnID = *Command++;
|
||||
DEBUG(dbgs() << "GIM_CheckIsSafeToFold(MIs[" << InsnID << "])\n");
|
||||
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
|
||||
if (!isObviouslySafeToFold(*State.MIs[InsnID]))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
|
||||
case GIR_MutateOpcode: {
|
||||
int64_t OldInsnID = *Command++;
|
||||
int64_t NewInsnID = *Command++;
|
||||
int64_t NewOpcode = *Command++;
|
||||
assert((size_t)NewInsnID == OutMIs.size() &&
|
||||
"Expected to store MIs in order");
|
||||
OutMIs.push_back(
|
||||
MachineInstrBuilder(*State.MIs[OldInsnID]->getParent()->getParent(),
|
||||
State.MIs[OldInsnID]));
|
||||
OutMIs[NewInsnID]->setDesc(TII.get(NewOpcode));
|
||||
DEBUG(dbgs() << "GIR_MutateOpcode(OutMIs[" << NewInsnID << "], MIs["
|
||||
<< OldInsnID << "], " << NewOpcode << ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_BuildMI: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t Opcode = *Command++;
|
||||
assert((size_t)InsnID == OutMIs.size() &&
|
||||
"Expected to store MIs in order");
|
||||
(void)InsnID;
|
||||
OutMIs.push_back(BuildMI(*State.MIs[0]->getParent(), State.MIs[0],
|
||||
State.MIs[0]->getDebugLoc(), TII.get(Opcode)));
|
||||
DEBUG(dbgs() << "GIR_BuildMI(OutMIs[" << InsnID << "], " << Opcode
|
||||
<< ")\n");
|
||||
break;
|
||||
}
|
||||
|
||||
case GIR_Copy: {
|
||||
int64_t NewInsnID = *Command++;
|
||||
int64_t OldInsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
|
||||
OutMIs[NewInsnID].add(State.MIs[OldInsnID]->getOperand(OpIdx));
|
||||
DEBUG(dbgs() << "GIR_Copy(OutMIs[" << NewInsnID << "], MIs[" << OldInsnID
|
||||
<< "], " << OpIdx << ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_CopySubReg: {
|
||||
int64_t NewInsnID = *Command++;
|
||||
int64_t OldInsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t SubRegIdx = *Command++;
|
||||
assert(OutMIs[NewInsnID] && "Attempted to add to undefined instruction");
|
||||
OutMIs[NewInsnID].addReg(State.MIs[OldInsnID]->getOperand(OpIdx).getReg(),
|
||||
0, SubRegIdx);
|
||||
DEBUG(dbgs() << "GIR_CopySubReg(OutMIs[" << NewInsnID << "], MIs["
|
||||
<< OldInsnID << "], " << OpIdx << ", " << SubRegIdx
|
||||
<< ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_AddImplicitDef: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t RegNum = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
OutMIs[InsnID].addDef(RegNum, RegState::Implicit);
|
||||
DEBUG(dbgs() << "GIR_AddImplicitDef(OutMIs[" << InsnID << "], " << RegNum
|
||||
<< ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_AddImplicitUse: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t RegNum = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
OutMIs[InsnID].addUse(RegNum, RegState::Implicit);
|
||||
DEBUG(dbgs() << "GIR_AddImplicitUse(OutMIs[" << InsnID << "], " << RegNum
|
||||
<< ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_AddRegister: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t RegNum = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
OutMIs[InsnID].addReg(RegNum);
|
||||
DEBUG(dbgs() << "GIR_AddRegister(OutMIs[" << InsnID << "], " << RegNum
|
||||
<< ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_AddImm: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t Imm = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
OutMIs[InsnID].addImm(Imm);
|
||||
DEBUG(dbgs() << "GIR_AddImm(OutMIs[" << InsnID << "], " << Imm << ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_ComplexRenderer: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t RendererID = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
State.Renderers[RendererID](OutMIs[InsnID]);
|
||||
DEBUG(dbgs() << "GIR_ComplexRenderer(OutMIs[" << InsnID << "], "
|
||||
<< RendererID << ")\n");
|
||||
break;
|
||||
}
|
||||
|
||||
case GIR_ConstrainOperandRC: {
|
||||
int64_t InsnID = *Command++;
|
||||
int64_t OpIdx = *Command++;
|
||||
int64_t RCEnum = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
constrainOperandRegToRegClass(*OutMIs[InsnID].getInstr(), OpIdx,
|
||||
*TRI.getRegClass(RCEnum), TII, TRI, RBI);
|
||||
DEBUG(dbgs() << "GIR_ConstrainOperandRC(OutMIs[" << InsnID << "], "
|
||||
<< OpIdx << ", " << RCEnum << ")\n");
|
||||
break;
|
||||
}
|
||||
case GIR_ConstrainSelectedInstOperands: {
|
||||
int64_t InsnID = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
constrainSelectedInstRegOperands(*OutMIs[InsnID].getInstr(), TII, TRI,
|
||||
RBI);
|
||||
DEBUG(dbgs() << "GIR_ConstrainSelectedInstOperands(OutMIs[" << InsnID
|
||||
<< "])\n");
|
||||
break;
|
||||
}
|
||||
case GIR_MergeMemOperands: {
|
||||
int64_t InsnID = *Command++;
|
||||
assert(OutMIs[InsnID] && "Attempted to add to undefined instruction");
|
||||
for (const auto *FromMI : State.MIs)
|
||||
for (const auto &MMO : FromMI->memoperands())
|
||||
OutMIs[InsnID].addMemOperand(MMO);
|
||||
DEBUG(dbgs() << "GIR_MergeMemOperands(OutMIs[" << InsnID << "])\n");
|
||||
break;
|
||||
}
|
||||
case GIR_EraseFromParent: {
|
||||
int64_t InsnID = *Command++;
|
||||
assert(State.MIs[InsnID] &&
|
||||
"Attempted to erase an undefined instruction");
|
||||
State.MIs[InsnID]->eraseFromParent();
|
||||
DEBUG(dbgs() << "GIR_EraseFromParent(MIs[" << InsnID << "])\n");
|
||||
break;
|
||||
}
|
||||
|
||||
case GIR_Done:
|
||||
DEBUG(dbgs() << "GIR_Done");
|
||||
return true;
|
||||
|
||||
default:
|
||||
llvm_unreachable("Unexpected command");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif // LLVM_CODEGEN_GLOBALISEL_INSTRUCTIONSELECTORIMPL_H
|
@ -101,11 +101,11 @@ class LegalizerHelper {
|
||||
const LegalizerInfo &LI;
|
||||
};
|
||||
|
||||
/// Helper function that replaces \p MI with a libcall.
|
||||
/// Helper function that creates the given libcall.
|
||||
LegalizerHelper::LegalizeResult
|
||||
replaceWithLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
|
||||
RTLIB::Libcall Libcall, const CallLowering::ArgInfo &Result,
|
||||
ArrayRef<CallLowering::ArgInfo> Args);
|
||||
createLibcall(MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
|
||||
const CallLowering::ArgInfo &Result,
|
||||
ArrayRef<CallLowering::ArgInfo> Args);
|
||||
|
||||
} // End namespace llvm.
|
||||
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include "llvm/CodeGen/LowLevelType.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
||||
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
||||
#include "llvm/IR/Constants.h"
|
||||
#include "llvm/IR/DebugLoc.h"
|
||||
|
||||
@ -59,6 +60,21 @@ class MachineIRBuilder {
|
||||
}
|
||||
|
||||
void validateTruncExt(unsigned Dst, unsigned Src, bool IsExtend);
|
||||
MachineInstrBuilder buildBinaryOp(unsigned Opcode, unsigned Res, unsigned Op0, unsigned Op1);
|
||||
|
||||
unsigned getDestFromArg(unsigned Reg) { return Reg; }
|
||||
unsigned getDestFromArg(LLT Ty) {
|
||||
return getMF().getRegInfo().createGenericVirtualRegister(Ty);
|
||||
}
|
||||
unsigned getDestFromArg(const TargetRegisterClass *RC) {
|
||||
return getMF().getRegInfo().createVirtualRegister(RC);
|
||||
}
|
||||
|
||||
unsigned getRegFromArg(unsigned Reg) { return Reg; }
|
||||
|
||||
unsigned getRegFromArg(const MachineInstrBuilder &MIB) {
|
||||
return MIB->getOperand(0).getReg();
|
||||
}
|
||||
|
||||
public:
|
||||
/// Getter for the function we currently build.
|
||||
@ -120,6 +136,22 @@ class MachineIRBuilder {
|
||||
/// \return a MachineInstrBuilder for the newly created instruction.
|
||||
MachineInstrBuilder buildInstr(unsigned Opcode);
|
||||
|
||||
/// DAG like Generic method for building arbitrary instructions as above.
|
||||
/// \Opc opcode for the instruction.
|
||||
/// \Ty Either LLT/TargetRegisterClass/unsigned types for Dst
|
||||
/// \Args Variadic list of uses of types(unsigned/MachineInstrBuilder)
|
||||
/// Uses of type MachineInstrBuilder will perform
|
||||
/// getOperand(0).getReg() to convert to register.
|
||||
template <typename DstTy, typename... UseArgsTy>
|
||||
MachineInstrBuilder buildInstr(unsigned Opc, DstTy &&Ty,
|
||||
UseArgsTy &&... Args) {
|
||||
auto MIB = buildInstr(Opc).addDef(getDestFromArg(Ty));
|
||||
unsigned It[] = {(getRegFromArg(Args))...};
|
||||
for (const auto &i : It)
|
||||
MIB.addUse(i);
|
||||
return MIB;
|
||||
}
|
||||
|
||||
/// Build but don't insert <empty> = \p Opcode <empty>.
|
||||
///
|
||||
/// \pre setMF, setBasicBlock or setMI must have been called.
|
||||
@ -188,6 +220,11 @@ class MachineIRBuilder {
|
||||
/// \return a MachineInstrBuilder for the newly created instruction.
|
||||
MachineInstrBuilder buildAdd(unsigned Res, unsigned Op0,
|
||||
unsigned Op1);
|
||||
template <typename DstTy, typename... UseArgsTy>
|
||||
MachineInstrBuilder buildAdd(DstTy &&Ty, UseArgsTy &&... UseArgs) {
|
||||
unsigned Res = getDestFromArg(Ty);
|
||||
return buildAdd(Res, (getRegFromArg(UseArgs))...);
|
||||
}
|
||||
|
||||
/// Build and insert \p Res<def> = G_SUB \p Op0, \p Op1
|
||||
///
|
||||
@ -295,6 +332,18 @@ class MachineIRBuilder {
|
||||
MachineInstrBuilder buildAnd(unsigned Res, unsigned Op0,
|
||||
unsigned Op1);
|
||||
|
||||
/// Build and insert \p Res<def> = G_OR \p Op0, \p Op1
|
||||
///
|
||||
/// G_OR sets \p Res to the bitwise or of integer parameters \p Op0 and \p
|
||||
/// Op1.
|
||||
///
|
||||
/// \pre setBasicBlock or setMI must have been called.
|
||||
/// \pre \p Res, \p Op0 and \p Op1 must be generic virtual registers
|
||||
/// with the same (scalar or vector) type).
|
||||
///
|
||||
/// \return a MachineInstrBuilder for the newly created instruction.
|
||||
MachineInstrBuilder buildOr(unsigned Res, unsigned Op0, unsigned Op1);
|
||||
|
||||
/// Build and insert \p Res<def> = G_ANYEXT \p Op0
|
||||
///
|
||||
/// G_ANYEXT produces a register of the specified width, with bits 0 to
|
||||
@ -416,6 +465,10 @@ class MachineIRBuilder {
|
||||
/// \return The newly created instruction.
|
||||
MachineInstrBuilder buildConstant(unsigned Res, int64_t Val);
|
||||
|
||||
template <typename DstType>
|
||||
MachineInstrBuilder buildConstant(DstType &&Res, int64_t Val) {
|
||||
return buildConstant(getDestFromArg(Res), Val);
|
||||
}
|
||||
/// Build and insert \p Res = G_FCONSTANT \p Val
|
||||
///
|
||||
/// G_FCONSTANT is a floating-point constant with the specified size and
|
||||
|
@ -93,12 +93,14 @@ class LiveRegUnits {
|
||||
}
|
||||
|
||||
/// Updates liveness when stepping backwards over the instruction \p MI.
|
||||
/// This removes all register units defined or clobbered in \p MI and then
|
||||
/// adds the units used (as in use operands) in \p MI.
|
||||
void stepBackward(const MachineInstr &MI);
|
||||
|
||||
/// Mark all register units live during instruction \p MI.
|
||||
/// This can be used to accumulate live/unoccupied registers over a range of
|
||||
/// instructions.
|
||||
void accumulateBackward(const MachineInstr &MI);
|
||||
/// Adds all register units used, defined or clobbered in \p MI.
|
||||
/// This is useful when walking over a range of instruction to find registers
|
||||
/// unused over the whole range.
|
||||
void accumulate(const MachineInstr &MI);
|
||||
|
||||
/// Adds registers living out of block \p MBB.
|
||||
/// Live out registers are the union of the live-in registers of the successor
|
||||
|
@ -650,7 +650,7 @@ class MachineFunction {
|
||||
MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
|
||||
unsigned base_alignment, const AAMDNodes &AAInfo = AAMDNodes(),
|
||||
const MDNode *Ranges = nullptr,
|
||||
SynchronizationScope SynchScope = CrossThread,
|
||||
SyncScope::ID SSID = SyncScope::System,
|
||||
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
|
||||
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
|
||||
|
||||
|
@ -114,6 +114,9 @@ class MachineMemOperand {
|
||||
MOInvariant = 1u << 5,
|
||||
|
||||
// Reserved for use by target-specific passes.
|
||||
// Targets may override getSerializableMachineMemOperandTargetFlags() to
|
||||
// enable MIR serialization/parsing of these flags. If more of these flags
|
||||
// are added, the MIR printing/parsing code will need to be updated as well.
|
||||
MOTargetFlag1 = 1u << 6,
|
||||
MOTargetFlag2 = 1u << 7,
|
||||
MOTargetFlag3 = 1u << 8,
|
||||
@ -124,8 +127,8 @@ class MachineMemOperand {
|
||||
private:
|
||||
/// Atomic information for this memory operation.
|
||||
struct MachineAtomicInfo {
|
||||
/// Synchronization scope for this memory operation.
|
||||
unsigned SynchScope : 1; // enum SynchronizationScope
|
||||
/// Synchronization scope ID for this memory operation.
|
||||
unsigned SSID : 8; // SyncScope::ID
|
||||
/// Atomic ordering requirements for this memory operation. For cmpxchg
|
||||
/// atomic operations, atomic ordering requirements when store occurs.
|
||||
unsigned Ordering : 4; // enum AtomicOrdering
|
||||
@ -152,7 +155,7 @@ class MachineMemOperand {
|
||||
unsigned base_alignment,
|
||||
const AAMDNodes &AAInfo = AAMDNodes(),
|
||||
const MDNode *Ranges = nullptr,
|
||||
SynchronizationScope SynchScope = CrossThread,
|
||||
SyncScope::ID SSID = SyncScope::System,
|
||||
AtomicOrdering Ordering = AtomicOrdering::NotAtomic,
|
||||
AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic);
|
||||
|
||||
@ -202,9 +205,9 @@ class MachineMemOperand {
|
||||
/// Return the range tag for the memory reference.
|
||||
const MDNode *getRanges() const { return Ranges; }
|
||||
|
||||
/// Return the synchronization scope for this memory operation.
|
||||
SynchronizationScope getSynchScope() const {
|
||||
return static_cast<SynchronizationScope>(AtomicInfo.SynchScope);
|
||||
/// Returns the synchronization scope ID for this memory operation.
|
||||
SyncScope::ID getSyncScopeID() const {
|
||||
return static_cast<SyncScope::ID>(AtomicInfo.SSID);
|
||||
}
|
||||
|
||||
/// Return the atomic ordering requirements for this memory operation. For
|
||||
|
@ -340,6 +340,18 @@ namespace RTLIB {
|
||||
MEMCPY_ELEMENT_UNORDERED_ATOMIC_8,
|
||||
MEMCPY_ELEMENT_UNORDERED_ATOMIC_16,
|
||||
|
||||
MEMMOVE_ELEMENT_UNORDERED_ATOMIC_1,
|
||||
MEMMOVE_ELEMENT_UNORDERED_ATOMIC_2,
|
||||
MEMMOVE_ELEMENT_UNORDERED_ATOMIC_4,
|
||||
MEMMOVE_ELEMENT_UNORDERED_ATOMIC_8,
|
||||
MEMMOVE_ELEMENT_UNORDERED_ATOMIC_16,
|
||||
|
||||
MEMSET_ELEMENT_UNORDERED_ATOMIC_1,
|
||||
MEMSET_ELEMENT_UNORDERED_ATOMIC_2,
|
||||
MEMSET_ELEMENT_UNORDERED_ATOMIC_4,
|
||||
MEMSET_ELEMENT_UNORDERED_ATOMIC_8,
|
||||
MEMSET_ELEMENT_UNORDERED_ATOMIC_16,
|
||||
|
||||
// EXCEPTION HANDLING
|
||||
UNWIND_RESUME,
|
||||
|
||||
@ -515,6 +527,17 @@ namespace RTLIB {
|
||||
/// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
|
||||
/// UNKNOW_LIBCALL if there is none.
|
||||
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
|
||||
|
||||
/// getMEMMOVE_ELEMENT_UNORDERED_ATOMIC - Return
|
||||
/// MEMMOVE_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
|
||||
/// UNKNOW_LIBCALL if there is none.
|
||||
Libcall getMEMMOVE_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
|
||||
|
||||
/// getMEMSET_ELEMENT_UNORDERED_ATOMIC - Return
|
||||
/// MEMSET_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
|
||||
/// UNKNOW_LIBCALL if there is none.
|
||||
Libcall getMEMSET_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -235,6 +235,9 @@ class TargetRegisterInfo;
|
||||
"SDep::Output edge cannot use the zero register!");
|
||||
Contents.Reg = Reg;
|
||||
}
|
||||
|
||||
raw_ostream &print(raw_ostream &O,
|
||||
const TargetRegisterInfo *TRI = nullptr) const;
|
||||
};
|
||||
|
||||
template <>
|
||||
@ -458,7 +461,10 @@ class TargetRegisterInfo;
|
||||
|
||||
void dump(const ScheduleDAG *G) const;
|
||||
void dumpAll(const ScheduleDAG *G) const;
|
||||
void print(raw_ostream &O, const ScheduleDAG *G) const;
|
||||
raw_ostream &print(raw_ostream &O,
|
||||
const SUnit *N = nullptr,
|
||||
const SUnit *X = nullptr) const;
|
||||
raw_ostream &print(raw_ostream &O, const ScheduleDAG *G) const;
|
||||
|
||||
private:
|
||||
void ComputeDepth();
|
||||
|
@ -927,7 +927,7 @@ class SelectionDAG {
|
||||
SDValue Cmp, SDValue Swp, MachinePointerInfo PtrInfo,
|
||||
unsigned Alignment, AtomicOrdering SuccessOrdering,
|
||||
AtomicOrdering FailureOrdering,
|
||||
SynchronizationScope SynchScope);
|
||||
SyncScope::ID SSID);
|
||||
SDValue getAtomicCmpSwap(unsigned Opcode, const SDLoc &dl, EVT MemVT,
|
||||
SDVTList VTs, SDValue Chain, SDValue Ptr,
|
||||
SDValue Cmp, SDValue Swp, MachineMemOperand *MMO);
|
||||
@ -937,7 +937,7 @@ class SelectionDAG {
|
||||
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
|
||||
SDValue Ptr, SDValue Val, const Value *PtrVal,
|
||||
unsigned Alignment, AtomicOrdering Ordering,
|
||||
SynchronizationScope SynchScope);
|
||||
SyncScope::ID SSID);
|
||||
SDValue getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT, SDValue Chain,
|
||||
SDValue Ptr, SDValue Val, MachineMemOperand *MMO);
|
||||
|
||||
|
@ -1213,8 +1213,8 @@ class MemSDNode : public SDNode {
|
||||
/// Returns the Ranges that describes the dereference.
|
||||
const MDNode *getRanges() const { return MMO->getRanges(); }
|
||||
|
||||
/// Return the synchronization scope for this memory operation.
|
||||
SynchronizationScope getSynchScope() const { return MMO->getSynchScope(); }
|
||||
/// Returns the synchronization scope ID for this memory operation.
|
||||
SyncScope::ID getSyncScopeID() const { return MMO->getSyncScopeID(); }
|
||||
|
||||
/// Return the atomic ordering requirements for this memory operation. For
|
||||
/// cmpxchg atomic operations, return the atomic ordering requirements when
|
||||
@ -1432,8 +1432,8 @@ class ConstantSDNode : public SDNode {
|
||||
int64_t getSExtValue() const { return Value->getSExtValue(); }
|
||||
|
||||
bool isOne() const { return Value->isOne(); }
|
||||
bool isNullValue() const { return Value->isNullValue(); }
|
||||
bool isAllOnesValue() const { return Value->isAllOnesValue(); }
|
||||
bool isNullValue() const { return Value->isZero(); }
|
||||
bool isAllOnesValue() const { return Value->isMinusOne(); }
|
||||
|
||||
bool isOpaque() const { return ConstantSDNodeBits.IsOpaque; }
|
||||
|
||||
|
@ -735,6 +735,10 @@ class Compile3Sym : public SymbolRecord {
|
||||
uint16_t VersionBackendQFE;
|
||||
StringRef Version;
|
||||
|
||||
void setLanguage(SourceLanguage Lang) {
|
||||
Flags = CompileSym3Flags((uint32_t(Flags) & 0xFFFFFF00) | uint32_t(Lang));
|
||||
}
|
||||
|
||||
uint8_t getLanguage() const { return static_cast<uint32_t>(Flags) & 0xFF; }
|
||||
uint32_t getFlags() const { return static_cast<uint32_t>(Flags) & ~0xFF; }
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff.