Merge llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and openmp
release/11.x llvmorg-11.0.0-rc1-47-gff47911ddfc.
Dimitry Andric 2020-08-15 12:29:55 +00:00
commit 1106035d5b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang1100-import/; revision=364253
16 changed files with 149 additions and 74 deletions

View File

@@ -8974,6 +8974,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
   const Expr *Init = E->getInitializer();
   const InitListExpr *ResizedArrayILE = nullptr;
   const CXXConstructExpr *ResizedArrayCCE = nullptr;
+  bool ValueInit = false;

   QualType AllocType = E->getAllocatedType();
   if (Optional<const Expr*> ArraySize = E->getArraySize()) {
@@ -9017,7 +9018,14 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
       //   -- the new-initializer is a braced-init-list and the number of
       //      array elements for which initializers are provided [...]
       //      exceeds the number of elements to initialize
-      if (Init && !isa<CXXConstructExpr>(Init)) {
+      if (!Init) {
+        // No initialization is performed.
+      } else if (isa<CXXScalarValueInitExpr>(Init) ||
+                 isa<ImplicitValueInitExpr>(Init)) {
+        ValueInit = true;
+      } else if (auto *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+        ResizedArrayCCE = CCE;
+      } else {
         auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType());
         assert(CAT && "unexpected type for array initializer");
@@ -9040,8 +9048,6 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
         //   special handling for this case when we initialize.
         if (InitBound != AllocBound)
           ResizedArrayILE = cast<InitListExpr>(Init);
-      } else if (Init) {
-        ResizedArrayCCE = cast<CXXConstructExpr>(Init);
       }

       AllocType = Info.Ctx.getConstantArrayType(AllocType, ArrayBound, nullptr,
@@ -9102,7 +9108,11 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
       return false;
   }

-  if (ResizedArrayILE) {
+  if (ValueInit) {
+    ImplicitValueInitExpr VIE(AllocType);
+    if (!EvaluateInPlace(*Val, Info, Result, &VIE))
+      return false;
+  } else if (ResizedArrayILE) {
     if (!EvaluateArrayNewInitList(Info, Result, *Val, ResizedArrayILE,
                                   AllocType))
       return false;
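
For orientation: the hunks above teach the constant evaluator to model value-initializing forms of array new (CXXScalarValueInitExpr and ImplicitValueInitExpr) instead of only braced-init-lists and constructor calls. A minimal sketch of code that reaches this path, assuming a C++20 compiler where dynamic allocation is permitted during constant evaluation:

    // Value-initialization of a dynamic array inside constant evaluation.
    constexpr int sum_of_zeros(int n) {
      int *p = new int[n]();  // () value-initializes every element to 0
      int s = 0;
      for (int i = 0; i != n; ++i)
        s += p[i];
      delete[] p;             // the allocation must not escape the evaluation
      return s;
    }
    static_assert(sum_of_zeros(4) == 0);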

View File

@@ -2912,9 +2912,11 @@ static bool isSameTemplateParameter(const NamedDecl *X,
       return false;
     if (TX->hasTypeConstraint() != TY->hasTypeConstraint())
       return false;
-    if (TX->hasTypeConstraint()) {
-      const TypeConstraint *TXTC = TX->getTypeConstraint();
-      const TypeConstraint *TYTC = TY->getTypeConstraint();
+    const TypeConstraint *TXTC = TX->getTypeConstraint();
+    const TypeConstraint *TYTC = TY->getTypeConstraint();
+    if (!TXTC != !TYTC)
+      return false;
+    if (TXTC && TYTC) {
       if (TXTC->getNamedConcept() != TYTC->getNamedConcept())
         return false;
       if (TXTC->hasExplicitTemplateArgs() != TYTC->hasExplicitTemplateArgs())
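
For orientation: this predicate decides whether two template parameters declare the same entity when merging declarations, which with C++20 concepts includes the type constraint; the added null checks guard the case, visible in the hunk above, where hasTypeConstraint() agrees on both sides but a TypeConstraint object is missing on one of them. A hypothetical pair of declarations this comparison has to reconcile:

    template <typename T> concept Small = sizeof(T) <= 8;

    template <Small T> T id(T v);               // e.g. declared in a header
    template <Small T> T id(T v) { return v; }  // must match the declaration above
    template <typename T> T dup(T v);           // unconstrained: a different parameter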

View File

@@ -53,9 +53,9 @@
 #endif

 #if COMPILER_RT_HAS_ATOMICS == 1
-#ifdef _MSC_VER
+#ifdef _WIN32
 #include <windows.h>
-#if _MSC_VER < 1900
+#if defined(_MSC_VER) && _MSC_VER < 1900
 #define snprintf _snprintf
 #endif
 #if defined(_WIN64)
@@ -73,7 +73,7 @@
      (DomType *)InterlockedExchangeAdd((LONG volatile *)&PtrVar,              \
                                        (LONG)sizeof(DomType) * PtrIncr)
 #endif
-#else /* !defined(_MSC_VER) */
+#else /* !defined(_WIN32) */
 #define COMPILER_RT_BOOL_CMPXCHG(Ptr, OldV, NewV)                             \
   __sync_bool_compare_and_swap(Ptr, OldV, NewV)
 #define COMPILER_RT_PTR_FETCH_ADD(DomType, PtrVar, PtrIncr)                   \
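
The net effect of the two guards is that MinGW builds now take the Windows interlocked code path, while the snprintf workaround stays scoped to pre-2015 MSVC. A condensed sketch of the selection pattern, with an illustrative macro name rather than the real compiler-rt one:

    #ifdef _WIN32
    #include <windows.h>
    // Nonzero when the swap happened (the initial value matched OldV).
    #define BOOL_CMPXCHG(Ptr, OldV, NewV)                                      \
      (InterlockedCompareExchange((LONG volatile *)(Ptr), (LONG)(NewV),        \
                                  (LONG)(OldV)) == (LONG)(OldV))
    #else
    // GCC-style builtin on every non-Windows host.
    #define BOOL_CMPXCHG(Ptr, OldV, NewV)                                      \
      __sync_bool_compare_and_swap(Ptr, OldV, NewV)
    #endif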

View File

@@ -938,14 +938,17 @@ void InputSection::relocateNonAlloc(uint8_t *buf, ArrayRef<RelTy> rels) {
     // the folded-in function, so exclude .debug_line.
     //
     // For pre-DWARF-v5 .debug_loc and .debug_ranges, -1 is a reserved value
-    // (base address selection entry), so -2 is used.
+    // (base address selection entry), use 1 (which is used by GNU ld for
+    // .debug_ranges).
+    //
+    // TODO To reduce disruption, we use 0 instead of -1 as the tombstone
+    // value. Enable -1 in a future release.
     auto *ds = dyn_cast<Defined>(&sym);
     if (!sym.getOutputSection() ||
         (ds && ds->section->repl != ds->section && !isDebugLine)) {
       // If -z dead-reloc-in-nonalloc= is specified, respect it.
-      const uint64_t value =
-          tombstone ? SignExtend64<bits>(*tombstone)
-                    : (isDebugLocOrRanges ? UINT64_MAX - 1 : UINT64_MAX);
+      const uint64_t value = tombstone ? SignExtend64<bits>(*tombstone)
+                                       : (isDebugLocOrRanges ? 1 : 0);
       target->relocateNoSym(bufLoc, type, value);
       continue;
     }
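
A distilled sketch of the tombstoning rule this hunk implements, with simplified names (the real logic sits inline in relocateNonAlloc and honors -z dead-reloc-in-nonalloc=):

    #include <cstdint>
    #include <optional>

    // Value written over a relocation in a non-SHF_ALLOC section whose target
    // symbol was discarded (e.g. folded by ICF or dropped with its section).
    uint64_t tombstoneValue(bool isDebugLocOrRanges,
                            std::optional<int64_t> zOptionValue) {
      if (zOptionValue)            // explicit -z dead-reloc-in-nonalloc= wins
        return uint64_t(*zOptionValue);
      // 0 is the interim tombstone (-1 is planned later); .debug_loc and
      // .debug_ranges get 1 because 0 and -1 carry meaning there.
      return isDebugLocOrRanges ? 1 : 0;
    }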

View File

@@ -388,7 +388,7 @@ module LLVM_Utils {
     umbrella "Support"
     module * { export * }

     // Exclude this; it should only be used on Windows.
     exclude header "Support/Windows/WindowsSupport.h"
@@ -397,8 +397,9 @@ module LLVM_Utils {
     exclude header "Support/Solaris/sys/regset.h"

     // These are intended for textual inclusion.
-    textual header "Support/ARMTargetParser.def"
     textual header "Support/AArch64TargetParser.def"
+    textual header "Support/ARMTargetParser.def"
+    textual header "Support/RISCVTargetParser.def"
     textual header "Support/TargetOpcodes.def"
     textual header "Support/X86TargetParser.def"
   }

View File

@@ -393,7 +393,10 @@ void RuntimePointerChecking::groupChecks(
     // equivalence class, the iteration order is deterministic.
     for (auto MI = DepCands.member_begin(LeaderI), ME = DepCands.member_end();
          MI != ME; ++MI) {
-      unsigned Pointer = PositionMap[MI->getPointer()];
+      auto PointerI = PositionMap.find(MI->getPointer());
+      assert(PointerI != PositionMap.end() &&
+             "pointer in equivalence class not found in PositionMap");
+      unsigned Pointer = PointerI->second;
       bool Merged = false;
       // Mark this pointer as seen.
       Seen.insert(Pointer);
@@ -726,52 +729,55 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
     SmallVector<MemAccessInfo, 4> Retries;

+    // First, count how many write and read accesses are in the alias set. Also
+    // collect MemAccessInfos for later.
+    SmallVector<MemAccessInfo, 4> AccessInfos;
     for (auto A : AS) {
       Value *Ptr = A.getValue();
       bool IsWrite = Accesses.count(MemAccessInfo(Ptr, true));
-      MemAccessInfo Access(Ptr, IsWrite);
       if (IsWrite)
         ++NumWritePtrChecks;
       else
         ++NumReadPtrChecks;
+      AccessInfos.emplace_back(Ptr, IsWrite);
+    }

+    // We do not need runtime checks for this alias set, if there are no writes
+    // or a single write and no reads.
+    if (NumWritePtrChecks == 0 ||
+        (NumWritePtrChecks == 1 && NumReadPtrChecks == 0)) {
+      assert((AS.size() <= 1 ||
+              all_of(AS,
+                     [this](auto AC) {
+                       MemAccessInfo AccessWrite(AC.getValue(), true);
+                       return DepCands.findValue(AccessWrite) == DepCands.end();
+                     })) &&
+             "Can only skip updating CanDoRT below, if all entries in AS "
+             "are reads or there is at most 1 entry");
+      continue;
+    }

+    for (auto &Access : AccessInfos) {
       if (!createCheckForAccess(RtCheck, Access, StridesMap, DepSetId, TheLoop,
                                 RunningDepId, ASId, ShouldCheckWrap, false)) {
-        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:" << *Ptr << '\n');
+        LLVM_DEBUG(dbgs() << "LAA: Can't find bounds for ptr:"
+                          << *Access.getPointer() << '\n');
         Retries.push_back(Access);
         CanDoAliasSetRT = false;
       }
     }

-    // If we have at least two writes or one write and a read then we need to
-    // check them.  But there is no need to checks if there is only one
-    // dependence set for this alias set.
-    //
     // Note that this function computes CanDoRT and MayNeedRTCheck
     // independently. For example CanDoRT=false, MayNeedRTCheck=false means that
     // we have a pointer for which we couldn't find the bounds but we don't
     // actually need to emit any checks so it does not matter.
-    bool NeedsAliasSetRTCheck = false;
-    if (!(IsDepCheckNeeded && CanDoAliasSetRT && RunningDepId == 2)) {
-      NeedsAliasSetRTCheck = (NumWritePtrChecks >= 2 ||
-                              (NumReadPtrChecks >= 1 && NumWritePtrChecks >= 1));
-      // For alias sets without at least 2 writes or 1 write and 1 read, there
-      // is no need to generate RT checks and CanDoAliasSetRT for this alias set
-      // does not impact whether runtime checks can be generated.
-      if (!NeedsAliasSetRTCheck) {
-        assert((AS.size() <= 1 ||
-                all_of(AS,
-                       [this](auto AC) {
-                         MemAccessInfo AccessWrite(AC.getValue(), true);
-                         return DepCands.findValue(AccessWrite) ==
-                                DepCands.end();
-                       })) &&
-               "Can only skip updating CanDoRT below, if all entries in AS "
-               "are reads or there is at most 1 entry");
-        continue;
-      }
-    }
+    //
+    // We need runtime checks for this alias set, if there are at least 2
+    // dependence sets (in which case RunningDepId > 2) or if we need to re-try
+    // any bound checks (because in that case the number of dependence sets is
+    // incomplete).
+    bool NeedsAliasSetRTCheck = RunningDepId > 2 || !Retries.empty();

     // We need to perform run-time alias checks, but some pointers had bounds
     // that couldn't be checked.
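
A small example of why the write/read census matters, under the rule stated in the comments above (no checks when there are no writes, or one write and no reads):

    // One write (A) and one read (B) in the alias set: vectorizing requires a
    // runtime check that A and B do not overlap, unless that can be proven.
    void saxpy(float *A, const float *B, float k, int n) {
      for (int i = 0; i < n; ++i)
        A[i] = k * B[i] + A[i];
    }

    // Reads only: no runtime check is needed regardless of aliasing.
    float sum2(const float *A, const float *B, int n) {
      float s = 0;
      for (int i = 0; i < n; ++i)
        s += A[i] + B[i];
      return s;
    }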

View File

@@ -1239,7 +1239,8 @@ bool AtomicExpand::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
     Value *NewValueInsert =
         insertMaskedValue(Builder, LoadedTryStore, CI->getNewValOperand(), PMV);
     Value *StoreSuccess =
-        TLI->emitStoreConditional(Builder, NewValueInsert, Addr, MemOpOrder);
+        TLI->emitStoreConditional(Builder, NewValueInsert, PMV.AlignedAddr,
+                                  MemOpOrder);
     StoreSuccess = Builder.CreateICmpEQ(
         StoreSuccess, ConstantInt::get(Type::getInt32Ty(Ctx), 0), "success");
     BasicBlock *RetryBB = HasReleasedLoadBB ? ReleasedLoadBB : StartBB;
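
The fix matters for compare-exchange on types narrower than the target's exclusive-access width: AtomicExpand widens the operation to the containing aligned word and masks the value in, so the store-conditional must target PMV.AlignedAddr rather than the original address. A sketch of source that exercises this expansion on LL/SC targets without sub-word exclusives:

    #include <atomic>

    // Lowered, where needed, to a load-exclusive/store-conditional loop over
    // the aligned word that contains the byte.
    bool cas_byte(std::atomic<unsigned char> &a, unsigned char expected,
                  unsigned char desired) {
      return a.compare_exchange_strong(expected, desired);
    }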

View File

@@ -455,19 +455,23 @@ bool InlineAsmLowering::lowerInlineAsm(
       unsigned DefRegIdx = InstFlagIdx + 1;
       Register Def = Inst->getOperand(DefRegIdx).getReg();

-      // Copy input to new vreg with same reg class as Def
-      const TargetRegisterClass *RC = MRI->getRegClass(Def);
       ArrayRef<Register> SrcRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal);
       assert(SrcRegs.size() == 1 && "Single register is expected here");
-      Register Tmp = MRI->createVirtualRegister(RC);
-      if (!buildAnyextOrCopy(Tmp, SrcRegs[0], MIRBuilder))
-        return false;

-      // Add Flag and input register operand (Tmp) to Inst. Tie Tmp to Def.
+      // When Def is physreg: use given input.
+      Register In = SrcRegs[0];
+      // When Def is vreg: copy input to new vreg with same reg class as Def.
+      if (Def.isVirtual()) {
+        In = MRI->createVirtualRegister(MRI->getRegClass(Def));
+        if (!buildAnyextOrCopy(In, SrcRegs[0], MIRBuilder))
+          return false;
+      }
+
+      // Add Flag and input register operand (In) to Inst. Tie In to Def.
       unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind_RegUse, 1);
       unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx);
       Inst.addImm(Flag);
-      Inst.addReg(Tmp);
+      Inst.addReg(In);
       Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1);
       break;
     }
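
The case being handled is a matching input constraint whose tied definition is a fixed physical register; copying the input into a fresh virtual register would break the tie. An illustrative shape in GCC extended asm, using x86-64 constraint letters:

    // "=d" pins output 0 to rdx; "0" ties the input to the same operand, so
    // the input value must be placed in rdx itself, not in a new vreg.
    long passthrough_rdx(long v) {
      long out;
      asm("" : "=d"(out) : "0"(v));
      return out;
    }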

View File

@@ -269,7 +269,7 @@ void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
                                              uint64_t SymOffset) {
   switch (Type) {
   default:
-    llvm_unreachable("Relocation type not implemented yet!");
+    report_fatal_error("Relocation type not implemented yet!");
     break;
   case ELF::R_X86_64_NONE:
     break;
@@ -359,7 +359,7 @@ void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
   default:
     // There are other relocation types, but it appears these are the
     // only ones currently used by the LLVM ELF object writer
-    llvm_unreachable("Relocation type not implemented yet!");
+    report_fatal_error("Relocation type not implemented yet!");
     break;
   }
 }
@@ -382,7 +382,7 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
   switch (Type) {
   default:
-    llvm_unreachable("Relocation type not implemented yet!");
+    report_fatal_error("Relocation type not implemented yet!");
     break;
   case ELF::R_AARCH64_ABS16: {
     uint64_t Result = Value + Addend;
@@ -721,7 +721,7 @@ void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
   uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
   switch (Type) {
   default:
-    llvm_unreachable("Relocation type not implemented yet!");
+    report_fatal_error("Relocation type not implemented yet!");
     break;
   case ELF::R_PPC_ADDR16_LO:
     writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
@@ -741,7 +741,7 @@ void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
   uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
   switch (Type) {
   default:
-    llvm_unreachable("Relocation type not implemented yet!");
+    report_fatal_error("Relocation type not implemented yet!");
     break;
   case ELF::R_PPC64_ADDR16:
     writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
@@ -835,7 +835,7 @@ void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
   uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
   switch (Type) {
   default:
-    llvm_unreachable("Relocation type not implemented yet!");
+    report_fatal_error("Relocation type not implemented yet!");
     break;
   case ELF::R_390_PC16DBL:
   case ELF::R_390_PLT16DBL: {
@@ -890,7 +890,7 @@ void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
   switch (Type) {
   default:
-    llvm_unreachable("Relocation type not implemented yet!");
+    report_fatal_error("Relocation type not implemented yet!");
     break;
   case ELF::R_BPF_NONE:
     break;
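
The rationale for the swap: llvm_unreachable() asserts an internal invariant and is undefined behavior if ever reached in a release build, whereas report_fatal_error() aborts predictably with a message. An unimplemented relocation type arrives in user-supplied object files, so the diagnostic has to survive release builds. The resulting pattern, reduced:

    #include "llvm/Support/ErrorHandling.h"

    void resolveRelocation(unsigned Type) {
      switch (Type) {
      default:
        // Input-dependent failure: fail loudly in every build mode.
        llvm::report_fatal_error("Relocation type not implemented yet!");
      case 0: // e.g. ELF::R_X86_64_NONE: nothing to do
        break;
      }
    }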

View File

@@ -11,6 +11,7 @@
 #include "llvm/ADT/Twine.h"
 #include "llvm/BinaryFormat/COFF.h"
 #include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCContext.h"
 #include "llvm/MC/MCExpr.h"
 #include "llvm/MC/MCFixup.h"
 #include "llvm/MC/MCFixupKindInfo.h"
@@ -48,10 +49,33 @@ unsigned AArch64WinCOFFObjectWriter::getRelocType(
                     : Target.getSymA()->getKind();
   const MCExpr *Expr = Fixup.getValue();

+  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
+    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
+    switch (AArch64MCExpr::getSymbolLoc(RefKind)) {
+    case AArch64MCExpr::VK_ABS:
+    case AArch64MCExpr::VK_SECREL:
+      // Supported
+      break;
+    default:
+      Ctx.reportError(Fixup.getLoc(), "relocation variant " +
+                                          A64E->getVariantKindName() +
+                                          " unsupported on COFF targets");
+      return COFF::IMAGE_REL_ARM64_ABSOLUTE; // Dummy return value
+    }
+  }
+
   switch (static_cast<unsigned>(Fixup.getKind())) {
   default: {
-    const MCFixupKindInfo &Info = MAB.getFixupKindInfo(Fixup.getKind());
-    report_fatal_error(Twine("unsupported relocation type: ") + Info.Name);
+    if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
+      Ctx.reportError(Fixup.getLoc(), "relocation type " +
+                                          A64E->getVariantKindName() +
+                                          " unsupported on COFF targets");
+    } else {
+      const MCFixupKindInfo &Info = MAB.getFixupKindInfo(Fixup.getKind());
+      Ctx.reportError(Fixup.getLoc(), Twine("relocation type ") + Info.Name +
+                                          " unsupported on COFF targets");
+    }
+    return COFF::IMAGE_REL_ARM64_ABSOLUTE; // Dummy return value
   }
   case FK_Data_4:

View File

@@ -131,10 +131,20 @@ void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
   // We're tracking up to the Function boundaries, and cannot go beyond because
   // of FunctionPass restrictions. We can ensure that is memory not clobbered
   // for memory operations that are live in to entry points only.
-  bool NotClobbered = isEntryFunc && !isClobberedInFunction(&I);
   Instruction *PtrI = dyn_cast<Instruction>(Ptr);
-  if (!PtrI && NotClobbered && isGlobalLoad(I)) {
-    if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
+
+  if (!isEntryFunc) {
+    if (PtrI)
+      setUniformMetadata(PtrI);
+    return;
+  }
+
+  bool NotClobbered = false;
+  if (PtrI)
+    NotClobbered = !isClobberedInFunction(&I);
+  else if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
+    if (isGlobalLoad(I) && !isClobberedInFunction(&I)) {
+      NotClobbered = true;
       // Lookup for the existing GEP
       if (noClobberClones.count(Ptr)) {
         PtrI = noClobberClones[Ptr];

View File

@@ -2655,6 +2655,15 @@ const unsigned *PPCInstrInfo::getLoadOpcodesForSpillArray() const {

 void PPCInstrInfo::fixupIsDeadOrKill(MachineInstr &StartMI, MachineInstr &EndMI,
                                      unsigned RegNo) const {
+  // Conservatively clear kill flag for the register if the instructions are in
+  // different basic blocks and in SSA form, because the kill flag may no longer
+  // be right. There is no need to bother with dead flags since defs with no
+  // uses will be handled by DCE.
+  MachineRegisterInfo &MRI = StartMI.getParent()->getParent()->getRegInfo();
+  if (MRI.isSSA() && (StartMI.getParent() != EndMI.getParent())) {
+    MRI.clearKillFlags(RegNo);
+    return;
+  }
+
   // Instructions between [StartMI, EndMI] should be in same basic block.
   assert((StartMI.getParent() == EndMI.getParent()) &&

View File

@@ -565,14 +565,18 @@ class PPCInstrInfo : public PPCGenInstrInfo {
                         int64_t OffsetImm) const;

   /// Fixup killed/dead flag for register \p RegNo between instructions [\p
-  /// StartMI, \p EndMI]. Some PostRA transformations may violate register
-  /// killed/dead flags semantics, this function can be called to fix up. Before
-  /// calling this function,
+  /// StartMI, \p EndMI]. Some pre-RA or post-RA transformations may violate
+  /// register killed/dead flags semantics, this function can be called to fix
+  /// up. Before calling this function,
   /// 1. Ensure that \p RegNo liveness is killed after instruction \p EndMI.
   /// 2. Ensure that there is no new definition between (\p StartMI, \p EndMI)
   ///    and possible definition for \p RegNo is \p StartMI or \p EndMI.
-  /// 3. Ensure that all instructions between [\p StartMI, \p EndMI] are in same
-  ///    basic block.
+  /// 3. We can do accurate fixup for the case when all instructions between
+  ///    [\p StartMI, \p EndMI] are in same basic block.
+  /// 4. For the case when \p StartMI and \p EndMI are not in same basic block,
+  ///    we conservatively clear kill flag for all uses of \p RegNo for pre-RA
+  ///    and for post-RA, we give an assertion as without reaching definition
+  ///    analysis post-RA, \p StartMI and \p EndMI are hard to keep right.
   void fixupIsDeadOrKill(MachineInstr &StartMI, MachineInstr &EndMI,
                          unsigned RegNo) const;
   void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const;

View File

@@ -1706,7 +1706,8 @@ static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,

 #if (KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300) ||                          \
     (KMP_COMPILER_MSVC && _MSC_VER >= 1700) ||                                 \
-    (KMP_COMPILER_CLANG && KMP_MSVC_COMPAT)
+    (KMP_COMPILER_CLANG && (KMP_MSVC_COMPAT || __MINGW32__)) ||                \
+    (KMP_COMPILER_GCC && __MINGW32__)

 #include <immintrin.h>
 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
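
With the widened guard, MinGW's GCC and clang also compile the TSX speculative-lock path. A sketch of the shape these intrinsics enable (illustrative, not the actual libomp code; requires -mrtm and RTM-capable hardware):

    #include <immintrin.h>

    // Returns true if the increment committed transactionally; on abort the
    // caller is expected to fall back to taking the real lock.
    bool speculative_inc(int &x) {
      unsigned status = _xbegin();
      if (status == _XBEGIN_STARTED) {
        ++x;      // runs inside the hardware transaction
        _xend();  // commit
        return true;
      }
      return false; // transaction aborted
    }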

View File

@@ -1,14 +1,14 @@
 // $FreeBSD$

-#define LLVM_REVISION "llvmorg-11.0.0-rc1-25-g903c872b169"
+#define LLVM_REVISION "llvmorg-11.0.0-rc1-47-gff47911ddfc"
 #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"

-#define CLANG_REVISION "llvmorg-11.0.0-rc1-25-g903c872b169"
+#define CLANG_REVISION "llvmorg-11.0.0-rc1-47-gff47911ddfc"
 #define CLANG_REPOSITORY "git@github.com:llvm/llvm-project.git"

 // <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
-#define LLD_REVISION "llvmorg-11.0.0-rc1-25-g903c872b169-1300007"
+#define LLD_REVISION "llvmorg-11.0.0-rc1-47-gff47911ddfc-1300007"
 #define LLD_REPOSITORY "FreeBSD"

-#define LLDB_REVISION "llvmorg-11.0.0-rc1-25-g903c872b169"
+#define LLDB_REVISION "llvmorg-11.0.0-rc1-47-gff47911ddfc"
 #define LLDB_REPOSITORY "git@github.com:llvm/llvm-project.git"

View File

@@ -1,3 +1,3 @@
 /* $FreeBSD$ */
-#define LLVM_REVISION "llvmorg-11.0.0-rc1-25-g903c872b169"
+#define LLVM_REVISION "llvmorg-11.0.0-rc1-47-gff47911ddfc"
 #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"