Merge llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and openmp

llvmorg-10.0.0-97-g6f71678ecd2 (not quite 10.0.1 rc2, as more fixes are
still pending).

MFC after:	3 weeks
This commit is contained in:
Dimitry Andric 2020-06-20 20:06:52 +00:00
commit e837bb5cfb
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=362445
32 changed files with 2400 additions and 242 deletions

View File

@ -531,6 +531,7 @@ llvm/lib/ExecutionEngine/PerfJITEvents/CMakeLists.txt
llvm/lib/ExecutionEngine/PerfJITEvents/LLVMBuild.txt
llvm/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt
llvm/lib/ExecutionEngine/RuntimeDyld/LLVMBuild.txt
llvm/lib/Extensions/
llvm/lib/Frontend/CMakeLists.txt
llvm/lib/Frontend/LLVMBuild.txt
llvm/lib/Frontend/OpenMP/CMakeLists.txt

View File

@ -3817,6 +3817,9 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) {
if (SS.isInvalid())
return TypeResult(true);
TemplateName Template = TemplateD.get();
// Translate the parser's template argument list in our AST format.
@ -5925,7 +5928,9 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType* T) {
return VisitNestedNameSpecifier(T->getQualifier());
if (auto *Q = T->getQualifier())
return VisitNestedNameSpecifier(Q);
return false;
}
bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
@ -5979,6 +5984,7 @@ bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
NestedNameSpecifier *NNS) {
assert(NNS);
if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
return true;

View File

@ -335,14 +335,38 @@ class TokenCollector::CollectPPExpansions : public PPCallbacks {
SourceRange Range, const MacroArgs *Args) override {
if (!Collector)
return;
// Only record top-level expansions, not those where:
const auto &SM = Collector->PP.getSourceManager();
// Only record top-level expansions that directly produce expanded tokens.
// This excludes those where:
// - the macro use is inside a macro body,
// - the macro appears in an argument to another macro.
if (!MacroNameTok.getLocation().isFileID() ||
(LastExpansionEnd.isValid() &&
Collector->PP.getSourceManager().isBeforeInTranslationUnit(
Range.getBegin(), LastExpansionEnd)))
// However macro expansion isn't really a tree, it's token rewrite rules,
// so there are other cases, e.g.
// #define B(X) X
// #define A 1 + B
// A(2)
// Both A and B produce expanded tokens, though the macro name 'B' comes
// from an expansion. The best we can do is merge the mappings for both.
// The *last* token of any top-level macro expansion must be in a file.
// (In the example above, see the closing paren of the expansion of B).
if (!Range.getEnd().isFileID())
return;
// If there's a current expansion that encloses this one, this one can't be
// top-level.
if (LastExpansionEnd.isValid() &&
!SM.isBeforeInTranslationUnit(LastExpansionEnd, Range.getEnd()))
return;
// If the macro invocation (B) starts in a macro (A) but ends in a file,
// we'll create a merged mapping for A + B by overwriting the endpoint for
// A's startpoint.
if (!Range.getBegin().isFileID()) {
Range.setBegin(SM.getExpansionLoc(Range.getBegin()));
assert(Collector->Expansions.count(Range.getBegin().getRawEncoding()) &&
"Overlapping macros should have same expansion location");
}
Collector->Expansions[Range.getBegin().getRawEncoding()] = Range.getEnd();
LastExpansionEnd = Range.getEnd();
}
@ -399,197 +423,167 @@ class TokenCollector::Builder {
}
TokenBuffer build() && {
buildSpelledTokens();
// Walk over expanded tokens and spelled tokens in parallel, building the
// mappings between those using source locations.
// To correctly recover empty macro expansions, we also take locations
// reported to PPCallbacks::MacroExpands into account as we do not have any
// expanded tokens with source locations to guide us.
// The 'eof' token is special, it is not part of spelled token stream. We
// handle it separately at the end.
assert(!Result.ExpandedTokens.empty());
assert(Result.ExpandedTokens.back().kind() == tok::eof);
for (unsigned I = 0; I < Result.ExpandedTokens.size() - 1; ++I) {
// (!) I might be updated by the following call.
processExpandedToken(I);
// Tokenize every file that contributed tokens to the expanded stream.
buildSpelledTokens();
// The expanded token stream consists of runs of tokens that came from
// the same source (a macro expansion, part of a file etc).
// Between these runs are the logical positions of spelled tokens that
// didn't expand to anything.
while (NextExpanded < Result.ExpandedTokens.size() - 1 /* eof */) {
// Create empty mappings for spelled tokens that expanded to nothing here.
// May advance NextSpelled, but NextExpanded is unchanged.
discard();
// Create mapping for a contiguous run of expanded tokens.
// Advances NextExpanded past the run, and NextSpelled accordingly.
unsigned OldPosition = NextExpanded;
advance();
if (NextExpanded == OldPosition)
diagnoseAdvanceFailure();
}
// 'eof' not handled in the loop, do it here.
assert(SM.getMainFileID() ==
SM.getFileID(Result.ExpandedTokens.back().location()));
fillGapUntil(Result.Files[SM.getMainFileID()],
Result.ExpandedTokens.back().location(),
Result.ExpandedTokens.size() - 1);
Result.Files[SM.getMainFileID()].EndExpanded = Result.ExpandedTokens.size();
// Some files might have unaccounted spelled tokens at the end, add an empty
// mapping for those as they did not have expanded counterparts.
fillGapsAtEndOfFiles();
// If any tokens remain in any of the files, they didn't expand to anything.
// Create empty mappings up until the end of the file.
for (const auto &File : Result.Files)
discard(File.first);
return std::move(Result);
}
private:
/// Process the next token in an expanded stream and move corresponding
/// spelled tokens, record any mapping if needed.
/// (!) \p I will be updated if this had to skip tokens, e.g. for macros.
void processExpandedToken(unsigned &I) {
auto L = Result.ExpandedTokens[I].location();
if (L.isMacroID()) {
processMacroExpansion(SM.getExpansionRange(L), I);
return;
// Consume a sequence of spelled tokens that didn't expand to anything.
// In the simplest case, skips spelled tokens until finding one that produced
// the NextExpanded token, and creates an empty mapping for them.
// If Drain is provided, skips remaining tokens from that file instead.
void discard(llvm::Optional<FileID> Drain = llvm::None) {
SourceLocation Target =
Drain ? SM.getLocForEndOfFile(*Drain)
: SM.getExpansionLoc(
Result.ExpandedTokens[NextExpanded].location());
FileID File = SM.getFileID(Target);
const auto &SpelledTokens = Result.Files[File].SpelledTokens;
auto &NextSpelled = this->NextSpelled[File];
TokenBuffer::Mapping Mapping;
Mapping.BeginSpelled = NextSpelled;
// When dropping trailing tokens from a file, the empty mapping should
// be positioned within the file's expanded-token range (at the end).
Mapping.BeginExpanded = Mapping.EndExpanded =
Drain ? Result.Files[*Drain].EndExpanded : NextExpanded;
// We may want to split into several adjacent empty mappings.
// FlushMapping() emits the current mapping and starts a new one.
auto FlushMapping = [&, this] {
Mapping.EndSpelled = NextSpelled;
if (Mapping.BeginSpelled != Mapping.EndSpelled)
Result.Files[File].Mappings.push_back(Mapping);
Mapping.BeginSpelled = NextSpelled;
};
while (NextSpelled < SpelledTokens.size() &&
SpelledTokens[NextSpelled].location() < Target) {
// If we know mapping bounds at [NextSpelled, KnownEnd] (macro expansion)
// then we want to partition our (empty) mapping.
// [Start, NextSpelled) [NextSpelled, KnownEnd] (KnownEnd, Target)
SourceLocation KnownEnd = CollectedExpansions.lookup(
SpelledTokens[NextSpelled].location().getRawEncoding());
if (KnownEnd.isValid()) {
FlushMapping(); // Emits [Start, NextSpelled)
while (NextSpelled < SpelledTokens.size() &&
SpelledTokens[NextSpelled].location() <= KnownEnd)
++NextSpelled;
FlushMapping(); // Emits [NextSpelled, KnownEnd]
// Now the loop continues and will emit (KnownEnd, Target).
} else {
++NextSpelled;
}
}
if (L.isFileID()) {
auto FID = SM.getFileID(L);
TokenBuffer::MarkedFile &File = Result.Files[FID];
FlushMapping();
}
fillGapUntil(File, L, I);
// Consumes the NextExpanded token and others that are part of the same run.
// Increases NextExpanded and NextSpelled by at least one, and adds a mapping
// (unless this is a run of file tokens, which we represent with no mapping).
void advance() {
const syntax::Token &Tok = Result.ExpandedTokens[NextExpanded];
SourceLocation Expansion = SM.getExpansionLoc(Tok.location());
FileID File = SM.getFileID(Expansion);
const auto &SpelledTokens = Result.Files[File].SpelledTokens;
auto &NextSpelled = this->NextSpelled[File];
// Skip the token.
assert(File.SpelledTokens[NextSpelled[FID]].location() == L &&
"no corresponding token in the spelled stream");
++NextSpelled[FID];
return;
if (Tok.location().isFileID()) {
// A run of file tokens continues while the expanded/spelled tokens match.
while (NextSpelled < SpelledTokens.size() &&
NextExpanded < Result.ExpandedTokens.size() &&
SpelledTokens[NextSpelled].location() ==
Result.ExpandedTokens[NextExpanded].location()) {
++NextSpelled;
++NextExpanded;
}
// We need no mapping for file tokens copied to the expanded stream.
} else {
// We found a new macro expansion. We should have its spelling bounds.
auto End = CollectedExpansions.lookup(Expansion.getRawEncoding());
assert(End.isValid() && "Macro expansion wasn't captured?");
// Mapping starts here...
TokenBuffer::Mapping Mapping;
Mapping.BeginExpanded = NextExpanded;
Mapping.BeginSpelled = NextSpelled;
// ... consumes spelled tokens within bounds we captured ...
while (NextSpelled < SpelledTokens.size() &&
SpelledTokens[NextSpelled].location() <= End)
++NextSpelled;
// ... consumes expanded tokens rooted at the same expansion ...
while (NextExpanded < Result.ExpandedTokens.size() &&
SM.getExpansionLoc(
Result.ExpandedTokens[NextExpanded].location()) == Expansion)
++NextExpanded;
// ... and ends here.
Mapping.EndExpanded = NextExpanded;
Mapping.EndSpelled = NextSpelled;
Result.Files[File].Mappings.push_back(Mapping);
}
}
/// Skip expanded and spelled tokens of a macro expansion that covers \p
/// SpelledRange. Add a corresponding mapping.
/// (!) \p I will be the index of the last token in an expansion after this
/// function returns.
void processMacroExpansion(CharSourceRange SpelledRange, unsigned &I) {
auto FID = SM.getFileID(SpelledRange.getBegin());
assert(FID == SM.getFileID(SpelledRange.getEnd()));
TokenBuffer::MarkedFile &File = Result.Files[FID];
fillGapUntil(File, SpelledRange.getBegin(), I);
// Skip all expanded tokens from the same macro expansion.
unsigned BeginExpanded = I;
for (; I + 1 < Result.ExpandedTokens.size(); ++I) {
auto NextL = Result.ExpandedTokens[I + 1].location();
if (!NextL.isMacroID() ||
SM.getExpansionLoc(NextL) != SpelledRange.getBegin())
break;
// advance() is supposed to consume at least one token - if not, we crash.
void diagnoseAdvanceFailure() {
#ifndef NDEBUG
// Show the failed-to-map token in context.
for (unsigned I = (NextExpanded < 10) ? 0 : NextExpanded - 10;
I < NextExpanded + 5 && I < Result.ExpandedTokens.size(); ++I) {
const char *L =
(I == NextExpanded) ? "!! " : (I < NextExpanded) ? "ok " : " ";
llvm::errs() << L << Result.ExpandedTokens[I].dumpForTests(SM) << "\n";
}
unsigned EndExpanded = I + 1;
consumeMapping(File, SM.getFileOffset(SpelledRange.getEnd()), BeginExpanded,
EndExpanded, NextSpelled[FID]);
#endif
llvm_unreachable("Couldn't map expanded token to spelled tokens!");
}
/// Initializes TokenBuffer::Files and fills spelled tokens and expanded
/// ranges for each of the files.
void buildSpelledTokens() {
for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) {
auto FID =
SM.getFileID(SM.getExpansionLoc(Result.ExpandedTokens[I].location()));
const auto &Tok = Result.ExpandedTokens[I];
auto FID = SM.getFileID(SM.getExpansionLoc(Tok.location()));
auto It = Result.Files.try_emplace(FID);
TokenBuffer::MarkedFile &File = It.first->second;
File.EndExpanded = I + 1;
// The eof token should not be considered part of the main-file's range.
File.EndExpanded = Tok.kind() == tok::eof ? I : I + 1;
if (!It.second)
continue; // we have seen this file before.
// This is the first time we see this file.
File.BeginExpanded = I;
File.SpelledTokens = tokenize(FID, SM, LangOpts);
}
}
void consumeEmptyMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
unsigned ExpandedIndex, unsigned &SpelledIndex) {
consumeMapping(File, EndOffset, ExpandedIndex, ExpandedIndex, SpelledIndex);
}
/// Consumes spelled tokens that form a macro expansion and adds an entry to
/// the resulting token buffer.
/// (!) SpelledIndex is updated in-place.
void consumeMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
unsigned BeginExpanded, unsigned EndExpanded,
unsigned &SpelledIndex) {
// We need to record this mapping before continuing.
unsigned MappingBegin = SpelledIndex;
++SpelledIndex;
bool HitMapping =
tryConsumeSpelledUntil(File, EndOffset + 1, SpelledIndex).hasValue();
(void)HitMapping;
assert(!HitMapping && "recursive macro expansion?");
TokenBuffer::Mapping M;
M.BeginExpanded = BeginExpanded;
M.EndExpanded = EndExpanded;
M.BeginSpelled = MappingBegin;
M.EndSpelled = SpelledIndex;
File.Mappings.push_back(M);
}
/// Consumes spelled tokens until location \p L is reached and adds a mapping
/// covering the consumed tokens. The mapping will point to an empty expanded
/// range at position \p ExpandedIndex.
void fillGapUntil(TokenBuffer::MarkedFile &File, SourceLocation L,
unsigned ExpandedIndex) {
assert(L.isFileID());
FileID FID;
unsigned Offset;
std::tie(FID, Offset) = SM.getDecomposedLoc(L);
unsigned &SpelledIndex = NextSpelled[FID];
unsigned MappingBegin = SpelledIndex;
while (true) {
auto EndLoc = tryConsumeSpelledUntil(File, Offset, SpelledIndex);
if (SpelledIndex != MappingBegin) {
TokenBuffer::Mapping M;
M.BeginSpelled = MappingBegin;
M.EndSpelled = SpelledIndex;
M.BeginExpanded = M.EndExpanded = ExpandedIndex;
File.Mappings.push_back(M);
}
if (!EndLoc)
break;
consumeEmptyMapping(File, SM.getFileOffset(*EndLoc), ExpandedIndex,
SpelledIndex);
MappingBegin = SpelledIndex;
}
};
/// Consumes spelled tokens until it reaches Offset or a mapping boundary,
/// i.e. a name of a macro expansion or the start '#' token of a PP directive.
/// (!) NextSpelled is updated in place.
///
/// returns None if \p Offset was reached, otherwise returns the end location
/// of a mapping that starts at \p NextSpelled.
llvm::Optional<SourceLocation>
tryConsumeSpelledUntil(TokenBuffer::MarkedFile &File, unsigned Offset,
unsigned &NextSpelled) {
for (; NextSpelled < File.SpelledTokens.size(); ++NextSpelled) {
auto L = File.SpelledTokens[NextSpelled].location();
if (Offset <= SM.getFileOffset(L))
return llvm::None; // reached the offset we are looking for.
auto Mapping = CollectedExpansions.find(L.getRawEncoding());
if (Mapping != CollectedExpansions.end())
return Mapping->second; // found a mapping before the offset.
}
return llvm::None; // no more tokens, we "reached" the offset.
}
/// Adds empty mappings for unconsumed spelled tokens at the end of each file.
void fillGapsAtEndOfFiles() {
for (auto &F : Result.Files) {
if (F.second.SpelledTokens.empty())
continue;
fillGapUntil(F.second, F.second.SpelledTokens.back().endLocation(),
F.second.EndExpanded);
}
}
TokenBuffer Result;
/// For each file, a position of the next spelled token we will consume.
llvm::DenseMap<FileID, unsigned> NextSpelled;
unsigned NextExpanded = 0; // cursor in ExpandedTokens
llvm::DenseMap<FileID, unsigned> NextSpelled; // cursor in SpelledTokens
PPExpansions CollectedExpansions;
const SourceManager &SM;
const LangOptions &LangOpts;

View File

@ -2825,6 +2825,7 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
if (R.isSubClassOf(InhClass))
OS << " bool isInherited = Record.readInt();\n";
OS << " bool isImplicit = Record.readInt();\n";
OS << " bool isPackExpansion = Record.readInt();\n";
ArgRecords = R.getValueAsListOfDefs("Args");
Args.clear();
for (const auto *Arg : ArgRecords) {
@ -2840,6 +2841,7 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
if (R.isSubClassOf(InhClass))
OS << " cast<InheritableAttr>(New)->setInherited(isInherited);\n";
OS << " New->setImplicit(isImplicit);\n";
OS << " New->setPackExpansion(isPackExpansion);\n";
OS << " break;\n";
OS << " }\n";
}
@ -2866,6 +2868,7 @@ void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS) {
if (R.isSubClassOf(InhClass))
OS << " Record.push_back(SA->isInherited());\n";
OS << " Record.push_back(A->isImplicit());\n";
OS << " Record.push_back(A->isPackExpansion());\n";
for (const auto *Arg : Args)
createArgument(*Arg, R.getName())->writePCHWrite(OS);

View File

@ -486,7 +486,9 @@ class ImportThunkChunkX86 : public ImportThunkChunk {
class ImportThunkChunkARM : public ImportThunkChunk {
public:
explicit ImportThunkChunkARM(Defined *s) : ImportThunkChunk(s) {}
explicit ImportThunkChunkARM(Defined *s) : ImportThunkChunk(s) {
setAlignment(2);
}
size_t getSize() const override { return sizeof(importThunkARM); }
void getBaserels(std::vector<Baserel> *res) override;
void writeTo(uint8_t *buf) const override;
@ -494,14 +496,16 @@ class ImportThunkChunkARM : public ImportThunkChunk {
class ImportThunkChunkARM64 : public ImportThunkChunk {
public:
explicit ImportThunkChunkARM64(Defined *s) : ImportThunkChunk(s) {}
explicit ImportThunkChunkARM64(Defined *s) : ImportThunkChunk(s) {
setAlignment(4);
}
size_t getSize() const override { return sizeof(importThunkARM64); }
void writeTo(uint8_t *buf) const override;
};
class RangeExtensionThunkARM : public NonSectionChunk {
public:
explicit RangeExtensionThunkARM(Defined *t) : target(t) {}
explicit RangeExtensionThunkARM(Defined *t) : target(t) { setAlignment(2); }
size_t getSize() const override;
void writeTo(uint8_t *buf) const override;

View File

@ -365,7 +365,9 @@ class TailMergeChunkX86 : public NonSectionChunk {
class ThunkChunkARM : public NonSectionChunk {
public:
ThunkChunkARM(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {}
ThunkChunkARM(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {
setAlignment(2);
}
size_t getSize() const override { return sizeof(thunkARM); }
@ -385,7 +387,9 @@ class ThunkChunkARM : public NonSectionChunk {
class TailMergeChunkARM : public NonSectionChunk {
public:
TailMergeChunkARM(Chunk *d, Defined *h) : desc(d), helper(h) {}
TailMergeChunkARM(Chunk *d, Defined *h) : desc(d), helper(h) {
setAlignment(2);
}
size_t getSize() const override { return sizeof(tailMergeARM); }
@ -405,7 +409,9 @@ class TailMergeChunkARM : public NonSectionChunk {
class ThunkChunkARM64 : public NonSectionChunk {
public:
ThunkChunkARM64(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {}
ThunkChunkARM64(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {
setAlignment(4);
}
size_t getSize() const override { return sizeof(thunkARM64); }
@ -422,7 +428,9 @@ class ThunkChunkARM64 : public NonSectionChunk {
class TailMergeChunkARM64 : public NonSectionChunk {
public:
TailMergeChunkARM64(Chunk *d, Defined *h) : desc(d), helper(h) {}
TailMergeChunkARM64(Chunk *d, Defined *h) : desc(d), helper(h) {
setAlignment(4);
}
size_t getSize() const override { return sizeof(tailMergeARM64); }

View File

@ -52,6 +52,8 @@ StringRef ScriptLexer::getLine() {
// Returns 1-based line number of the current token.
size_t ScriptLexer::getLineNumber() {
if (pos == 0)
return 1;
StringRef s = getCurrentMB().getBuffer();
StringRef tok = tokens[pos - 1];
return s.substr(0, tok.data() - s.data()).count('\n') + 1;
@ -292,7 +294,9 @@ static bool encloses(StringRef s, StringRef t) {
MemoryBufferRef ScriptLexer::getCurrentMB() {
// Find input buffer containing the current token.
assert(!mbs.empty() && pos > 0);
assert(!mbs.empty());
if (pos == 0)
return mbs.back();
for (MemoryBufferRef mb : mbs)
if (encloses(mb.getBuffer(), tokens[pos - 1]))
return mb;

View File

@ -737,6 +737,7 @@ bool ScriptParser::readSectionDirective(OutputSection *cmd, StringRef tok1, Stri
expect("(");
if (consume("NOLOAD")) {
cmd->noload = true;
cmd->type = SHT_NOBITS;
} else {
skip(); // This is "COPY", "INFO" or "OVERLAY".
cmd->nonAlloc = true;

View File

@ -152,6 +152,10 @@ AARCH64_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderx2t99", ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_NONE))
AARCH64_CPU_NAME("thunderx3t110", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC | AEK_CRYPTO | AEK_FP | AEK_SIMD |
AEK_LSE | AEK_RAND | AArch64::AEK_PROFILE |
AArch64::AEK_RAS))
AARCH64_CPU_NAME("thunderx", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt88", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,

View File

@ -963,10 +963,10 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
continue;
}
// If one of the blocks is the entire common tail (and not the entry
// block, which we can't jump to), we can treat all blocks with this same
// tail at once. Use PredBB if that is one of the possibilities, as that
// will not introduce any extra branches.
// If one of the blocks is the entire common tail (and is not the entry
// block/an EH pad, which we can't jump to), we can treat all blocks with
// this same tail at once. Use PredBB if that is one of the possibilities,
// as that will not introduce any extra branches.
MachineBasicBlock *EntryBB =
&MergePotentials.front().getBlock()->getParent()->front();
unsigned commonTailIndex = SameTails.size();
@ -974,19 +974,21 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
// into the other.
if (SameTails.size() == 2 &&
SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
SameTails[1].tailIsWholeBlock())
SameTails[1].tailIsWholeBlock() && !SameTails[1].getBlock()->isEHPad())
commonTailIndex = 1;
else if (SameTails.size() == 2 &&
SameTails[1].getBlock()->isLayoutSuccessor(
SameTails[0].getBlock()) &&
SameTails[0].tailIsWholeBlock())
SameTails[0].getBlock()) &&
SameTails[0].tailIsWholeBlock() &&
!SameTails[0].getBlock()->isEHPad())
commonTailIndex = 0;
else {
// Otherwise just pick one, favoring the fall-through predecessor if
// there is one.
for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
MachineBasicBlock *MBB = SameTails[i].getBlock();
if (MBB == EntryBB && SameTails[i].tailIsWholeBlock())
if ((MBB == EntryBB || MBB->isEHPad()) &&
SameTails[i].tailIsWholeBlock())
continue;
if (MBB == PredBB) {
commonTailIndex = i;

View File

@ -443,6 +443,10 @@ def SVEUnsupported : AArch64Unsupported {
HasSVE2BitPerm];
}
def PAUnsupported : AArch64Unsupported {
let F = [HasPA];
}
include "AArch64SchedA53.td"
include "AArch64SchedA57.td"
include "AArch64SchedCyclone.td"
@ -453,6 +457,7 @@ include "AArch64SchedExynosM4.td"
include "AArch64SchedExynosM5.td"
include "AArch64SchedThunderX.td"
include "AArch64SchedThunderX2T99.td"
include "AArch64SchedThunderX3T110.td"
def ProcA35 : SubtargetFeature<"a35", "ARMProcFamily", "CortexA35",
"Cortex-A35 ARM processors", [
@ -780,6 +785,25 @@ def ProcThunderX2T99 : SubtargetFeature<"thunderx2t99", "ARMProcFamily",
FeatureLSE,
HasV8_1aOps]>;
def ProcThunderX3T110 : SubtargetFeature<"thunderx3t110", "ARMProcFamily",
"ThunderX3T110",
"Marvell ThunderX3 processors", [
FeatureAggressiveFMA,
FeatureCRC,
FeatureCrypto,
FeatureFPARMv8,
FeatureArithmeticBccFusion,
FeatureNEON,
FeaturePostRAScheduler,
FeaturePredictableSelectIsExpensive,
FeatureLSE,
FeaturePA,
FeatureUseAA,
FeatureBalanceFPOps,
FeaturePerfMon,
FeatureStrictAlign,
HasV8_3aOps]>;
def ProcThunderX : SubtargetFeature<"thunderx", "ARMProcFamily", "ThunderX",
"Cavium ThunderX processors", [
FeatureCRC,
@ -878,6 +902,8 @@ def : ProcessorModel<"thunderxt81", ThunderXT8XModel, [ProcThunderXT81]>;
def : ProcessorModel<"thunderxt83", ThunderXT8XModel, [ProcThunderXT83]>;
// Cavium ThunderX2T9X Processors. Formerly Broadcom Vulcan.
def : ProcessorModel<"thunderx2t99", ThunderX2T99Model, [ProcThunderX2T99]>;
// Marvell ThunderX3T110 Processors.
def : ProcessorModel<"thunderx3t110", ThunderX3T110Model, [ProcThunderX3T110]>;
// FIXME: HiSilicon TSV110 is currently modeled as a Cortex-A57.
def : ProcessorModel<"tsv110", CortexA57Model, [ProcTSV110]>;

View File

@ -118,9 +118,15 @@ void AArch64BranchTargets::addBTI(MachineBasicBlock &MBB, bool CouldCall,
auto MBBI = MBB.begin();
// PACI[AB]SP are implicitly BTI JC, so no BTI instruction needed there.
if (MBBI != MBB.end() && (MBBI->getOpcode() == AArch64::PACIASP ||
MBBI->getOpcode() == AArch64::PACIBSP))
// Skip the meta instructions, those will be removed anyway.
for (; MBBI != MBB.end() && MBBI->isMetaInstruction(); ++MBBI)
;
// SCTLR_EL1.BT[01] is set to 0 by default which means
// PACI[AB]SP are implicitly BTI C so no BTI C instruction is needed there.
if (MBBI != MBB.end() && HintNum == 34 &&
(MBBI->getOpcode() == AArch64::PACIASP ||
MBBI->getOpcode() == AArch64::PACIBSP))
return;
BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),

View File

@ -26,7 +26,8 @@ def CortexA53Model : SchedMachineModel {
// v 1.0 Spreadsheet
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
}

View File

@ -31,7 +31,8 @@ def CortexA57Model : SchedMachineModel {
let LoopMicroOpBufferSize = 16;
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
}
//===----------------------------------------------------------------------===//

View File

@ -18,7 +18,8 @@ def CycloneModel : SchedMachineModel {
let MispredictPenalty = 16; // 14-19 cycles are typical.
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
}
//===----------------------------------------------------------------------===//

View File

@ -24,7 +24,8 @@ def ExynosM3Model : SchedMachineModel {
let MispredictPenalty = 16; // Minimum branch misprediction penalty.
let CompleteModel = 1; // Use the default model otherwise.
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
}
//===----------------------------------------------------------------------===//

View File

@ -24,7 +24,8 @@ def ExynosM4Model : SchedMachineModel {
let MispredictPenalty = 16; // Minimum branch misprediction penalty.
let CompleteModel = 1; // Use the default model otherwise.
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
}
//===----------------------------------------------------------------------===//

View File

@ -24,7 +24,8 @@ def ExynosM5Model : SchedMachineModel {
let MispredictPenalty = 15; // Minimum branch misprediction penalty.
let CompleteModel = 1; // Use the default model otherwise.
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
}
//===----------------------------------------------------------------------===//

View File

@ -23,8 +23,8 @@ def FalkorModel : SchedMachineModel {
let MispredictPenalty = 11; // Minimum branch misprediction penalty.
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}

View File

@ -27,8 +27,8 @@ def KryoModel : SchedMachineModel {
let LoopMicroOpBufferSize = 16;
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}

View File

@ -25,8 +25,8 @@ def ThunderXT8XModel : SchedMachineModel {
let PostRAScheduler = 1; // Use PostRA scheduler.
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}

View File

@ -25,8 +25,8 @@ def ThunderX2T99Model : SchedMachineModel {
let PostRAScheduler = 1; // Using PostRA sched.
let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F;
list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0;
}

File diff suppressed because it is too large Load Diff

View File

@ -160,6 +160,17 @@ void AArch64Subtarget::initializeProperties() {
PrefFunctionLogAlignment = 4;
PrefLoopLogAlignment = 2;
break;
case ThunderX3T110:
CacheLineSize = 64;
PrefFunctionLogAlignment = 4;
PrefLoopLogAlignment = 2;
MaxInterleaveFactor = 4;
PrefetchDistance = 128;
MinPrefetchStride = 1024;
MaxPrefetchIterationsAhead = 4;
// FIXME: remove this to enable 64-bit SLP if performance looks good.
MinVectorRegisterBitWidth = 128;
break;
}
}

View File

@ -63,7 +63,8 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
ThunderXT81,
ThunderXT83,
ThunderXT88,
TSV110
TSV110,
ThunderX3T110
};
protected:

View File

@ -404,7 +404,7 @@ void X86AsmPrinter::PrintIntelMemReference(const MachineInstr *MI,
static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
char Mode, raw_ostream &O) {
Register Reg = MO.getReg();
bool EmitPercent = true;
bool EmitPercent = MO.getParent()->getInlineAsmDialect() == InlineAsm::AD_ATT;
if (!X86::GR8RegClass.contains(Reg) &&
!X86::GR16RegClass.contains(Reg) &&
@ -443,6 +443,42 @@ static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
return false;
}
static bool printAsmVRegister(X86AsmPrinter &P, const MachineOperand &MO,
char Mode, raw_ostream &O) {
unsigned Reg = MO.getReg();
bool EmitPercent = MO.getParent()->getInlineAsmDialect() == InlineAsm::AD_ATT;
unsigned Index;
if (X86::VR128XRegClass.contains(Reg))
Index = Reg - X86::XMM0;
else if (X86::VR256XRegClass.contains(Reg))
Index = Reg - X86::YMM0;
else if (X86::VR512RegClass.contains(Reg))
Index = Reg - X86::ZMM0;
else
return true;
switch (Mode) {
default: // Unknown mode.
return true;
case 'x': // Print V4SFmode register
Reg = X86::XMM0 + Index;
break;
case 't': // Print V8SFmode register
Reg = X86::YMM0 + Index;
break;
case 'g': // Print V16SFmode register
Reg = X86::ZMM0 + Index;
break;
}
if (EmitPercent)
O << '%';
O << X86ATTInstPrinter::getRegisterName(Reg);
return false;
}
/// PrintAsmOperand - Print out an operand for an inline asm expression.
///
bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
@ -517,6 +553,14 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
PrintOperand(MI, OpNo, O);
return false;
case 'x': // Print V4SFmode register
case 't': // Print V8SFmode register
case 'g': // Print V16SFmode register
if (MO.isReg())
return printAsmVRegister(*this, MO, ExtraCode[0], O);
PrintOperand(MI, OpNo, O);
return false;
case 'P': // This is the operand of a call, treat specially.
PrintPCRelImm(MI, OpNo, O);
return false;

View File

@ -23319,7 +23319,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
for (unsigned i = 0; i != NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
// Must produce 0s in the correct bits.
Elts.push_back(DAG.getConstant(0, dl, ElementType));
continue;
}
auto *ND = cast<ConstantSDNode>(CurrentOp);
@ -23331,7 +23332,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
for (unsigned i = 0; i != NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
// Must produce 0s in the correct bits.
Elts.push_back(DAG.getConstant(0, dl, ElementType));
continue;
}
auto *ND = cast<ConstantSDNode>(CurrentOp);
@ -23343,7 +23345,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
for (unsigned i = 0; i != NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp);
// All shifted in bits must be the same so use 0.
Elts.push_back(DAG.getConstant(0, dl, ElementType));
continue;
}
auto *ND = cast<ConstantSDNode>(CurrentOp);
@ -39699,14 +39702,22 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
assert(EltBits.size() == VT.getVectorNumElements() &&
"Unexpected shift value type");
for (APInt &Elt : EltBits) {
if (X86ISD::VSHLI == Opcode)
// Undef elements need to fold to 0. It's possible SimplifyDemandedBits
// created an undef input due to no input bits being demanded, but user
// still expects 0 in other bits.
for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
APInt &Elt = EltBits[i];
if (UndefElts[i])
Elt = 0;
else if (X86ISD::VSHLI == Opcode)
Elt <<= ShiftVal;
else if (X86ISD::VSRAI == Opcode)
Elt.ashrInPlace(ShiftVal);
else
Elt.lshrInPlace(ShiftVal);
}
// Reset undef elements since they were zeroed above.
UndefElts = 0;
return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
}

View File

@ -3956,6 +3956,8 @@ static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
MIB->setDesc(TII.get(X86::POP32r));
}
MIB->RemoveOperand(1);
MIB->addImplicitDefUseOperands(*MBB.getParent());
// Build CFI if necessary.
MachineFunction &MF = *MBB.getParent();

View File

@ -527,19 +527,19 @@ namespace {
// Collect information about PHI nodes which can be transformed in
// rewriteLoopExitValues.
struct RewritePhi {
PHINode *PN;
PHINode *PN; // For which PHI node is this replacement?
unsigned Ith; // For which incoming value?
const SCEV *ExpansionSCEV; // The SCEV of the incoming value we are rewriting.
Instruction *ExpansionPoint; // Where we'd like to expand that SCEV?
bool HighCost; // Is this expansion a high-cost?
// Ith incoming value.
unsigned Ith;
Value *Expansion = nullptr;
bool ValidRewrite = false;
// Exit value after expansion.
Value *Val;
// High Cost when expansion.
bool HighCost;
RewritePhi(PHINode *P, unsigned I, Value *V, bool H)
: PN(P), Ith(I), Val(V), HighCost(H) {}
RewritePhi(PHINode *P, unsigned I, const SCEV *Val, Instruction *ExpansionPt,
bool H)
: PN(P), Ith(I), ExpansionSCEV(Val), ExpansionPoint(ExpansionPt),
HighCost(H) {}
};
} // end anonymous namespace
@ -671,41 +671,65 @@ bool IndVarSimplify::rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
hasHardUserWithinLoop(L, Inst))
continue;
// Check if expansions of this SCEV would count as being high cost.
bool HighCost = Rewriter.isHighCostExpansion(ExitValue, L, Inst);
Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst);
LLVM_DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal
<< '\n'
<< " LoopVal = " << *Inst << "\n");
if (!isValidRewrite(Inst, ExitVal)) {
DeadInsts.push_back(ExitVal);
continue;
}
#ifndef NDEBUG
// If we reuse an instruction from a loop which is neither L nor one of
// its containing loops, we end up breaking LCSSA form for this loop by
// creating a new use of its instruction.
if (auto *ExitInsn = dyn_cast<Instruction>(ExitVal))
if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
if (EVL != L)
assert(EVL->contains(L) && "LCSSA breach detected!");
#endif
// Note that we must not perform expansions until after
// we query *all* the costs, because if we perform a temporary expansion
// in between, one that we might not intend to keep, said expansion
// *may* affect the cost calculation of the next SCEVs we'll query,
// and the next SCEV may erroneously get a smaller cost.
// Collect all the candidate PHINodes to be rewritten.
RewritePhiSet.emplace_back(PN, i, ExitVal, HighCost);
RewritePhiSet.emplace_back(PN, i, ExitValue, Inst, HighCost);
}
}
}
// Now that we've done preliminary filtering and billed all the SCEV's,
// we can perform the last sanity check - the expansion must be valid.
for (RewritePhi &Phi : RewritePhiSet) {
Phi.Expansion = Rewriter.expandCodeFor(Phi.ExpansionSCEV, Phi.PN->getType(),
Phi.ExpansionPoint);
LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: AfterLoopVal = "
<< *(Phi.Expansion) << '\n'
<< " LoopVal = " << *(Phi.ExpansionPoint) << "\n");
// FIXME: isValidRewrite() is a hack. it should be an assert, eventually.
Phi.ValidRewrite = isValidRewrite(Phi.ExpansionPoint, Phi.Expansion);
if (!Phi.ValidRewrite) {
DeadInsts.push_back(Phi.Expansion);
continue;
}
#ifndef NDEBUG
// If we reuse an instruction from a loop which is neither L nor one of
// its containing loops, we end up breaking LCSSA form for this loop by
// creating a new use of its instruction.
if (auto *ExitInsn = dyn_cast<Instruction>(Phi.Expansion))
if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
if (EVL != L)
assert(EVL->contains(L) && "LCSSA breach detected!");
#endif
}
// TODO: after isValidRewrite() is an assertion, evaluate whether
// it is beneficial to change how we calculate high-cost:
// if we have SCEV 'A' which we know we will expand, should we calculate
// the cost of other SCEV's after expanding SCEV 'A',
// thus potentially giving cost bonus to those other SCEV's?
bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);
bool Changed = false;
// Transformation.
for (const RewritePhi &Phi : RewritePhiSet) {
if (!Phi.ValidRewrite)
continue;
PHINode *PN = Phi.PN;
Value *ExitVal = Phi.Val;
Value *ExitVal = Phi.Expansion;
// Only do the rewrite when the ExitValue can be expanded cheaply.
// If LoopCanBeDel is true, rewrite exit value aggressively.
@ -844,6 +868,8 @@ bool IndVarSimplify::canLoopBeDeleted(
// phase later. Skip it in the loop invariant check below.
bool found = false;
for (const RewritePhi &Phi : RewritePhiSet) {
if (!Phi.ValidRewrite)
continue;
unsigned i = Phi.Ith;
if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
found = true;

View File

@ -369,7 +369,8 @@ Value *Mapper::mapValue(const Value *V) {
if (NewTy != IA->getFunctionType())
V = InlineAsm::get(NewTy, IA->getAsmString(), IA->getConstraintString(),
IA->hasSideEffects(), IA->isAlignStack());
IA->hasSideEffects(), IA->isAlignStack(),
IA->getDialect());
}
return getVM()[V] = const_cast<Value *>(V);

View File

@ -1,14 +1,14 @@
// $FreeBSD$
#define LLVM_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145"
#define LLVM_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"
#define CLANG_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145"
#define CLANG_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define CLANG_REPOSITORY "git@github.com:llvm/llvm-project.git"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
#define LLD_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145-1300007"
#define LLD_REVISION "llvmorg-10.0.0-97-g6f71678ecd2-1300007"
#define LLD_REPOSITORY "FreeBSD"
#define LLDB_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145"
#define LLDB_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define LLDB_REPOSITORY "git@github.com:llvm/llvm-project.git"

View File

@ -1,3 +1,3 @@
/* $FreeBSD$ */
#define LLVM_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145"
#define LLVM_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"