Merge llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and openmp

llvmorg-10.0.0-97-g6f71678ecd2 (not quite 10.0.1 rc2, as more fixes are
still pending).

MFC after:	3 weeks
This commit is contained in:
Dimitry Andric 2020-06-20 20:06:52 +00:00
commit e837bb5cfb
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=362445
32 changed files with 2400 additions and 242 deletions

View File

@ -531,6 +531,7 @@ llvm/lib/ExecutionEngine/PerfJITEvents/CMakeLists.txt
llvm/lib/ExecutionEngine/PerfJITEvents/LLVMBuild.txt llvm/lib/ExecutionEngine/PerfJITEvents/LLVMBuild.txt
llvm/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt llvm/lib/ExecutionEngine/RuntimeDyld/CMakeLists.txt
llvm/lib/ExecutionEngine/RuntimeDyld/LLVMBuild.txt llvm/lib/ExecutionEngine/RuntimeDyld/LLVMBuild.txt
llvm/lib/Extensions/
llvm/lib/Frontend/CMakeLists.txt llvm/lib/Frontend/CMakeLists.txt
llvm/lib/Frontend/LLVMBuild.txt llvm/lib/Frontend/LLVMBuild.txt
llvm/lib/Frontend/OpenMP/CMakeLists.txt llvm/lib/Frontend/OpenMP/CMakeLists.txt

View File

@ -3817,6 +3817,9 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
SourceLocation LAngleLoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn, ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc) { SourceLocation RAngleLoc) {
if (SS.isInvalid())
return TypeResult(true);
TemplateName Template = TemplateD.get(); TemplateName Template = TemplateD.get();
// Translate the parser's template argument list in our AST format. // Translate the parser's template argument list in our AST format.
@ -5925,7 +5928,9 @@ bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType( bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
const DependentTemplateSpecializationType* T) { const DependentTemplateSpecializationType* T) {
return VisitNestedNameSpecifier(T->getQualifier()); if (auto *Q = T->getQualifier())
return VisitNestedNameSpecifier(Q);
return false;
} }
bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType( bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
@ -5979,6 +5984,7 @@ bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier( bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
NestedNameSpecifier *NNS) { NestedNameSpecifier *NNS) {
assert(NNS);
if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix())) if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
return true; return true;

View File

@ -335,14 +335,38 @@ class TokenCollector::CollectPPExpansions : public PPCallbacks {
SourceRange Range, const MacroArgs *Args) override { SourceRange Range, const MacroArgs *Args) override {
if (!Collector) if (!Collector)
return; return;
// Only record top-level expansions, not those where: const auto &SM = Collector->PP.getSourceManager();
// Only record top-level expansions that directly produce expanded tokens.
// This excludes those where:
// - the macro use is inside a macro body, // - the macro use is inside a macro body,
// - the macro appears in an argument to another macro. // - the macro appears in an argument to another macro.
if (!MacroNameTok.getLocation().isFileID() || // However macro expansion isn't really a tree, it's token rewrite rules,
(LastExpansionEnd.isValid() && // so there are other cases, e.g.
Collector->PP.getSourceManager().isBeforeInTranslationUnit( // #define B(X) X
Range.getBegin(), LastExpansionEnd))) // #define A 1 + B
// A(2)
// Both A and B produce expanded tokens, though the macro name 'B' comes
// from an expansion. The best we can do is merge the mappings for both.
// The *last* token of any top-level macro expansion must be in a file.
// (In the example above, see the closing paren of the expansion of B).
if (!Range.getEnd().isFileID())
return; return;
// If there's a current expansion that encloses this one, this one can't be
// top-level.
if (LastExpansionEnd.isValid() &&
!SM.isBeforeInTranslationUnit(LastExpansionEnd, Range.getEnd()))
return;
// If the macro invocation (B) starts in a macro (A) but ends in a file,
// we'll create a merged mapping for A + B by overwriting the endpoint for
// A's startpoint.
if (!Range.getBegin().isFileID()) {
Range.setBegin(SM.getExpansionLoc(Range.getBegin()));
assert(Collector->Expansions.count(Range.getBegin().getRawEncoding()) &&
"Overlapping macros should have same expansion location");
}
Collector->Expansions[Range.getBegin().getRawEncoding()] = Range.getEnd(); Collector->Expansions[Range.getBegin().getRawEncoding()] = Range.getEnd();
LastExpansionEnd = Range.getEnd(); LastExpansionEnd = Range.getEnd();
} }
@ -399,197 +423,167 @@ class TokenCollector::Builder {
} }
TokenBuffer build() && { TokenBuffer build() && {
buildSpelledTokens();
// Walk over expanded tokens and spelled tokens in parallel, building the
// mappings between those using source locations.
// To correctly recover empty macro expansions, we also take locations
// reported to PPCallbacks::MacroExpands into account as we do not have any
// expanded tokens with source locations to guide us.
// The 'eof' token is special, it is not part of spelled token stream. We
// handle it separately at the end.
assert(!Result.ExpandedTokens.empty()); assert(!Result.ExpandedTokens.empty());
assert(Result.ExpandedTokens.back().kind() == tok::eof); assert(Result.ExpandedTokens.back().kind() == tok::eof);
for (unsigned I = 0; I < Result.ExpandedTokens.size() - 1; ++I) {
// (!) I might be updated by the following call. // Tokenize every file that contributed tokens to the expanded stream.
processExpandedToken(I); buildSpelledTokens();
// The expanded token stream consists of runs of tokens that came from
// the same source (a macro expansion, part of a file etc).
// Between these runs are the logical positions of spelled tokens that
// didn't expand to anything.
while (NextExpanded < Result.ExpandedTokens.size() - 1 /* eof */) {
// Create empty mappings for spelled tokens that expanded to nothing here.
// May advance NextSpelled, but NextExpanded is unchanged.
discard();
// Create mapping for a contiguous run of expanded tokens.
// Advances NextExpanded past the run, and NextSpelled accordingly.
unsigned OldPosition = NextExpanded;
advance();
if (NextExpanded == OldPosition)
diagnoseAdvanceFailure();
} }
// If any tokens remain in any of the files, they didn't expand to anything.
// 'eof' not handled in the loop, do it here. // Create empty mappings up until the end of the file.
assert(SM.getMainFileID() == for (const auto &File : Result.Files)
SM.getFileID(Result.ExpandedTokens.back().location())); discard(File.first);
fillGapUntil(Result.Files[SM.getMainFileID()],
Result.ExpandedTokens.back().location(),
Result.ExpandedTokens.size() - 1);
Result.Files[SM.getMainFileID()].EndExpanded = Result.ExpandedTokens.size();
// Some files might have unaccounted spelled tokens at the end, add an empty
// mapping for those as they did not have expanded counterparts.
fillGapsAtEndOfFiles();
return std::move(Result); return std::move(Result);
} }
private: private:
/// Process the next token in an expanded stream and move corresponding // Consume a sequence of spelled tokens that didn't expand to anything.
/// spelled tokens, record any mapping if needed. // In the simplest case, skips spelled tokens until finding one that produced
/// (!) \p I will be updated if this had to skip tokens, e.g. for macros. // the NextExpanded token, and creates an empty mapping for them.
void processExpandedToken(unsigned &I) { // If Drain is provided, skips remaining tokens from that file instead.
auto L = Result.ExpandedTokens[I].location(); void discard(llvm::Optional<FileID> Drain = llvm::None) {
if (L.isMacroID()) { SourceLocation Target =
processMacroExpansion(SM.getExpansionRange(L), I); Drain ? SM.getLocForEndOfFile(*Drain)
return; : SM.getExpansionLoc(
Result.ExpandedTokens[NextExpanded].location());
FileID File = SM.getFileID(Target);
const auto &SpelledTokens = Result.Files[File].SpelledTokens;
auto &NextSpelled = this->NextSpelled[File];
TokenBuffer::Mapping Mapping;
Mapping.BeginSpelled = NextSpelled;
// When dropping trailing tokens from a file, the empty mapping should
// be positioned within the file's expanded-token range (at the end).
Mapping.BeginExpanded = Mapping.EndExpanded =
Drain ? Result.Files[*Drain].EndExpanded : NextExpanded;
// We may want to split into several adjacent empty mappings.
// FlushMapping() emits the current mapping and starts a new one.
auto FlushMapping = [&, this] {
Mapping.EndSpelled = NextSpelled;
if (Mapping.BeginSpelled != Mapping.EndSpelled)
Result.Files[File].Mappings.push_back(Mapping);
Mapping.BeginSpelled = NextSpelled;
};
while (NextSpelled < SpelledTokens.size() &&
SpelledTokens[NextSpelled].location() < Target) {
// If we know mapping bounds at [NextSpelled, KnownEnd] (macro expansion)
// then we want to partition our (empty) mapping.
// [Start, NextSpelled) [NextSpelled, KnownEnd] (KnownEnd, Target)
SourceLocation KnownEnd = CollectedExpansions.lookup(
SpelledTokens[NextSpelled].location().getRawEncoding());
if (KnownEnd.isValid()) {
FlushMapping(); // Emits [Start, NextSpelled)
while (NextSpelled < SpelledTokens.size() &&
SpelledTokens[NextSpelled].location() <= KnownEnd)
++NextSpelled;
FlushMapping(); // Emits [NextSpelled, KnownEnd]
        // Now the loop continues and will emit (KnownEnd, Target).
} else {
++NextSpelled;
}
} }
if (L.isFileID()) { FlushMapping();
auto FID = SM.getFileID(L); }
TokenBuffer::MarkedFile &File = Result.Files[FID];
fillGapUntil(File, L, I); // Consumes the NextExpanded token and others that are part of the same run.
// Increases NextExpanded and NextSpelled by at least one, and adds a mapping
// (unless this is a run of file tokens, which we represent with no mapping).
void advance() {
const syntax::Token &Tok = Result.ExpandedTokens[NextExpanded];
SourceLocation Expansion = SM.getExpansionLoc(Tok.location());
FileID File = SM.getFileID(Expansion);
const auto &SpelledTokens = Result.Files[File].SpelledTokens;
auto &NextSpelled = this->NextSpelled[File];
// Skip the token. if (Tok.location().isFileID()) {
assert(File.SpelledTokens[NextSpelled[FID]].location() == L && // A run of file tokens continues while the expanded/spelled tokens match.
"no corresponding token in the spelled stream"); while (NextSpelled < SpelledTokens.size() &&
++NextSpelled[FID]; NextExpanded < Result.ExpandedTokens.size() &&
return; SpelledTokens[NextSpelled].location() ==
Result.ExpandedTokens[NextExpanded].location()) {
++NextSpelled;
++NextExpanded;
}
// We need no mapping for file tokens copied to the expanded stream.
} else {
// We found a new macro expansion. We should have its spelling bounds.
auto End = CollectedExpansions.lookup(Expansion.getRawEncoding());
assert(End.isValid() && "Macro expansion wasn't captured?");
// Mapping starts here...
TokenBuffer::Mapping Mapping;
Mapping.BeginExpanded = NextExpanded;
Mapping.BeginSpelled = NextSpelled;
// ... consumes spelled tokens within bounds we captured ...
while (NextSpelled < SpelledTokens.size() &&
SpelledTokens[NextSpelled].location() <= End)
++NextSpelled;
// ... consumes expanded tokens rooted at the same expansion ...
while (NextExpanded < Result.ExpandedTokens.size() &&
SM.getExpansionLoc(
Result.ExpandedTokens[NextExpanded].location()) == Expansion)
++NextExpanded;
// ... and ends here.
Mapping.EndExpanded = NextExpanded;
Mapping.EndSpelled = NextSpelled;
Result.Files[File].Mappings.push_back(Mapping);
} }
} }
/// Skipped expanded and spelled tokens of a macro expansion that covers \p // advance() is supposed to consume at least one token - if not, we crash.
/// SpelledRange. Add a corresponding mapping. void diagnoseAdvanceFailure() {
/// (!) \p I will be the index of the last token in an expansion after this #ifndef NDEBUG
/// function returns. // Show the failed-to-map token in context.
void processMacroExpansion(CharSourceRange SpelledRange, unsigned &I) { for (unsigned I = (NextExpanded < 10) ? 0 : NextExpanded - 10;
auto FID = SM.getFileID(SpelledRange.getBegin()); I < NextExpanded + 5 && I < Result.ExpandedTokens.size(); ++I) {
assert(FID == SM.getFileID(SpelledRange.getEnd())); const char *L =
TokenBuffer::MarkedFile &File = Result.Files[FID]; (I == NextExpanded) ? "!! " : (I < NextExpanded) ? "ok " : " ";
llvm::errs() << L << Result.ExpandedTokens[I].dumpForTests(SM) << "\n";
fillGapUntil(File, SpelledRange.getBegin(), I);
// Skip all expanded tokens from the same macro expansion.
unsigned BeginExpanded = I;
for (; I + 1 < Result.ExpandedTokens.size(); ++I) {
auto NextL = Result.ExpandedTokens[I + 1].location();
if (!NextL.isMacroID() ||
SM.getExpansionLoc(NextL) != SpelledRange.getBegin())
break;
} }
unsigned EndExpanded = I + 1; #endif
consumeMapping(File, SM.getFileOffset(SpelledRange.getEnd()), BeginExpanded, llvm_unreachable("Couldn't map expanded token to spelled tokens!");
EndExpanded, NextSpelled[FID]);
} }
/// Initializes TokenBuffer::Files and fills spelled tokens and expanded /// Initializes TokenBuffer::Files and fills spelled tokens and expanded
/// ranges for each of the files. /// ranges for each of the files.
void buildSpelledTokens() { void buildSpelledTokens() {
for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) { for (unsigned I = 0; I < Result.ExpandedTokens.size(); ++I) {
auto FID = const auto &Tok = Result.ExpandedTokens[I];
SM.getFileID(SM.getExpansionLoc(Result.ExpandedTokens[I].location())); auto FID = SM.getFileID(SM.getExpansionLoc(Tok.location()));
auto It = Result.Files.try_emplace(FID); auto It = Result.Files.try_emplace(FID);
TokenBuffer::MarkedFile &File = It.first->second; TokenBuffer::MarkedFile &File = It.first->second;
File.EndExpanded = I + 1; // The eof token should not be considered part of the main-file's range.
File.EndExpanded = Tok.kind() == tok::eof ? I : I + 1;
if (!It.second) if (!It.second)
continue; // we have seen this file before. continue; // we have seen this file before.
// This is the first time we see this file. // This is the first time we see this file.
File.BeginExpanded = I; File.BeginExpanded = I;
File.SpelledTokens = tokenize(FID, SM, LangOpts); File.SpelledTokens = tokenize(FID, SM, LangOpts);
} }
} }
void consumeEmptyMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
unsigned ExpandedIndex, unsigned &SpelledIndex) {
consumeMapping(File, EndOffset, ExpandedIndex, ExpandedIndex, SpelledIndex);
}
/// Consumes spelled tokens that form a macro expansion and adds a entry to
/// the resulting token buffer.
/// (!) SpelledIndex is updated in-place.
void consumeMapping(TokenBuffer::MarkedFile &File, unsigned EndOffset,
unsigned BeginExpanded, unsigned EndExpanded,
unsigned &SpelledIndex) {
// We need to record this mapping before continuing.
unsigned MappingBegin = SpelledIndex;
++SpelledIndex;
bool HitMapping =
tryConsumeSpelledUntil(File, EndOffset + 1, SpelledIndex).hasValue();
(void)HitMapping;
assert(!HitMapping && "recursive macro expansion?");
TokenBuffer::Mapping M;
M.BeginExpanded = BeginExpanded;
M.EndExpanded = EndExpanded;
M.BeginSpelled = MappingBegin;
M.EndSpelled = SpelledIndex;
File.Mappings.push_back(M);
}
/// Consumes spelled tokens until location \p L is reached and adds a mapping
/// covering the consumed tokens. The mapping will point to an empty expanded
/// range at position \p ExpandedIndex.
void fillGapUntil(TokenBuffer::MarkedFile &File, SourceLocation L,
unsigned ExpandedIndex) {
assert(L.isFileID());
FileID FID;
unsigned Offset;
std::tie(FID, Offset) = SM.getDecomposedLoc(L);
unsigned &SpelledIndex = NextSpelled[FID];
unsigned MappingBegin = SpelledIndex;
while (true) {
auto EndLoc = tryConsumeSpelledUntil(File, Offset, SpelledIndex);
if (SpelledIndex != MappingBegin) {
TokenBuffer::Mapping M;
M.BeginSpelled = MappingBegin;
M.EndSpelled = SpelledIndex;
M.BeginExpanded = M.EndExpanded = ExpandedIndex;
File.Mappings.push_back(M);
}
if (!EndLoc)
break;
consumeEmptyMapping(File, SM.getFileOffset(*EndLoc), ExpandedIndex,
SpelledIndex);
MappingBegin = SpelledIndex;
}
};
/// Consumes spelled tokens until it reaches Offset or a mapping boundary,
/// i.e. a name of a macro expansion or the start '#' token of a PP directive.
/// (!) NextSpelled is updated in place.
///
/// returns None if \p Offset was reached, otherwise returns the end location
/// of a mapping that starts at \p NextSpelled.
llvm::Optional<SourceLocation>
tryConsumeSpelledUntil(TokenBuffer::MarkedFile &File, unsigned Offset,
unsigned &NextSpelled) {
for (; NextSpelled < File.SpelledTokens.size(); ++NextSpelled) {
auto L = File.SpelledTokens[NextSpelled].location();
if (Offset <= SM.getFileOffset(L))
return llvm::None; // reached the offset we are looking for.
auto Mapping = CollectedExpansions.find(L.getRawEncoding());
if (Mapping != CollectedExpansions.end())
return Mapping->second; // found a mapping before the offset.
}
return llvm::None; // no more tokens, we "reached" the offset.
}
/// Adds empty mappings for unconsumed spelled tokens at the end of each file.
void fillGapsAtEndOfFiles() {
for (auto &F : Result.Files) {
if (F.second.SpelledTokens.empty())
continue;
fillGapUntil(F.second, F.second.SpelledTokens.back().endLocation(),
F.second.EndExpanded);
}
}
TokenBuffer Result; TokenBuffer Result;
/// For each file, a position of the next spelled token we will consume. unsigned NextExpanded = 0; // cursor in ExpandedTokens
llvm::DenseMap<FileID, unsigned> NextSpelled; llvm::DenseMap<FileID, unsigned> NextSpelled; // cursor in SpelledTokens
PPExpansions CollectedExpansions; PPExpansions CollectedExpansions;
const SourceManager &SM; const SourceManager &SM;
const LangOptions &LangOpts; const LangOptions &LangOpts;

View File

@ -2825,6 +2825,7 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
if (R.isSubClassOf(InhClass)) if (R.isSubClassOf(InhClass))
OS << " bool isInherited = Record.readInt();\n"; OS << " bool isInherited = Record.readInt();\n";
OS << " bool isImplicit = Record.readInt();\n"; OS << " bool isImplicit = Record.readInt();\n";
OS << " bool isPackExpansion = Record.readInt();\n";
ArgRecords = R.getValueAsListOfDefs("Args"); ArgRecords = R.getValueAsListOfDefs("Args");
Args.clear(); Args.clear();
for (const auto *Arg : ArgRecords) { for (const auto *Arg : ArgRecords) {
@ -2840,6 +2841,7 @@ void EmitClangAttrPCHRead(RecordKeeper &Records, raw_ostream &OS) {
if (R.isSubClassOf(InhClass)) if (R.isSubClassOf(InhClass))
OS << " cast<InheritableAttr>(New)->setInherited(isInherited);\n"; OS << " cast<InheritableAttr>(New)->setInherited(isInherited);\n";
OS << " New->setImplicit(isImplicit);\n"; OS << " New->setImplicit(isImplicit);\n";
OS << " New->setPackExpansion(isPackExpansion);\n";
OS << " break;\n"; OS << " break;\n";
OS << " }\n"; OS << " }\n";
} }
@ -2866,6 +2868,7 @@ void EmitClangAttrPCHWrite(RecordKeeper &Records, raw_ostream &OS) {
if (R.isSubClassOf(InhClass)) if (R.isSubClassOf(InhClass))
OS << " Record.push_back(SA->isInherited());\n"; OS << " Record.push_back(SA->isInherited());\n";
OS << " Record.push_back(A->isImplicit());\n"; OS << " Record.push_back(A->isImplicit());\n";
OS << " Record.push_back(A->isPackExpansion());\n";
for (const auto *Arg : Args) for (const auto *Arg : Args)
createArgument(*Arg, R.getName())->writePCHWrite(OS); createArgument(*Arg, R.getName())->writePCHWrite(OS);

View File

@ -486,7 +486,9 @@ class ImportThunkChunkX86 : public ImportThunkChunk {
class ImportThunkChunkARM : public ImportThunkChunk { class ImportThunkChunkARM : public ImportThunkChunk {
public: public:
explicit ImportThunkChunkARM(Defined *s) : ImportThunkChunk(s) {} explicit ImportThunkChunkARM(Defined *s) : ImportThunkChunk(s) {
setAlignment(2);
}
size_t getSize() const override { return sizeof(importThunkARM); } size_t getSize() const override { return sizeof(importThunkARM); }
void getBaserels(std::vector<Baserel> *res) override; void getBaserels(std::vector<Baserel> *res) override;
void writeTo(uint8_t *buf) const override; void writeTo(uint8_t *buf) const override;
@ -494,14 +496,16 @@ class ImportThunkChunkARM : public ImportThunkChunk {
class ImportThunkChunkARM64 : public ImportThunkChunk { class ImportThunkChunkARM64 : public ImportThunkChunk {
public: public:
explicit ImportThunkChunkARM64(Defined *s) : ImportThunkChunk(s) {} explicit ImportThunkChunkARM64(Defined *s) : ImportThunkChunk(s) {
setAlignment(4);
}
size_t getSize() const override { return sizeof(importThunkARM64); } size_t getSize() const override { return sizeof(importThunkARM64); }
void writeTo(uint8_t *buf) const override; void writeTo(uint8_t *buf) const override;
}; };
class RangeExtensionThunkARM : public NonSectionChunk { class RangeExtensionThunkARM : public NonSectionChunk {
public: public:
explicit RangeExtensionThunkARM(Defined *t) : target(t) {} explicit RangeExtensionThunkARM(Defined *t) : target(t) { setAlignment(2); }
size_t getSize() const override; size_t getSize() const override;
void writeTo(uint8_t *buf) const override; void writeTo(uint8_t *buf) const override;

View File

@ -365,7 +365,9 @@ class TailMergeChunkX86 : public NonSectionChunk {
class ThunkChunkARM : public NonSectionChunk { class ThunkChunkARM : public NonSectionChunk {
public: public:
ThunkChunkARM(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {} ThunkChunkARM(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {
setAlignment(2);
}
size_t getSize() const override { return sizeof(thunkARM); } size_t getSize() const override { return sizeof(thunkARM); }
@ -385,7 +387,9 @@ class ThunkChunkARM : public NonSectionChunk {
class TailMergeChunkARM : public NonSectionChunk { class TailMergeChunkARM : public NonSectionChunk {
public: public:
TailMergeChunkARM(Chunk *d, Defined *h) : desc(d), helper(h) {} TailMergeChunkARM(Chunk *d, Defined *h) : desc(d), helper(h) {
setAlignment(2);
}
size_t getSize() const override { return sizeof(tailMergeARM); } size_t getSize() const override { return sizeof(tailMergeARM); }
@ -405,7 +409,9 @@ class TailMergeChunkARM : public NonSectionChunk {
class ThunkChunkARM64 : public NonSectionChunk { class ThunkChunkARM64 : public NonSectionChunk {
public: public:
ThunkChunkARM64(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {} ThunkChunkARM64(Defined *i, Chunk *tm) : imp(i), tailMerge(tm) {
setAlignment(4);
}
size_t getSize() const override { return sizeof(thunkARM64); } size_t getSize() const override { return sizeof(thunkARM64); }
@ -422,7 +428,9 @@ class ThunkChunkARM64 : public NonSectionChunk {
class TailMergeChunkARM64 : public NonSectionChunk { class TailMergeChunkARM64 : public NonSectionChunk {
public: public:
TailMergeChunkARM64(Chunk *d, Defined *h) : desc(d), helper(h) {} TailMergeChunkARM64(Chunk *d, Defined *h) : desc(d), helper(h) {
setAlignment(4);
}
size_t getSize() const override { return sizeof(tailMergeARM64); } size_t getSize() const override { return sizeof(tailMergeARM64); }

View File

@ -52,6 +52,8 @@ StringRef ScriptLexer::getLine() {
// Returns 1-based line number of the current token. // Returns 1-based line number of the current token.
size_t ScriptLexer::getLineNumber() { size_t ScriptLexer::getLineNumber() {
if (pos == 0)
return 1;
StringRef s = getCurrentMB().getBuffer(); StringRef s = getCurrentMB().getBuffer();
StringRef tok = tokens[pos - 1]; StringRef tok = tokens[pos - 1];
return s.substr(0, tok.data() - s.data()).count('\n') + 1; return s.substr(0, tok.data() - s.data()).count('\n') + 1;
@ -292,7 +294,9 @@ static bool encloses(StringRef s, StringRef t) {
MemoryBufferRef ScriptLexer::getCurrentMB() { MemoryBufferRef ScriptLexer::getCurrentMB() {
// Find input buffer containing the current token. // Find input buffer containing the current token.
assert(!mbs.empty() && pos > 0); assert(!mbs.empty());
if (pos == 0)
return mbs.back();
for (MemoryBufferRef mb : mbs) for (MemoryBufferRef mb : mbs)
if (encloses(mb.getBuffer(), tokens[pos - 1])) if (encloses(mb.getBuffer(), tokens[pos - 1]))
return mb; return mb;

View File

@ -737,6 +737,7 @@ bool ScriptParser::readSectionDirective(OutputSection *cmd, StringRef tok1, Stri
expect("("); expect("(");
if (consume("NOLOAD")) { if (consume("NOLOAD")) {
cmd->noload = true; cmd->noload = true;
cmd->type = SHT_NOBITS;
} else { } else {
skip(); // This is "COPY", "INFO" or "OVERLAY". skip(); // This is "COPY", "INFO" or "OVERLAY".
cmd->nonAlloc = true; cmd->nonAlloc = true;

View File

@ -152,6 +152,10 @@ AARCH64_CPU_NAME("kryo", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC)) (AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderx2t99", ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false, AARCH64_CPU_NAME("thunderx2t99", ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_NONE)) (AArch64::AEK_NONE))
AARCH64_CPU_NAME("thunderx3t110", ARMV8_3A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC | AEK_CRYPTO | AEK_FP | AEK_SIMD |
AEK_LSE | AEK_RAND | AArch64::AEK_PROFILE |
AArch64::AEK_RAS))
AARCH64_CPU_NAME("thunderx", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AARCH64_CPU_NAME("thunderx", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_CRC | AArch64::AEK_PROFILE)) (AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt88", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false, AARCH64_CPU_NAME("thunderxt88", ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,

View File

@ -963,10 +963,10 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
continue; continue;
} }
// If one of the blocks is the entire common tail (and not the entry // If one of the blocks is the entire common tail (and is not the entry
// block, which we can't jump to), we can treat all blocks with this same // block/an EH pad, which we can't jump to), we can treat all blocks with
// tail at once. Use PredBB if that is one of the possibilities, as that // this same tail at once. Use PredBB if that is one of the possibilities,
// will not introduce any extra branches. // as that will not introduce any extra branches.
MachineBasicBlock *EntryBB = MachineBasicBlock *EntryBB =
&MergePotentials.front().getBlock()->getParent()->front(); &MergePotentials.front().getBlock()->getParent()->front();
unsigned commonTailIndex = SameTails.size(); unsigned commonTailIndex = SameTails.size();
@ -974,19 +974,21 @@ bool BranchFolder::TryTailMergeBlocks(MachineBasicBlock *SuccBB,
// into the other. // into the other.
if (SameTails.size() == 2 && if (SameTails.size() == 2 &&
SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) && SameTails[0].getBlock()->isLayoutSuccessor(SameTails[1].getBlock()) &&
SameTails[1].tailIsWholeBlock()) SameTails[1].tailIsWholeBlock() && !SameTails[1].getBlock()->isEHPad())
commonTailIndex = 1; commonTailIndex = 1;
else if (SameTails.size() == 2 && else if (SameTails.size() == 2 &&
SameTails[1].getBlock()->isLayoutSuccessor( SameTails[1].getBlock()->isLayoutSuccessor(
SameTails[0].getBlock()) && SameTails[0].getBlock()) &&
SameTails[0].tailIsWholeBlock()) SameTails[0].tailIsWholeBlock() &&
!SameTails[0].getBlock()->isEHPad())
commonTailIndex = 0; commonTailIndex = 0;
else { else {
// Otherwise just pick one, favoring the fall-through predecessor if // Otherwise just pick one, favoring the fall-through predecessor if
// there is one. // there is one.
for (unsigned i = 0, e = SameTails.size(); i != e; ++i) { for (unsigned i = 0, e = SameTails.size(); i != e; ++i) {
MachineBasicBlock *MBB = SameTails[i].getBlock(); MachineBasicBlock *MBB = SameTails[i].getBlock();
if (MBB == EntryBB && SameTails[i].tailIsWholeBlock()) if ((MBB == EntryBB || MBB->isEHPad()) &&
SameTails[i].tailIsWholeBlock())
continue; continue;
if (MBB == PredBB) { if (MBB == PredBB) {
commonTailIndex = i; commonTailIndex = i;

View File

@ -443,6 +443,10 @@ def SVEUnsupported : AArch64Unsupported {
HasSVE2BitPerm]; HasSVE2BitPerm];
} }
def PAUnsupported : AArch64Unsupported {
let F = [HasPA];
}
include "AArch64SchedA53.td" include "AArch64SchedA53.td"
include "AArch64SchedA57.td" include "AArch64SchedA57.td"
include "AArch64SchedCyclone.td" include "AArch64SchedCyclone.td"
@ -453,6 +457,7 @@ include "AArch64SchedExynosM4.td"
include "AArch64SchedExynosM5.td" include "AArch64SchedExynosM5.td"
include "AArch64SchedThunderX.td" include "AArch64SchedThunderX.td"
include "AArch64SchedThunderX2T99.td" include "AArch64SchedThunderX2T99.td"
include "AArch64SchedThunderX3T110.td"
def ProcA35 : SubtargetFeature<"a35", "ARMProcFamily", "CortexA35", def ProcA35 : SubtargetFeature<"a35", "ARMProcFamily", "CortexA35",
"Cortex-A35 ARM processors", [ "Cortex-A35 ARM processors", [
@ -780,6 +785,25 @@ def ProcThunderX2T99 : SubtargetFeature<"thunderx2t99", "ARMProcFamily",
FeatureLSE, FeatureLSE,
HasV8_1aOps]>; HasV8_1aOps]>;
def ProcThunderX3T110 : SubtargetFeature<"thunderx3t110", "ARMProcFamily",
"ThunderX3T110",
"Marvell ThunderX3 processors", [
FeatureAggressiveFMA,
FeatureCRC,
FeatureCrypto,
FeatureFPARMv8,
FeatureArithmeticBccFusion,
FeatureNEON,
FeaturePostRAScheduler,
FeaturePredictableSelectIsExpensive,
FeatureLSE,
FeaturePA,
FeatureUseAA,
FeatureBalanceFPOps,
FeaturePerfMon,
FeatureStrictAlign,
HasV8_3aOps]>;
def ProcThunderX : SubtargetFeature<"thunderx", "ARMProcFamily", "ThunderX", def ProcThunderX : SubtargetFeature<"thunderx", "ARMProcFamily", "ThunderX",
"Cavium ThunderX processors", [ "Cavium ThunderX processors", [
FeatureCRC, FeatureCRC,
@ -878,6 +902,8 @@ def : ProcessorModel<"thunderxt81", ThunderXT8XModel, [ProcThunderXT81]>;
def : ProcessorModel<"thunderxt83", ThunderXT8XModel, [ProcThunderXT83]>; def : ProcessorModel<"thunderxt83", ThunderXT8XModel, [ProcThunderXT83]>;
// Cavium ThunderX2T9X Processors. Formerly Broadcom Vulcan. // Cavium ThunderX2T9X Processors. Formerly Broadcom Vulcan.
def : ProcessorModel<"thunderx2t99", ThunderX2T99Model, [ProcThunderX2T99]>; def : ProcessorModel<"thunderx2t99", ThunderX2T99Model, [ProcThunderX2T99]>;
// Marvell ThunderX3T110 Processors.
def : ProcessorModel<"thunderx3t110", ThunderX3T110Model, [ProcThunderX3T110]>;
// FIXME: HiSilicon TSV110 is currently modeled as a Cortex-A57. // FIXME: HiSilicon TSV110 is currently modeled as a Cortex-A57.
def : ProcessorModel<"tsv110", CortexA57Model, [ProcTSV110]>; def : ProcessorModel<"tsv110", CortexA57Model, [ProcTSV110]>;

View File

@ -118,9 +118,15 @@ void AArch64BranchTargets::addBTI(MachineBasicBlock &MBB, bool CouldCall,
auto MBBI = MBB.begin(); auto MBBI = MBB.begin();
// PACI[AB]SP are implicitly BTI JC, so no BTI instruction needed there. // Skip the meta instructions, those will be removed anyway.
if (MBBI != MBB.end() && (MBBI->getOpcode() == AArch64::PACIASP || for (; MBBI != MBB.end() && MBBI->isMetaInstruction(); ++MBBI)
MBBI->getOpcode() == AArch64::PACIBSP)) ;
// SCTLR_EL1.BT[01] is set to 0 by default which means
// PACI[AB]SP are implicitly BTI C so no BTI C instruction is needed there.
if (MBBI != MBB.end() && HintNum == 34 &&
(MBBI->getOpcode() == AArch64::PACIASP ||
MBBI->getOpcode() == AArch64::PACIBSP))
return; return;
BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()), BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),

View File

@ -26,7 +26,8 @@ def CortexA53Model : SchedMachineModel {
// v 1.0 Spreadsheet // v 1.0 Spreadsheet
let CompleteModel = 1; let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
} }

View File

@ -31,7 +31,8 @@ def CortexA57Model : SchedMachineModel {
let LoopMicroOpBufferSize = 16; let LoopMicroOpBufferSize = 16;
let CompleteModel = 1; let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//

View File

@ -18,7 +18,8 @@ def CycloneModel : SchedMachineModel {
let MispredictPenalty = 16; // 14-19 cycles are typical. let MispredictPenalty = 16; // 14-19 cycles are typical.
let CompleteModel = 1; let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//

View File

@ -24,7 +24,8 @@ def ExynosM3Model : SchedMachineModel {
let MispredictPenalty = 16; // Minimum branch misprediction penalty. let MispredictPenalty = 16; // Minimum branch misprediction penalty.
let CompleteModel = 1; // Use the default model otherwise. let CompleteModel = 1; // Use the default model otherwise.
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//

View File

@ -24,7 +24,8 @@ def ExynosM4Model : SchedMachineModel {
let MispredictPenalty = 16; // Minimum branch misprediction penalty. let MispredictPenalty = 16; // Minimum branch misprediction penalty.
let CompleteModel = 1; // Use the default model otherwise. let CompleteModel = 1; // Use the default model otherwise.
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//

View File

@ -24,7 +24,8 @@ def ExynosM5Model : SchedMachineModel {
let MispredictPenalty = 15; // Minimum branch misprediction penalty. let MispredictPenalty = 15; // Minimum branch misprediction penalty.
let CompleteModel = 1; // Use the default model otherwise. let CompleteModel = 1; // Use the default model otherwise.
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
} }
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//

View File

@ -23,8 +23,8 @@ def FalkorModel : SchedMachineModel {
let MispredictPenalty = 11; // Minimum branch misprediction penalty. let MispredictPenalty = 11; // Minimum branch misprediction penalty.
let CompleteModel = 1; let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed. // FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0; let FullInstRWOverlapCheck = 0;
} }

View File

@ -27,8 +27,8 @@ def KryoModel : SchedMachineModel {
let LoopMicroOpBufferSize = 16; let LoopMicroOpBufferSize = 16;
let CompleteModel = 1; let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed. // FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0; let FullInstRWOverlapCheck = 0;
} }

View File

@ -25,8 +25,8 @@ def ThunderXT8XModel : SchedMachineModel {
let PostRAScheduler = 1; // Use PostRA scheduler. let PostRAScheduler = 1; // Use PostRA scheduler.
let CompleteModel = 1; let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed. // FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0; let FullInstRWOverlapCheck = 0;
} }

View File

@ -25,8 +25,8 @@ def ThunderX2T99Model : SchedMachineModel {
let PostRAScheduler = 1; // Using PostRA sched. let PostRAScheduler = 1; // Using PostRA sched.
let CompleteModel = 1; let CompleteModel = 1;
list<Predicate> UnsupportedFeatures = SVEUnsupported.F; list<Predicate> UnsupportedFeatures = !listconcat(SVEUnsupported.F,
PAUnsupported.F);
// FIXME: Remove when all errors have been fixed. // FIXME: Remove when all errors have been fixed.
let FullInstRWOverlapCheck = 0; let FullInstRWOverlapCheck = 0;
} }

File diff suppressed because it is too large Load Diff

View File

@ -160,6 +160,17 @@ void AArch64Subtarget::initializeProperties() {
PrefFunctionLogAlignment = 4; PrefFunctionLogAlignment = 4;
PrefLoopLogAlignment = 2; PrefLoopLogAlignment = 2;
break; break;
case ThunderX3T110:
CacheLineSize = 64;
PrefFunctionLogAlignment = 4;
PrefLoopLogAlignment = 2;
MaxInterleaveFactor = 4;
PrefetchDistance = 128;
MinPrefetchStride = 1024;
MaxPrefetchIterationsAhead = 4;
// FIXME: remove this to enable 64-bit SLP if performance looks good.
MinVectorRegisterBitWidth = 128;
break;
} }
} }

View File

@ -63,7 +63,8 @@ class AArch64Subtarget final : public AArch64GenSubtargetInfo {
ThunderXT81, ThunderXT81,
ThunderXT83, ThunderXT83,
ThunderXT88, ThunderXT88,
TSV110 TSV110,
ThunderX3T110
}; };
protected: protected:

View File

@ -404,7 +404,7 @@ void X86AsmPrinter::PrintIntelMemReference(const MachineInstr *MI,
static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO, static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
char Mode, raw_ostream &O) { char Mode, raw_ostream &O) {
Register Reg = MO.getReg(); Register Reg = MO.getReg();
bool EmitPercent = true; bool EmitPercent = MO.getParent()->getInlineAsmDialect() == InlineAsm::AD_ATT;
if (!X86::GR8RegClass.contains(Reg) && if (!X86::GR8RegClass.contains(Reg) &&
!X86::GR16RegClass.contains(Reg) && !X86::GR16RegClass.contains(Reg) &&
@ -443,6 +443,42 @@ static bool printAsmMRegister(X86AsmPrinter &P, const MachineOperand &MO,
return false; return false;
} }
static bool printAsmVRegister(X86AsmPrinter &P, const MachineOperand &MO,
char Mode, raw_ostream &O) {
unsigned Reg = MO.getReg();
bool EmitPercent = MO.getParent()->getInlineAsmDialect() == InlineAsm::AD_ATT;
unsigned Index;
if (X86::VR128XRegClass.contains(Reg))
Index = Reg - X86::XMM0;
else if (X86::VR256XRegClass.contains(Reg))
Index = Reg - X86::YMM0;
else if (X86::VR512RegClass.contains(Reg))
Index = Reg - X86::ZMM0;
else
return true;
switch (Mode) {
default: // Unknown mode.
return true;
case 'x': // Print V4SFmode register
Reg = X86::XMM0 + Index;
break;
case 't': // Print V8SFmode register
Reg = X86::YMM0 + Index;
break;
case 'g': // Print V16SFmode register
Reg = X86::ZMM0 + Index;
break;
}
if (EmitPercent)
O << '%';
O << X86ATTInstPrinter::getRegisterName(Reg);
return false;
}
/// PrintAsmOperand - Print out an operand for an inline asm expression. /// PrintAsmOperand - Print out an operand for an inline asm expression.
/// ///
bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
@ -517,6 +553,14 @@ bool X86AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
PrintOperand(MI, OpNo, O); PrintOperand(MI, OpNo, O);
return false; return false;
case 'x': // Print V4SFmode register
case 't': // Print V8SFmode register
case 'g': // Print V16SFmode register
if (MO.isReg())
return printAsmVRegister(*this, MO, ExtraCode[0], O);
PrintOperand(MI, OpNo, O);
return false;
case 'P': // This is the operand of a call, treat specially. case 'P': // This is the operand of a call, treat specially.
PrintPCRelImm(MI, OpNo, O); PrintPCRelImm(MI, OpNo, O);
return false; return false;

View File

@ -23319,7 +23319,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
for (unsigned i = 0; i != NumElts; ++i) { for (unsigned i = 0; i != NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i); SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) { if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp); // Must produce 0s in the correct bits.
Elts.push_back(DAG.getConstant(0, dl, ElementType));
continue; continue;
} }
auto *ND = cast<ConstantSDNode>(CurrentOp); auto *ND = cast<ConstantSDNode>(CurrentOp);
@ -23331,7 +23332,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
for (unsigned i = 0; i != NumElts; ++i) { for (unsigned i = 0; i != NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i); SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) { if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp); // Must produce 0s in the correct bits.
Elts.push_back(DAG.getConstant(0, dl, ElementType));
continue; continue;
} }
auto *ND = cast<ConstantSDNode>(CurrentOp); auto *ND = cast<ConstantSDNode>(CurrentOp);
@ -23343,7 +23345,8 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
for (unsigned i = 0; i != NumElts; ++i) { for (unsigned i = 0; i != NumElts; ++i) {
SDValue CurrentOp = SrcOp->getOperand(i); SDValue CurrentOp = SrcOp->getOperand(i);
if (CurrentOp->isUndef()) { if (CurrentOp->isUndef()) {
Elts.push_back(CurrentOp); // All shifted in bits must be the same so use 0.
Elts.push_back(DAG.getConstant(0, dl, ElementType));
continue; continue;
} }
auto *ND = cast<ConstantSDNode>(CurrentOp); auto *ND = cast<ConstantSDNode>(CurrentOp);
@ -39699,14 +39702,22 @@ static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) { getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
assert(EltBits.size() == VT.getVectorNumElements() && assert(EltBits.size() == VT.getVectorNumElements() &&
"Unexpected shift value type"); "Unexpected shift value type");
for (APInt &Elt : EltBits) { // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
if (X86ISD::VSHLI == Opcode) // created an undef input due to no input bits being demanded, but user
// still expects 0 in other bits.
for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
APInt &Elt = EltBits[i];
if (UndefElts[i])
Elt = 0;
else if (X86ISD::VSHLI == Opcode)
Elt <<= ShiftVal; Elt <<= ShiftVal;
else if (X86ISD::VSRAI == Opcode) else if (X86ISD::VSRAI == Opcode)
Elt.ashrInPlace(ShiftVal); Elt.ashrInPlace(ShiftVal);
else else
Elt.lshrInPlace(ShiftVal); Elt.lshrInPlace(ShiftVal);
} }
// Reset undef elements since they were zeroed above.
UndefElts = 0;
return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N)); return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
} }

View File

@ -3956,6 +3956,8 @@ static bool ExpandMOVImmSExti8(MachineInstrBuilder &MIB,
BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm); BuildMI(MBB, I, DL, TII.get(X86::PUSH32i8)).addImm(Imm);
MIB->setDesc(TII.get(X86::POP32r)); MIB->setDesc(TII.get(X86::POP32r));
} }
MIB->RemoveOperand(1);
MIB->addImplicitDefUseOperands(*MBB.getParent());
// Build CFI if necessary. // Build CFI if necessary.
MachineFunction &MF = *MBB.getParent(); MachineFunction &MF = *MBB.getParent();

View File

@ -527,19 +527,19 @@ namespace {
// Collect information about PHI nodes which can be transformed in // Collect information about PHI nodes which can be transformed in
// rewriteLoopExitValues. // rewriteLoopExitValues.
struct RewritePhi { struct RewritePhi {
PHINode *PN; PHINode *PN; // For which PHI node is this replacement?
unsigned Ith; // For which incoming value?
const SCEV *ExpansionSCEV; // The SCEV of the incoming value we are rewriting.
Instruction *ExpansionPoint; // Where we'd like to expand that SCEV?
bool HighCost; // Is this expansion a high-cost?
// Ith incoming value. Value *Expansion = nullptr;
unsigned Ith; bool ValidRewrite = false;
// Exit value after expansion. RewritePhi(PHINode *P, unsigned I, const SCEV *Val, Instruction *ExpansionPt,
Value *Val; bool H)
: PN(P), Ith(I), ExpansionSCEV(Val), ExpansionPoint(ExpansionPt),
// High Cost when expansion. HighCost(H) {}
bool HighCost;
RewritePhi(PHINode *P, unsigned I, Value *V, bool H)
: PN(P), Ith(I), Val(V), HighCost(H) {}
}; };
} // end anonymous namespace } // end anonymous namespace
@ -671,41 +671,65 @@ bool IndVarSimplify::rewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
hasHardUserWithinLoop(L, Inst)) hasHardUserWithinLoop(L, Inst))
continue; continue;
// Check if expansions of this SCEV would count as being high cost.
bool HighCost = Rewriter.isHighCostExpansion(ExitValue, L, Inst); bool HighCost = Rewriter.isHighCostExpansion(ExitValue, L, Inst);
Value *ExitVal = Rewriter.expandCodeFor(ExitValue, PN->getType(), Inst);
LLVM_DEBUG(dbgs() << "INDVARS: RLEV: AfterLoopVal = " << *ExitVal // Note that we must not perform expansions until after
<< '\n' // we query *all* the costs, because if we perform temporary expansion
<< " LoopVal = " << *Inst << "\n"); // inbetween, one that we might not intend to keep, said expansion
// *may* affect cost calculation of the the next SCEV's we'll query,
if (!isValidRewrite(Inst, ExitVal)) { // and next SCEV may errneously get smaller cost.
DeadInsts.push_back(ExitVal);
continue;
}
#ifndef NDEBUG
// If we reuse an instruction from a loop which is neither L nor one of
// its containing loops, we end up breaking LCSSA form for this loop by
// creating a new use of its instruction.
if (auto *ExitInsn = dyn_cast<Instruction>(ExitVal))
if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
if (EVL != L)
assert(EVL->contains(L) && "LCSSA breach detected!");
#endif
// Collect all the candidate PHINodes to be rewritten. // Collect all the candidate PHINodes to be rewritten.
RewritePhiSet.emplace_back(PN, i, ExitVal, HighCost); RewritePhiSet.emplace_back(PN, i, ExitValue, Inst, HighCost);
} }
} }
} }
// Now that we've done preliminary filtering and billed all the SCEV's,
// we can perform the last sanity check - the expansion must be valid.
for (RewritePhi &Phi : RewritePhiSet) {
Phi.Expansion = Rewriter.expandCodeFor(Phi.ExpansionSCEV, Phi.PN->getType(),
Phi.ExpansionPoint);
LLVM_DEBUG(dbgs() << "rewriteLoopExitValues: AfterLoopVal = "
<< *(Phi.Expansion) << '\n'
<< " LoopVal = " << *(Phi.ExpansionPoint) << "\n");
// FIXME: isValidRewrite() is a hack. it should be an assert, eventually.
Phi.ValidRewrite = isValidRewrite(Phi.ExpansionPoint, Phi.Expansion);
if (!Phi.ValidRewrite) {
DeadInsts.push_back(Phi.Expansion);
continue;
}
#ifndef NDEBUG
// If we reuse an instruction from a loop which is neither L nor one of
// its containing loops, we end up breaking LCSSA form for this loop by
// creating a new use of its instruction.
if (auto *ExitInsn = dyn_cast<Instruction>(Phi.Expansion))
if (auto *EVL = LI->getLoopFor(ExitInsn->getParent()))
if (EVL != L)
assert(EVL->contains(L) && "LCSSA breach detected!");
#endif
}
// TODO: after isValidRewrite() is an assertion, evaluate whether
// it is beneficial to change how we calculate high-cost:
// if we have SCEV 'A' which we know we will expand, should we calculate
// the cost of other SCEV's after expanding SCEV 'A',
// thus potentially giving cost bonus to those other SCEV's?
bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet); bool LoopCanBeDel = canLoopBeDeleted(L, RewritePhiSet);
bool Changed = false; bool Changed = false;
// Transformation. // Transformation.
for (const RewritePhi &Phi : RewritePhiSet) { for (const RewritePhi &Phi : RewritePhiSet) {
if (!Phi.ValidRewrite)
continue;
PHINode *PN = Phi.PN; PHINode *PN = Phi.PN;
Value *ExitVal = Phi.Val; Value *ExitVal = Phi.Expansion;
// Only do the rewrite when the ExitValue can be expanded cheaply. // Only do the rewrite when the ExitValue can be expanded cheaply.
// If LoopCanBeDel is true, rewrite exit value aggressively. // If LoopCanBeDel is true, rewrite exit value aggressively.
@ -844,6 +868,8 @@ bool IndVarSimplify::canLoopBeDeleted(
// phase later. Skip it in the loop invariant check below. // phase later. Skip it in the loop invariant check below.
bool found = false; bool found = false;
for (const RewritePhi &Phi : RewritePhiSet) { for (const RewritePhi &Phi : RewritePhiSet) {
if (!Phi.ValidRewrite)
continue;
unsigned i = Phi.Ith; unsigned i = Phi.Ith;
if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) { if (Phi.PN == P && (Phi.PN)->getIncomingValue(i) == Incoming) {
found = true; found = true;

View File

@ -369,7 +369,8 @@ Value *Mapper::mapValue(const Value *V) {
if (NewTy != IA->getFunctionType()) if (NewTy != IA->getFunctionType())
V = InlineAsm::get(NewTy, IA->getAsmString(), IA->getConstraintString(), V = InlineAsm::get(NewTy, IA->getAsmString(), IA->getConstraintString(),
IA->hasSideEffects(), IA->isAlignStack()); IA->hasSideEffects(), IA->isAlignStack(),
IA->getDialect());
} }
return getVM()[V] = const_cast<Value *>(V); return getVM()[V] = const_cast<Value *>(V);

View File

@ -1,14 +1,14 @@
// $FreeBSD$ // $FreeBSD$
#define LLVM_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145" #define LLVM_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git" #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"
#define CLANG_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145" #define CLANG_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define CLANG_REPOSITORY "git@github.com:llvm/llvm-project.git" #define CLANG_REPOSITORY "git@github.com:llvm/llvm-project.git"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style> // <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
#define LLD_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145-1300007" #define LLD_REVISION "llvmorg-10.0.0-97-g6f71678ecd2-1300007"
#define LLD_REPOSITORY "FreeBSD" #define LLD_REPOSITORY "FreeBSD"
#define LLDB_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145" #define LLDB_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define LLDB_REPOSITORY "git@github.com:llvm/llvm-project.git" #define LLDB_REPOSITORY "git@github.com:llvm/llvm-project.git"

View File

@ -1,3 +1,3 @@
/* $FreeBSD$ */ /* $FreeBSD$ */
#define LLVM_REVISION "llvmorg-10.0.1-rc1-0-gf79cd71e145" #define LLVM_REVISION "llvmorg-10.0.0-97-g6f71678ecd2"
#define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git" #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"