Upgrade our copies of clang, llvm, lld, lldb, compiler-rt and libc++ to
6.0.0 (branches/release_60 r323338).

MFC after:      3 months
X-MFC-With:     r327952
PR:             224669

commit 042b1c2ef5
@@ -33,6 +33,11 @@ uintptr_t GetCurrentProcess(void);
 #include <machine/sysarch.h>
 #endif
 
+#if defined(__OpenBSD__) && defined(__mips__)
+#include <sys/types.h>
+#include <machine/sysarch.h>
+#endif
+
 #if defined(__linux__) && defined(__mips__)
 #include <sys/cachectl.h>
 #include <sys/syscall.h>
@@ -142,6 +147,8 @@ void __clear_cache(void *start, void *end) {
 #else
   syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
 #endif
+#elif defined(__mips__) && defined(__OpenBSD__)
+  cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE);
 #elif defined(__aarch64__) && !defined(__APPLE__)
   uint64_t xstart = (uint64_t)(uintptr_t) start;
   uint64_t xend = (uint64_t)(uintptr_t) end;
@@ -156,12 +163,14 @@ void __clear_cache(void *start, void *end) {
    * uintptr_t in case this runs in an IPL32 environment.
    */
   const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
-  for (addr = xstart; addr < xend; addr += dcache_line_size)
+  for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
+       addr += dcache_line_size)
     __asm __volatile("dc cvau, %0" :: "r"(addr));
   __asm __volatile("dsb ish");
 
   const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
-  for (addr = xstart; addr < xend; addr += icache_line_size)
+  for (addr = xstart & ~(icache_line_size - 1); addr < xend;
+       addr += icache_line_size)
     __asm __volatile("ic ivau, %0" :: "r"(addr));
   __asm __volatile("isb sy");
 #elif defined (__powerpc64__)
@@ -254,23 +254,23 @@ std::string RegionBase<Tr>::getNameStr() const {
 template <class Tr>
 void RegionBase<Tr>::verifyBBInRegion(BlockT *BB) const {
   if (!contains(BB))
-    llvm_unreachable("Broken region found: enumerated BB not in region!");
+    report_fatal_error("Broken region found: enumerated BB not in region!");
 
   BlockT *entry = getEntry(), *exit = getExit();
 
   for (BlockT *Succ :
        make_range(BlockTraits::child_begin(BB), BlockTraits::child_end(BB))) {
     if (!contains(Succ) && exit != Succ)
-      llvm_unreachable("Broken region found: edges leaving the region must go "
-                       "to the exit node!");
+      report_fatal_error("Broken region found: edges leaving the region must go "
+                         "to the exit node!");
   }
 
   if (entry != BB) {
     for (BlockT *Pred : make_range(InvBlockTraits::child_begin(BB),
                                    InvBlockTraits::child_end(BB))) {
       if (!contains(Pred))
-        llvm_unreachable("Broken region found: edges entering the region must "
-                         "go to the entry node!");
+        report_fatal_error("Broken region found: edges entering the region must "
+                           "go to the entry node!");
     }
   }
 }
@@ -557,7 +557,7 @@ void RegionInfoBase<Tr>::verifyBBMap(const RegionT *R) const {
     } else {
       BlockT *BB = Element->template getNodeAs<BlockT>();
       if (getRegionFor(BB) != R)
-        llvm_unreachable("BB map does not match region nesting");
+        report_fatal_error("BB map does not match region nesting");
     }
   }
 }
@@ -56,7 +56,7 @@ class BaseIndexOffset {
                             int64_t &Off);
 
   /// Parses tree in Ptr for base, index, offset addresses.
-  static BaseIndexOffset match(SDValue Ptr, const SelectionDAG &DAG);
+  static BaseIndexOffset match(LSBaseSDNode *N, const SelectionDAG &DAG);
 };
 
 } // end namespace llvm
@@ -177,13 +177,7 @@ class CodeViewContext {
                               unsigned IACol);
 
   /// Retreive the function info if this is a valid function id, or nullptr.
-  MCCVFunctionInfo *getCVFunctionInfo(unsigned FuncId) {
-    if (FuncId >= Functions.size())
-      return nullptr;
-    if (Functions[FuncId].isUnallocatedFunctionInfo())
-      return nullptr;
-    return &Functions[FuncId];
-  }
+  MCCVFunctionInfo *getCVFunctionInfo(unsigned FuncId);
 
   /// Saves the information from the currently parsed .cv_loc directive
   /// and sets CVLocSeen. When the next instruction is assembled an entry
@@ -199,50 +193,22 @@ class CodeViewContext {
     CurrentCVLoc.setIsStmt(IsStmt);
     CVLocSeen = true;
   }
-  void clearCVLocSeen() { CVLocSeen = false; }
+
+  bool getCVLocSeen() { return CVLocSeen; }
+  void clearCVLocSeen() { CVLocSeen = false; }
 
   const MCCVLoc &getCurrentCVLoc() { return CurrentCVLoc; }
 
   bool isValidCVFileNumber(unsigned FileNumber);
 
   /// \brief Add a line entry.
-  void addLineEntry(const MCCVLineEntry &LineEntry) {
-    size_t Offset = MCCVLines.size();
-    auto I = MCCVLineStartStop.insert(
-        {LineEntry.getFunctionId(), {Offset, Offset + 1}});
-    if (!I.second)
-      I.first->second.second = Offset + 1;
-    MCCVLines.push_back(LineEntry);
-  }
+  void addLineEntry(const MCCVLineEntry &LineEntry);
 
-  std::vector<MCCVLineEntry> getFunctionLineEntries(unsigned FuncId) {
-    std::vector<MCCVLineEntry> FilteredLines;
-
-    auto I = MCCVLineStartStop.find(FuncId);
-    if (I != MCCVLineStartStop.end())
-      for (size_t Idx = I->second.first, End = I->second.second; Idx != End;
-           ++Idx)
-        if (MCCVLines[Idx].getFunctionId() == FuncId)
-          FilteredLines.push_back(MCCVLines[Idx]);
-    return FilteredLines;
-  }
+  std::vector<MCCVLineEntry> getFunctionLineEntries(unsigned FuncId);
 
-  std::pair<size_t, size_t> getLineExtent(unsigned FuncId) {
-    auto I = MCCVLineStartStop.find(FuncId);
-    // Return an empty extent if there are no cv_locs for this function id.
-    if (I == MCCVLineStartStop.end())
-      return {~0ULL, 0};
-    return I->second;
-  }
+  std::pair<size_t, size_t> getLineExtent(unsigned FuncId);
 
-  ArrayRef<MCCVLineEntry> getLinesForExtent(size_t L, size_t R) {
-    if (R <= L)
-      return None;
-    if (L >= MCCVLines.size())
-      return None;
-    return makeArrayRef(&MCCVLines[L], R - L);
-  }
+  ArrayRef<MCCVLineEntry> getLinesForExtent(size_t L, size_t R);
 
   /// Emits a line table substream.
   void emitLineTableForFunction(MCObjectStreamer &OS, unsigned FuncId,
@@ -628,7 +628,7 @@ struct SemiNCAInfo {
               DecreasingLevel>
       Bucket;  // Queue of tree nodes sorted by level in descending order.
   SmallDenseSet<TreeNodePtr, 8> Affected;
-  SmallDenseSet<TreeNodePtr, 8> Visited;
+  SmallDenseMap<TreeNodePtr, unsigned, 8> Visited;
   SmallVector<TreeNodePtr, 8> AffectedQueue;
   SmallVector<TreeNodePtr, 8> VisitedNotAffectedQueue;
 };
@@ -706,7 +706,7 @@ struct SemiNCAInfo {
     // algorithm does not really know or use the set of roots and can make a
     // different (implicit) decision about which nodes within an infinite loop
     // becomes a root.
-    if (DT.isVirtualRoot(TN->getIDom())) {
+    if (TN && !DT.isVirtualRoot(TN->getIDom())) {
       DEBUG(dbgs() << "Root " << BlockNamePrinter(R)
                    << " is not virtual root's child\n"
                    << "The entire tree needs to be rebuilt\n");
@@ -753,14 +753,16 @@ struct SemiNCAInfo {
 
     while (!II.Bucket.empty()) {
       const TreeNodePtr CurrentNode = II.Bucket.top().second;
+      const unsigned CurrentLevel = CurrentNode->getLevel();
       II.Bucket.pop();
       DEBUG(dbgs() << "\tAdding to Visited and AffectedQueue: "
                    << BlockNamePrinter(CurrentNode) << "\n");
-      II.Visited.insert(CurrentNode);
+
+      II.Visited.insert({CurrentNode, CurrentLevel});
       II.AffectedQueue.push_back(CurrentNode);
 
       // Discover and collect affected successors of the current node.
-      VisitInsertion(DT, BUI, CurrentNode, CurrentNode->getLevel(), NCD, II);
+      VisitInsertion(DT, BUI, CurrentNode, CurrentLevel, NCD, II);
     }
 
     // Finish by updating immediate dominators and levels.
@@ -772,13 +774,17 @@ struct SemiNCAInfo {
                              const TreeNodePtr TN, const unsigned RootLevel,
                              const TreeNodePtr NCD, InsertionInfo &II) {
     const unsigned NCDLevel = NCD->getLevel();
-    DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << "\n");
+    DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << ", RootLevel "
+                 << RootLevel << "\n");
 
     SmallVector<TreeNodePtr, 8> Stack = {TN};
+    assert(TN->getBlock() && II.Visited.count(TN) && "Preconditions!");
+
+    SmallPtrSet<TreeNodePtr, 8> Processed;
 
     do {
       TreeNodePtr Next = Stack.pop_back_val();
       DEBUG(dbgs() << " Next: " << BlockNamePrinter(Next) << "\n");
 
       for (const NodePtr Succ :
            ChildrenGetter<IsPostDom>::Get(Next->getBlock(), BUI)) {
@@ -786,19 +792,31 @@ struct SemiNCAInfo {
         assert(SuccTN && "Unreachable successor found at reachable insertion");
         const unsigned SuccLevel = SuccTN->getLevel();
 
-        DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ)
-                     << ", level = " << SuccLevel << "\n");
+        DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ) << ", level = "
+                     << SuccLevel << "\n");
+
+        // Do not process the same node multiple times.
+        if (Processed.count(Next) > 0)
+          continue;
 
         // Succ dominated by subtree From -- not affected.
         // (Based on the lemma 2.5 from the second paper.)
         if (SuccLevel > RootLevel) {
           DEBUG(dbgs() << "\t\tDominated by subtree From\n");
-          if (II.Visited.count(SuccTN) != 0)
-            continue;
+          if (II.Visited.count(SuccTN) != 0) {
+            DEBUG(dbgs() << "\t\t\talready visited at level "
+                         << II.Visited[SuccTN] << "\n\t\t\tcurrent level "
+                         << RootLevel << ")\n");
+
+            // A node can be necessary to visit again if we see it again at
+            // a lower level than before.
+            if (II.Visited[SuccTN] >= RootLevel)
+              continue;
+          }
 
           DEBUG(dbgs() << "\t\tMarking visited not affected "
                        << BlockNamePrinter(Succ) << "\n");
-          II.Visited.insert(SuccTN);
+          II.Visited.insert({SuccTN, RootLevel});
           II.VisitedNotAffectedQueue.push_back(SuccTN);
           Stack.push_back(SuccTN);
         } else if ((SuccLevel > NCDLevel + 1) &&
@@ -809,6 +827,8 @@ struct SemiNCAInfo {
           II.Bucket.push({SuccLevel, SuccTN});
         }
       }
+
+      Processed.insert(Next);
     } while (!Stack.empty());
   }
 
@@ -920,21 +940,21 @@ struct SemiNCAInfo {
     const NodePtr NCDBlock = DT.findNearestCommonDominator(From, To);
     const TreeNodePtr NCD = DT.getNode(NCDBlock);
 
-    // To dominates From -- nothing to do.
-    if (ToTN == NCD) return;
+    // If To dominates From -- nothing to do.
+    if (ToTN != NCD) {
+      DT.DFSInfoValid = false;
 
-    DT.DFSInfoValid = false;
+      const TreeNodePtr ToIDom = ToTN->getIDom();
+      DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
+                   << BlockNamePrinter(ToIDom) << "\n");
 
-    const TreeNodePtr ToIDom = ToTN->getIDom();
-    DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
-                 << BlockNamePrinter(ToIDom) << "\n");
-
-    // To remains reachable after deletion.
-    // (Based on the caption under Figure 4. from the second paper.)
-    if (FromTN != ToIDom || HasProperSupport(DT, BUI, ToTN))
-      DeleteReachable(DT, BUI, FromTN, ToTN);
-    else
-      DeleteUnreachable(DT, BUI, ToTN);
+      // To remains reachable after deletion.
+      // (Based on the caption under Figure 4. from the second paper.)
+      if (FromTN != ToIDom || HasProperSupport(DT, BUI, ToTN))
+        DeleteReachable(DT, BUI, FromTN, ToTN);
+      else
+        DeleteUnreachable(DT, BUI, ToTN);
+    }
 
     if (IsPostDom) UpdateRootsAfterUpdate(DT, BUI);
   }
@@ -577,7 +577,8 @@ bool GlobalMerge::doInitialization(Module &M) {
   for (auto &GV : M.globals()) {
     // Merge is safe for "normal" internal or external globals only
     if (GV.isDeclaration() || GV.isThreadLocal() ||
-        GV.hasSection() || GV.hasImplicitSection())
+        GV.hasSection() || GV.hasImplicitSection() ||
+        GV.hasDLLExportStorageClass())
       continue;
 
     // It's not safe to merge globals that may be preempted
@@ -719,15 +719,14 @@ bool PeepholeOptimizer::findNextSource(unsigned Reg, unsigned SubReg,
     CurSrcPair = Pair;
     ValueTracker ValTracker(CurSrcPair.Reg, CurSrcPair.SubReg, *MRI,
                             !DisableAdvCopyOpt, TII);
-    ValueTrackerResult Res;
-    bool ShouldRewrite = false;
 
-    do {
-      // Follow the chain of copies until we reach the top of the use-def chain
-      // or find a more suitable source.
-      Res = ValTracker.getNextSource();
+    // Follow the chain of copies until we find a more suitable source, a phi
+    // or have to abort.
+    while (true) {
+      ValueTrackerResult Res = ValTracker.getNextSource();
+      // Abort at the end of a chain (without finding a suitable source).
       if (!Res.isValid())
-        break;
+        return false;
 
       // Insert the Def -> Use entry for the recently found source.
       ValueTrackerResult CurSrcRes = RewriteMap.lookup(CurSrcPair);
@@ -763,24 +762,19 @@ bool PeepholeOptimizer::findNextSource(unsigned Reg, unsigned SubReg,
       if (TargetRegisterInfo::isPhysicalRegister(CurSrcPair.Reg))
         return false;
 
       // Keep following the chain if the value isn't any better yet.
       const TargetRegisterClass *SrcRC = MRI->getRegClass(CurSrcPair.Reg);
-      ShouldRewrite = TRI->shouldRewriteCopySrc(DefRC, SubReg, SrcRC,
-                                                CurSrcPair.SubReg);
-    } while (!ShouldRewrite);
+      if (!TRI->shouldRewriteCopySrc(DefRC, SubReg, SrcRC, CurSrcPair.SubReg))
+        continue;
 
-    // Continue looking for new sources...
-    if (Res.isValid())
-      continue;
+      // We currently cannot deal with subreg operands on PHI instructions
+      // (see insertPHI()).
+      if (PHICount > 0 && CurSrcPair.SubReg != 0)
+        continue;
 
-    // Do not continue searching for a new source if the there's at least
-    // one use-def which cannot be rewritten.
-    if (!ShouldRewrite)
-      return false;
-  }
-
-  if (PHICount >= RewritePHILimit) {
-    DEBUG(dbgs() << "findNextSource: PHI limit reached\n");
-    return false;
+      // We found a suitable source, and are done with this chain.
+      break;
     }
   }
 
   // If we did not find a more suitable source, there is nothing to optimize.
@@ -799,6 +793,9 @@ insertPHI(MachineRegisterInfo *MRI, const TargetInstrInfo *TII,
   assert(!SrcRegs.empty() && "No sources to create a PHI instruction?");
 
   const TargetRegisterClass *NewRC = MRI->getRegClass(SrcRegs[0].Reg);
+  // NewRC is only correct if no subregisters are involved. findNextSource()
+  // should have rejected those cases already.
+  assert(SrcRegs[0].SubReg == 0 && "should not have subreg operand");
   unsigned NewVR = MRI->createVirtualRegister(NewRC);
   MachineBasicBlock *MBB = OrigPHI->getParent();
   MachineInstrBuilder MIB = BuildMI(*MBB, OrigPHI, OrigPHI->getDebugLoc(),
@@ -3842,9 +3842,16 @@ bool DAGCombiner::SearchForAndLoads(SDNode *N,
       EVT ExtVT;
       if (isAndLoadExtLoad(Mask, Load, Load->getValueType(0), ExtVT) &&
           isLegalNarrowLoad(Load, ISD::ZEXTLOAD, ExtVT)) {
-        // Only add this load if we can make it more narrow.
-        if (ExtVT.bitsLT(Load->getMemoryVT()))
+
+        // ZEXTLOAD is already small enough.
+        if (Load->getExtensionType() == ISD::ZEXTLOAD &&
+            ExtVT.bitsGE(Load->getMemoryVT()))
+          continue;
+
+        // Use LE to convert equal sized loads to zext.
+        if (ExtVT.bitsLE(Load->getMemoryVT()))
           Loads.insert(Load);
+
         continue;
       }
       return false;
@@ -3899,11 +3906,13 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG) {
   if (Loads.size() == 0)
     return false;
 
+  DEBUG(dbgs() << "Backwards propagate AND: "; N->dump());
   SDValue MaskOp = N->getOperand(1);
 
   // If it exists, fixup the single node we allow in the tree that needs
   // masking.
   if (FixupNode) {
+    DEBUG(dbgs() << "First, need to fix up: "; FixupNode->dump());
     SDValue And = DAG.getNode(ISD::AND, SDLoc(FixupNode),
                               FixupNode->getValueType(0),
                               SDValue(FixupNode, 0), MaskOp);
@@ -3914,14 +3923,21 @@ bool DAGCombiner::BackwardsPropagateMask(SDNode *N, SelectionDAG &DAG) {
 
   // Narrow any constants that need it.
   for (auto *LogicN : NodesWithConsts) {
-    auto *C = cast<ConstantSDNode>(LogicN->getOperand(1));
-    SDValue And = DAG.getNode(ISD::AND, SDLoc(C), C->getValueType(0),
-                              SDValue(C, 0), MaskOp);
-    DAG.UpdateNodeOperands(LogicN, LogicN->getOperand(0), And);
+    SDValue Op0 = LogicN->getOperand(0);
+    SDValue Op1 = LogicN->getOperand(1);
+
+    if (isa<ConstantSDNode>(Op0))
+      std::swap(Op0, Op1);
+
+    SDValue And = DAG.getNode(ISD::AND, SDLoc(Op1), Op1.getValueType(),
+                              Op1, MaskOp);
+
+    DAG.UpdateNodeOperands(LogicN, Op0, And);
   }
 
   // Create narrow loads.
   for (auto *Load : Loads) {
+    DEBUG(dbgs() << "Propagate AND back to: "; Load->dump());
     SDValue And = DAG.getNode(ISD::AND, SDLoc(Load), Load->getValueType(0),
                               SDValue(Load, 0), MaskOp);
     DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 0), And);
@@ -5209,7 +5225,7 @@ SDValue DAGCombiner::MatchLoadCombine(SDNode *N) {
       return SDValue();
 
     // Loads must share the same base address
-    BaseIndexOffset Ptr = BaseIndexOffset::match(L->getBasePtr(), DAG);
+    BaseIndexOffset Ptr = BaseIndexOffset::match(L, DAG);
     int64_t ByteOffsetFromBase = 0;
     if (!Base)
       Base = Ptr;
@@ -12928,7 +12944,7 @@ void DAGCombiner::getStoreMergeCandidates(
     StoreSDNode *St, SmallVectorImpl<MemOpLink> &StoreNodes) {
   // This holds the base pointer, index, and the offset in bytes from the base
   // pointer.
-  BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);
+  BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
   EVT MemVT = St->getMemoryVT();
 
   SDValue Val = peekThroughBitcast(St->getValue());
@@ -12949,7 +12965,7 @@ void DAGCombiner::getStoreMergeCandidates(
   EVT LoadVT;
   if (IsLoadSrc) {
     auto *Ld = cast<LoadSDNode>(Val);
-    LBasePtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG);
+    LBasePtr = BaseIndexOffset::match(Ld, DAG);
     LoadVT = Ld->getMemoryVT();
     // Load and store should be the same type.
     if (MemVT != LoadVT)
@@ -12968,7 +12984,7 @@ void DAGCombiner::getStoreMergeCandidates(
         return false;
       // The Load's Base Ptr must also match
       if (LoadSDNode *OtherLd = dyn_cast<LoadSDNode>(Val)) {
-        auto LPtr = BaseIndexOffset::match(OtherLd->getBasePtr(), DAG);
+        auto LPtr = BaseIndexOffset::match(OtherLd, DAG);
         if (LoadVT != OtherLd->getMemoryVT())
           return false;
         if (!(LBasePtr.equalBaseIndex(LPtr, DAG)))
@@ -12992,7 +13008,7 @@ void DAGCombiner::getStoreMergeCandidates(
           Val.getOpcode() != ISD::EXTRACT_SUBVECTOR)
         return false;
     }
-    Ptr = BaseIndexOffset::match(Other->getBasePtr(), DAG);
+    Ptr = BaseIndexOffset::match(Other, DAG);
     return (BasePtr.equalBaseIndex(Ptr, DAG, Offset));
   };
 
@@ -13365,7 +13381,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
         if (Ld->getMemoryVT() != MemVT)
           break;
 
-        BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld->getBasePtr(), DAG);
+        BaseIndexOffset LdPtr = BaseIndexOffset::match(Ld, DAG);
         // If this is not the first ptr that we check.
         int64_t LdOffset = 0;
         if (LdBasePtr.getBase().getNode()) {
@@ -17432,44 +17448,46 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
   unsigned NumBytes1 = Op1->getMemoryVT().getStoreSize();
 
   // Check for BaseIndexOffset matching.
-  BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0->getBasePtr(), DAG);
-  BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1->getBasePtr(), DAG);
+  BaseIndexOffset BasePtr0 = BaseIndexOffset::match(Op0, DAG);
+  BaseIndexOffset BasePtr1 = BaseIndexOffset::match(Op1, DAG);
   int64_t PtrDiff;
-  if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff))
-    return !((NumBytes0 <= PtrDiff) || (PtrDiff + NumBytes1 <= 0));
+  if (BasePtr0.getBase().getNode() && BasePtr1.getBase().getNode()) {
+    if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff))
+      return !((NumBytes0 <= PtrDiff) || (PtrDiff + NumBytes1 <= 0));
 
-  // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
-  // able to calculate their relative offset if at least one arises
-  // from an alloca. However, these allocas cannot overlap and we
-  // can infer there is no alias.
-  if (auto *A = dyn_cast<FrameIndexSDNode>(BasePtr0.getBase()))
-    if (auto *B = dyn_cast<FrameIndexSDNode>(BasePtr1.getBase())) {
-      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
-      // If the base are the same frame index but the we couldn't find a
-      // constant offset, (indices are different) be conservative.
-      if (A != B && (!MFI.isFixedObjectIndex(A->getIndex()) ||
-                     !MFI.isFixedObjectIndex(B->getIndex())))
-        return false;
-    }
+    // If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
+    // able to calculate their relative offset if at least one arises
+    // from an alloca. However, these allocas cannot overlap and we
+    // can infer there is no alias.
+    if (auto *A = dyn_cast<FrameIndexSDNode>(BasePtr0.getBase()))
+      if (auto *B = dyn_cast<FrameIndexSDNode>(BasePtr1.getBase())) {
+        MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+        // If the base are the same frame index but the we couldn't find a
+        // constant offset, (indices are different) be conservative.
+        if (A != B && (!MFI.isFixedObjectIndex(A->getIndex()) ||
+                       !MFI.isFixedObjectIndex(B->getIndex())))
+          return false;
+      }
 
-  bool IsFI0 = isa<FrameIndexSDNode>(BasePtr0.getBase());
-  bool IsFI1 = isa<FrameIndexSDNode>(BasePtr1.getBase());
-  bool IsGV0 = isa<GlobalAddressSDNode>(BasePtr0.getBase());
-  bool IsGV1 = isa<GlobalAddressSDNode>(BasePtr1.getBase());
-  bool IsCV0 = isa<ConstantPoolSDNode>(BasePtr0.getBase());
-  bool IsCV1 = isa<ConstantPoolSDNode>(BasePtr1.getBase());
+    bool IsFI0 = isa<FrameIndexSDNode>(BasePtr0.getBase());
+    bool IsFI1 = isa<FrameIndexSDNode>(BasePtr1.getBase());
+    bool IsGV0 = isa<GlobalAddressSDNode>(BasePtr0.getBase());
+    bool IsGV1 = isa<GlobalAddressSDNode>(BasePtr1.getBase());
+    bool IsCV0 = isa<ConstantPoolSDNode>(BasePtr0.getBase());
+    bool IsCV1 = isa<ConstantPoolSDNode>(BasePtr1.getBase());
 
-  // If of mismatched base types or checkable indices we can check
-  // they do not alias.
-  if ((BasePtr0.getIndex() == BasePtr1.getIndex() || (IsFI0 != IsFI1) ||
-       (IsGV0 != IsGV1) || (IsCV0 != IsCV1)) &&
-      (IsFI0 || IsGV0 || IsCV0) && (IsFI1 || IsGV1 || IsCV1))
-    return false;
+    // If of mismatched base types or checkable indices we can check
+    // they do not alias.
+    if ((BasePtr0.getIndex() == BasePtr1.getIndex() || (IsFI0 != IsFI1) ||
+         (IsGV0 != IsGV1) || (IsCV0 != IsCV1)) &&
+        (IsFI0 || IsGV0 || IsCV0) && (IsFI1 || IsGV1 || IsCV1))
+      return false;
+  }
 
-  // If we know required SrcValue1 and SrcValue2 have relatively large alignment
-  // compared to the size and offset of the access, we may be able to prove they
-  // do not alias. This check is conservative for now to catch cases created by
-  // splitting vector types.
+  // If we know required SrcValue1 and SrcValue2 have relatively large
+  // alignment compared to the size and offset of the access, we may be able
+  // to prove they do not alias. This check is conservative for now to catch
+  // cases created by splitting vector types.
   int64_t SrcValOffset0 = Op0->getSrcValueOffset();
   int64_t SrcValOffset1 = Op1->getSrcValueOffset();
   unsigned OrigAlignment0 = Op0->getOriginalAlignment();
@@ -17479,8 +17497,8 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
   int64_t OffAlign0 = SrcValOffset0 % OrigAlignment0;
   int64_t OffAlign1 = SrcValOffset1 % OrigAlignment1;
 
-  // There is no overlap between these relatively aligned accesses of similar
-  // size. Return no alias.
+  // There is no overlap between these relatively aligned accesses of
+  // similar size. Return no alias.
   if ((OffAlign0 + NumBytes0) <= OffAlign1 ||
       (OffAlign1 + NumBytes1) <= OffAlign0)
     return false;
@@ -17643,7 +17661,7 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
 
   // This holds the base pointer, index, and the offset in bytes from the base
   // pointer.
-  BaseIndexOffset BasePtr = BaseIndexOffset::match(St->getBasePtr(), DAG);
+  BaseIndexOffset BasePtr = BaseIndexOffset::match(St, DAG);
 
   // We must have a base and an offset.
   if (!BasePtr.getBase().getNode())
@@ -17669,7 +17687,7 @@ bool DAGCombiner::findBetterNeighborChains(StoreSDNode *St) {
       break;
 
     // Find the base pointer and offset for this memory node.
-    BaseIndexOffset Ptr = BaseIndexOffset::match(Index->getBasePtr(), DAG);
+    BaseIndexOffset Ptr = BaseIndexOffset::match(Index, DAG);
 
     // Check that the base pointer is the same as the original one.
     if (!BasePtr.equalBaseIndex(Ptr, DAG))
@@ -2965,12 +2965,12 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
     case ISD::ZERO_EXTEND:
       LHS = DAG.getNode(ISD::AssertZext, dl, OuterType, Res,
                         DAG.getValueType(AtomicType));
-      RHS = DAG.getNode(ISD::ZERO_EXTEND, dl, OuterType, Node->getOperand(2));
+      RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType);
       ExtRes = LHS;
       break;
     case ISD::ANY_EXTEND:
       LHS = DAG.getZeroExtendInReg(Res, dl, AtomicType);
-      RHS = DAG.getNode(ISD::ZERO_EXTEND, dl, OuterType, Node->getOperand(2));
+      RHS = DAG.getZeroExtendInReg(Node->getOperand(2), dl, AtomicType);
       break;
     default:
      llvm_unreachable("Invalid atomic op extension");
@@ -7947,11 +7947,8 @@ bool SelectionDAG::areNonVolatileConsecutiveLoads(LoadSDNode *LD,
   if (VT.getSizeInBits() / 8 != Bytes)
     return false;
 
-  SDValue Loc = LD->getOperand(1);
-  SDValue BaseLoc = Base->getOperand(1);
-
-  auto BaseLocDecomp = BaseIndexOffset::match(BaseLoc, *this);
-  auto LocDecomp = BaseIndexOffset::match(Loc, *this);
+  auto BaseLocDecomp = BaseIndexOffset::match(Base, *this);
+  auto LocDecomp = BaseIndexOffset::match(LD, *this);
 
   int64_t Offset = 0;
   if (BaseLocDecomp.equalBaseIndex(LocDecomp, *this, Offset))
@@ -21,6 +21,9 @@ using namespace llvm;
 
 bool BaseIndexOffset::equalBaseIndex(BaseIndexOffset &Other,
                                      const SelectionDAG &DAG, int64_t &Off) {
+  // Conservatively fail if we a match failed..
+  if (!Base.getNode() || !Other.Base.getNode())
+    return false;
   // Initial Offset difference.
   Off = Other.Offset - Offset;
 
@@ -72,13 +75,29 @@ bool BaseIndexOffset::equalBaseIndex(BaseIndexOffset &Other,
 }
 
 /// Parses tree in Ptr for base, index, offset addresses.
-BaseIndexOffset BaseIndexOffset::match(SDValue Ptr, const SelectionDAG &DAG) {
+BaseIndexOffset BaseIndexOffset::match(LSBaseSDNode *N,
+                                       const SelectionDAG &DAG) {
+  SDValue Ptr = N->getBasePtr();
+
   // (((B + I*M) + c)) + c ...
   SDValue Base = DAG.getTargetLoweringInfo().unwrapAddress(Ptr);
   SDValue Index = SDValue();
   int64_t Offset = 0;
   bool IsIndexSignExt = false;
 
+  // pre-inc/pre-dec ops are components of EA.
+  if (N->getAddressingMode() == ISD::PRE_INC) {
+    if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
+      Offset += C->getSExtValue();
+    else // If unknown, give up now.
+      return BaseIndexOffset(SDValue(), SDValue(), 0, false);
+  } else if (N->getAddressingMode() == ISD::PRE_DEC) {
+    if (auto *C = dyn_cast<ConstantSDNode>(N->getOffset()))
+      Offset -= C->getSExtValue();
+    else // If unknown, give up now.
+      return BaseIndexOffset(SDValue(), SDValue(), 0, false);
+  }
+
   // Consume constant adds & ors with appropriate masking.
   while (Base->getOpcode() == ISD::ADD || Base->getOpcode() == ISD::OR) {
     if (auto *C = dyn_cast<ConstantSDNode>(Base->getOperand(1))) {
@@ -132,9 +132,18 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
     setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
     setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");
 
-    // Darwin 10 and higher has an optimized __bzero.
-    if (!TT.isMacOSX() || !TT.isMacOSXVersionLT(10, 6) || TT.isArch64Bit()) {
-      setLibcallName(RTLIB::BZERO, TT.isAArch64() ? "bzero" : "__bzero");
+    // Some darwins have an optimized __bzero/bzero function.
+    switch (TT.getArch()) {
+    case Triple::x86:
+    case Triple::x86_64:
+      if (TT.isMacOSX() && !TT.isMacOSXVersionLT(10, 6))
+        setLibcallName(RTLIB::BZERO, "__bzero");
+      break;
+    case Triple::aarch64:
+      setLibcallName(RTLIB::BZERO, "bzero");
+      break;
+    default:
+      break;
     }
 
     if (darwinHasSinCos(TT)) {
@@ -954,7 +954,12 @@ Expected<Constant *> IRLinker::linkGlobalValueProto(GlobalValue *SGV,
     NewGV->setLinkage(GlobalValue::InternalLinkage);
 
   Constant *C = NewGV;
-  if (DGV)
+  // Only create a bitcast if necessary. In particular, with
+  // DebugTypeODRUniquing we may reach metadata in the destination module
+  // containing a GV from the source module, in which case SGV will be
+  // the same as DGV and NewGV, and TypeMap.get() will assert since it
+  // assumes it is being invoked on a type in the source module.
+  if (DGV && NewGV != SGV)
     C = ConstantExpr::getBitCast(NewGV, TypeMap.get(SGV->getType()));
 
   if (DGV && NewGV != DGV) {
@@ -76,6 +76,14 @@ bool CodeViewContext::addFile(MCStreamer &OS, unsigned FileNumber,
   return true;
 }
 
+MCCVFunctionInfo *CodeViewContext::getCVFunctionInfo(unsigned FuncId) {
+  if (FuncId >= Functions.size())
+    return nullptr;
+  if (Functions[FuncId].isUnallocatedFunctionInfo())
+    return nullptr;
+  return &Functions[FuncId];
+}
+
 bool CodeViewContext::recordFunctionId(unsigned FuncId) {
   if (FuncId >= Functions.size())
     Functions.resize(FuncId + 1);
@@ -247,6 +255,67 @@ void CodeViewContext::emitFileChecksumOffset(MCObjectStreamer &OS,
   OS.EmitValueImpl(SRE, 4);
 }
 
+void CodeViewContext::addLineEntry(const MCCVLineEntry &LineEntry) {
+  size_t Offset = MCCVLines.size();
+  auto I = MCCVLineStartStop.insert(
+      {LineEntry.getFunctionId(), {Offset, Offset + 1}});
+  if (!I.second)
+    I.first->second.second = Offset + 1;
+  MCCVLines.push_back(LineEntry);
+}
+
+std::vector<MCCVLineEntry>
+CodeViewContext::getFunctionLineEntries(unsigned FuncId) {
+  std::vector<MCCVLineEntry> FilteredLines;
+  auto I = MCCVLineStartStop.find(FuncId);
+  if (I != MCCVLineStartStop.end()) {
+    MCCVFunctionInfo *SiteInfo = getCVFunctionInfo(FuncId);
+    for (size_t Idx = I->second.first, End = I->second.second; Idx != End;
+         ++Idx) {
+      unsigned LocationFuncId = MCCVLines[Idx].getFunctionId();
+      if (LocationFuncId == FuncId) {
+        // This was a .cv_loc directly for FuncId, so record it.
+        FilteredLines.push_back(MCCVLines[Idx]);
+      } else {
+        // Check if the current location is inlined in this function. If it is,
+        // synthesize a statement .cv_loc at the original inlined call site.
+        auto I = SiteInfo->InlinedAtMap.find(LocationFuncId);
+        if (I != SiteInfo->InlinedAtMap.end()) {
+          MCCVFunctionInfo::LineInfo &IA = I->second;
+          // Only add the location if it differs from the previous location.
+          // Large inlined calls will have many .cv_loc entries and we only need
+          // one line table entry in the parent function.
+          if (FilteredLines.empty() ||
+              FilteredLines.back().getFileNum() != IA.File ||
+              FilteredLines.back().getLine() != IA.Line ||
+              FilteredLines.back().getColumn() != IA.Col) {
+            FilteredLines.push_back(MCCVLineEntry(
+                MCCVLines[Idx].getLabel(),
+                MCCVLoc(FuncId, IA.File, IA.Line, IA.Col, false, false)));
+          }
+        }
+      }
+    }
+  }
+  return FilteredLines;
+}
+
+std::pair<size_t, size_t> CodeViewContext::getLineExtent(unsigned FuncId) {
+  auto I = MCCVLineStartStop.find(FuncId);
+  // Return an empty extent if there are no cv_locs for this function id.
+  if (I == MCCVLineStartStop.end())
+    return {~0ULL, 0};
+  return I->second;
+}
+
+ArrayRef<MCCVLineEntry> CodeViewContext::getLinesForExtent(size_t L, size_t R) {
+  if (R <= L)
+    return None;
+  if (L >= MCCVLines.size())
+    return None;
+  return makeArrayRef(&MCCVLines[L], R - L);
+}
+
 void CodeViewContext::emitLineTableForFunction(MCObjectStreamer &OS,
                                                unsigned FuncId,
                                                const MCSymbol *FuncBegin,
@@ -868,6 +868,40 @@ bool AArch64InstructionSelector::select(MachineInstr &I,
     if (OpFlags & AArch64II::MO_GOT) {
       I.setDesc(TII.get(AArch64::LOADgot));
       I.getOperand(1).setTargetFlags(OpFlags);
+    } else if (TM.getCodeModel() == CodeModel::Large) {
+      // Materialize the global using movz/movk instructions.
+      unsigned MovZDstReg = MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+      auto InsertPt = std::next(I.getIterator());
+      auto MovZ =
+          BuildMI(MBB, InsertPt, I.getDebugLoc(), TII.get(AArch64::MOVZXi))
+              .addDef(MovZDstReg);
+      MovZ->addOperand(MF, I.getOperand(1));
+      MovZ->getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_G0 |
+                                         AArch64II::MO_NC);
+      MovZ->addOperand(MF, MachineOperand::CreateImm(0));
+      constrainSelectedInstRegOperands(*MovZ, TII, TRI, RBI);
+
+      auto BuildMovK = [&](unsigned SrcReg, unsigned char Flags,
+                           unsigned Offset, unsigned ForceDstReg) {
+        unsigned DstReg =
+            ForceDstReg ? ForceDstReg
+                        : MRI.createVirtualRegister(&AArch64::GPR64RegClass);
+        auto MovI = BuildMI(MBB, InsertPt, MovZ->getDebugLoc(),
+                            TII.get(AArch64::MOVKXi))
+                        .addDef(DstReg)
+                        .addReg(SrcReg);
+        MovI->addOperand(MF, MachineOperand::CreateGA(
+                                 GV, MovZ->getOperand(1).getOffset(), Flags));
+        MovI->addOperand(MF, MachineOperand::CreateImm(Offset));
+        constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI);
+        return DstReg;
+      };
+      unsigned DstReg = BuildMovK(MovZ->getOperand(0).getReg(),
+                                  AArch64II::MO_G1 | AArch64II::MO_NC, 16, 0);
+      DstReg = BuildMovK(DstReg, AArch64II::MO_G2 | AArch64II::MO_NC, 32, 0);
+      BuildMovK(DstReg, AArch64II::MO_G3, 48, I.getOperand(0).getReg());
+      I.eraseFromParent();
+      return true;
     } else {
       I.setDesc(TII.get(AArch64::MOVaddr));
       I.getOperand(1).setTargetFlags(OpFlags | AArch64II::MO_PAGE);
@@ -821,7 +821,6 @@ namespace llvm {
             MutableArrayRef<int> NewMask, unsigned Options = None);
   OpRef packp(ShuffleMask SM, OpRef Va, OpRef Vb, ResultStack &Results,
               MutableArrayRef<int> NewMask);
-  OpRef zerous(ShuffleMask SM, OpRef Va, ResultStack &Results);
   OpRef vmuxs(ArrayRef<uint8_t> Bytes, OpRef Va, OpRef Vb,
               ResultStack &Results);
   OpRef vmuxp(ArrayRef<uint8_t> Bytes, OpRef Va, OpRef Vb,
@@ -1139,25 +1138,6 @@ OpRef HvxSelector::packp(ShuffleMask SM, OpRef Va, OpRef Vb,
   return concat(Out[0], Out[1], Results);
 }
 
-OpRef HvxSelector::zerous(ShuffleMask SM, OpRef Va, ResultStack &Results) {
-  DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
-
-  int VecLen = SM.Mask.size();
-  SmallVector<uint8_t,128> UsedBytes(VecLen);
-  bool HasUnused = false;
-  for (int I = 0; I != VecLen; ++I) {
-    if (SM.Mask[I] != -1)
-      UsedBytes[I] = 0xFF;
-    else
-      HasUnused = true;
-  }
-  if (!HasUnused)
-    return Va;
-  SDValue B = getVectorConstant(UsedBytes, SDLoc(Results.InpNode));
-  Results.push(Hexagon::V6_vand, getSingleVT(MVT::i8), {Va, OpRef(B)});
-  return OpRef::res(Results.top());
-}
-
 OpRef HvxSelector::vmuxs(ArrayRef<uint8_t> Bytes, OpRef Va, OpRef Vb,
                          ResultStack &Results) {
   DEBUG_WITH_TYPE("isel", {dbgs() << __func__ << '\n';});
@@ -142,6 +142,9 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
   setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
   setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
 
+  // Sub-word ATOMIC_CMP_SWAP need to ensure that the input is zero-extended.
+  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Custom);
+
   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD.
   for (MVT VT : MVT::integer_valuetypes()) {
     setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
@@ -1154,6 +1157,8 @@ const char *PPCTargetLowering::getTargetNodeName(unsigned Opcode) const {
   case PPCISD::Hi: return "PPCISD::Hi";
   case PPCISD::Lo: return "PPCISD::Lo";
   case PPCISD::TOC_ENTRY: return "PPCISD::TOC_ENTRY";
+  case PPCISD::ATOMIC_CMP_SWAP_8: return "PPCISD::ATOMIC_CMP_SWAP_8";
+  case PPCISD::ATOMIC_CMP_SWAP_16: return "PPCISD::ATOMIC_CMP_SWAP_16";
   case PPCISD::DYNALLOC: return "PPCISD::DYNALLOC";
   case PPCISD::DYNAREAOFFSET: return "PPCISD::DYNAREAOFFSET";
   case PPCISD::GlobalBaseReg: return "PPCISD::GlobalBaseReg";
@@ -8834,6 +8839,42 @@ SDValue PPCTargetLowering::LowerBSWAP(SDValue Op, SelectionDAG &DAG) const {
   return Op;
 }
 
+// ATOMIC_CMP_SWAP for i8/i16 needs to zero-extend its input since it will be
+// compared to a value that is atomically loaded (atomic loads zero-extend).
+SDValue PPCTargetLowering::LowerATOMIC_CMP_SWAP(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  assert(Op.getOpcode() == ISD::ATOMIC_CMP_SWAP &&
+         "Expecting an atomic compare-and-swap here.");
+  SDLoc dl(Op);
+  auto *AtomicNode = cast<AtomicSDNode>(Op.getNode());
+  EVT MemVT = AtomicNode->getMemoryVT();
+  if (MemVT.getSizeInBits() >= 32)
+    return Op;
+
+  SDValue CmpOp = Op.getOperand(2);
+  // If this is already correctly zero-extended, leave it alone.
+  auto HighBits = APInt::getHighBitsSet(32, 32 - MemVT.getSizeInBits());
+  if (DAG.MaskedValueIsZero(CmpOp, HighBits))
+    return Op;
+
+  // Clear the high bits of the compare operand.
+  unsigned MaskVal = (1 << MemVT.getSizeInBits()) - 1;
+  SDValue NewCmpOp =
+    DAG.getNode(ISD::AND, dl, MVT::i32, CmpOp,
+                DAG.getConstant(MaskVal, dl, MVT::i32));
+
+  // Replace the existing compare operand with the properly zero-extended one.
+  SmallVector<SDValue, 4> Ops;
+  for (int i = 0, e = AtomicNode->getNumOperands(); i < e; i++)
+    Ops.push_back(AtomicNode->getOperand(i));
+  Ops[2] = NewCmpOp;
+  MachineMemOperand *MMO = AtomicNode->getMemOperand();
+  SDVTList Tys = DAG.getVTList(MVT::i32, MVT::Other);
+  auto NodeTy =
+    (MemVT == MVT::i8) ? PPCISD::ATOMIC_CMP_SWAP_8 : PPCISD::ATOMIC_CMP_SWAP_16;
+  return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
+}
+
 SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                   SelectionDAG &DAG) const {
   SDLoc dl(Op);
@@ -9325,6 +9366,8 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     return LowerREM(Op, DAG);
   case ISD::BSWAP:
     return LowerBSWAP(Op, DAG);
+  case ISD::ATOMIC_CMP_SWAP:
+    return LowerATOMIC_CMP_SWAP(Op, DAG);
   }
 }
 
@@ -430,6 +430,11 @@ namespace llvm {
       /// The 4xf32 load used for v4i1 constants.
       QVLFSb,
 
+      /// ATOMIC_CMP_SWAP - the exact same as the target-independent nodes
+      /// except they ensure that the compare input is zero-extended for
+      /// sub-word versions because the atomic loads zero-extend.
+      ATOMIC_CMP_SWAP_8, ATOMIC_CMP_SWAP_16,
+
       /// GPRC = TOC_ENTRY GA, TOC
       /// Loads the entry for GA from the TOC, where the TOC base is given by
       /// the last operand.
@@ -955,6 +960,7 @@ namespace llvm {
     SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerREM(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerBSWAP(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerATOMIC_CMP_SWAP(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;
@@ -257,6 +257,13 @@ def PPCvcmp_o : SDNode<"PPCISD::VCMPo", SDT_PPCvcmp, [SDNPOutGlue]>;
 def PPCcondbranch : SDNode<"PPCISD::COND_BRANCH", SDT_PPCcondbr,
                            [SDNPHasChain, SDNPOptInGlue]>;
 
+// PPC-specific atomic operations.
+def PPCatomicCmpSwap_8 :
+  SDNode<"PPCISD::ATOMIC_CMP_SWAP_8", SDTAtomic3,
+         [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def PPCatomicCmpSwap_16 :
+  SDNode<"PPCISD::ATOMIC_CMP_SWAP_16", SDTAtomic3,
+         [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
 def PPClbrx : SDNode<"PPCISD::LBRX", SDT_PPClbrx,
                      [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
 def PPCstbrx : SDNode<"PPCISD::STBRX", SDT_PPCstbrx,
@@ -1710,6 +1717,11 @@ let usesCustomInserter = 1 in {
   }
 }
 
+def : Pat<(PPCatomicCmpSwap_8 xoaddr:$ptr, i32:$old, i32:$new),
+          (ATOMIC_CMP_SWAP_I8 xoaddr:$ptr, i32:$old, i32:$new)>;
+def : Pat<(PPCatomicCmpSwap_16 xoaddr:$ptr, i32:$old, i32:$new),
+          (ATOMIC_CMP_SWAP_I16 xoaddr:$ptr, i32:$old, i32:$new)>;
+
 // Instructions to support atomic operations
 let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in {
 def LBARX : XForm_1<31, 52, (outs gprc:$rD), (ins memrr:$src),
@@ -7893,8 +7893,14 @@ LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
     IndicesVT = MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits()),
                                  VT.getVectorNumElements());
   IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
-  return DAG.getNode(VT == MVT::v16i8 ? X86ISD::PSHUFB : X86ISD::VPERMV,
-                     SDLoc(V), VT, IndicesVec, SrcVec);
+  if (SrcVec.getValueSizeInBits() < IndicesVT.getSizeInBits()) {
+    SrcVec =
+        DAG.getNode(ISD::INSERT_SUBVECTOR, SDLoc(SrcVec), VT, DAG.getUNDEF(VT),
+                    SrcVec, DAG.getIntPtrConstant(0, SDLoc(SrcVec)));
+  }
+  if (VT == MVT::v16i8)
+    return DAG.getNode(X86ISD::PSHUFB, SDLoc(V), VT, SrcVec, IndicesVec);
+  return DAG.getNode(X86ISD::VPERMV, SDLoc(V), VT, IndicesVec, SrcVec);
 }
 
 SDValue
@@ -18262,6 +18268,18 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
     return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
   }
 
+  // For v64i1 without 64-bit support we need to split and rejoin.
+  if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
+    assert(Subtarget.hasBWI() && "Expected BWI to be legal");
+    SDValue Op1Lo = extractSubVector(Op1, 0, DAG, DL, 32);
+    SDValue Op2Lo = extractSubVector(Op2, 0, DAG, DL, 32);
+    SDValue Op1Hi = extractSubVector(Op1, 32, DAG, DL, 32);
+    SDValue Op2Hi = extractSubVector(Op2, 32, DAG, DL, 32);
+    SDValue Lo = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Lo, Op2Lo);
+    SDValue Hi = DAG.getSelect(DL, MVT::v32i1, Cond, Op1Hi, Op2Hi);
+    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
+  }
+
   if (VT.isVector() && VT.getVectorElementType() == MVT::i1) {
     SDValue Op1Scalar;
     if (ISD::isBuildVectorOfConstantSDNodes(Op1.getNode()))
@@ -28652,13 +28670,14 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
     }
   }
 
+  SDValue NewV1 = V1; // Save operand in case early exit happens.
   if (matchUnaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
-                              V1, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
-                              ShuffleVT) &&
+                              NewV1, DL, DAG, Subtarget, Shuffle,
+                              ShuffleSrcVT, ShuffleVT) &&
       (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
     if (Depth == 1 && Root.getOpcode() == Shuffle)
       return SDValue(); // Nothing to do!
-    Res = DAG.getBitcast(ShuffleSrcVT, V1);
+    Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
     DCI.AddToWorklist(Res.getNode());
     Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
     DCI.AddToWorklist(Res.getNode());
@@ -28680,33 +28699,36 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
     }
   }
 
+  SDValue NewV1 = V1; // Save operands in case early exit happens.
+  SDValue NewV2 = V2;
   if (matchBinaryVectorShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain,
-                               V1, V2, DL, DAG, Subtarget, Shuffle,
+                               NewV1, NewV2, DL, DAG, Subtarget, Shuffle,
                                ShuffleSrcVT, ShuffleVT, UnaryShuffle) &&
       (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
     if (Depth == 1 && Root.getOpcode() == Shuffle)
       return SDValue(); // Nothing to do!
-    V1 = DAG.getBitcast(ShuffleSrcVT, V1);
-    DCI.AddToWorklist(V1.getNode());
-    V2 = DAG.getBitcast(ShuffleSrcVT, V2);
-    DCI.AddToWorklist(V2.getNode());
-    Res = DAG.getNode(Shuffle, DL, ShuffleVT, V1, V2);
+    NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
+    DCI.AddToWorklist(NewV1.getNode());
+    NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
+    DCI.AddToWorklist(NewV2.getNode());
+    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
     DCI.AddToWorklist(Res.getNode());
     return DAG.getBitcast(RootVT, Res);
   }
 
-  if (matchBinaryPermuteVectorShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
-                                      AllowIntDomain, V1, V2, DL, DAG,
-                                      Subtarget, Shuffle, ShuffleVT,
-                                      PermuteImm) &&
+  NewV1 = V1; // Save operands in case early exit happens.
+  NewV2 = V2;
+  if (matchBinaryPermuteVectorShuffle(
+          MaskVT, Mask, Zeroable, AllowFloatDomain, AllowIntDomain, NewV1,
+          NewV2, DL, DAG, Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
       (!IsEVEXShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
     if (Depth == 1 && Root.getOpcode() == Shuffle)
       return SDValue(); // Nothing to do!
-    V1 = DAG.getBitcast(ShuffleVT, V1);
-    DCI.AddToWorklist(V1.getNode());
-    V2 = DAG.getBitcast(ShuffleVT, V2);
-    DCI.AddToWorklist(V2.getNode());
-    Res = DAG.getNode(Shuffle, DL, ShuffleVT, V1, V2,
+    NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
+    DCI.AddToWorklist(NewV1.getNode());
+    NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
+    DCI.AddToWorklist(NewV2.getNode());
+    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
                       DAG.getConstant(PermuteImm, DL, MVT::i8));
     DCI.AddToWorklist(Res.getNode());
     return DAG.getBitcast(RootVT, Res);
@@ -648,7 +648,7 @@ class GVNHoist {
         // track in a CHI. In the PDom walk, there can be values in the
         // stack which are not control dependent e.g., nested loop.
         if (si != RenameStack.end() && si->second.size() &&
-            DT->dominates(Pred, si->second.back()->getParent())) {
+            DT->properlyDominates(Pred, si->second.back()->getParent())) {
           C.Dest = BB;                     // Assign the edge
           C.I = si->second.pop_back_val(); // Assign the argument
           DEBUG(dbgs() << "\nCHI Inserted in BB: " << C.Dest->getName()
@@ -14,7 +14,6 @@
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Analysis/DivergenceAnalysis.h"
-#include "llvm/Analysis/LoopInfo.h"
 #include "llvm/Analysis/RegionInfo.h"
 #include "llvm/Analysis/RegionIterator.h"
 #include "llvm/Analysis/RegionPass.h"
@@ -177,9 +176,8 @@ class StructurizeCFG : public RegionPass {
   Region *ParentRegion;
 
   DominatorTree *DT;
-  LoopInfo *LI;
 
-  SmallVector<RegionNode *, 8> Order;
+  std::deque<RegionNode *> Order;
   BBSet Visited;
 
   BBPhiMap DeletedPhis;
@@ -204,7 +202,7 @@ class StructurizeCFG : public RegionPass {
 
   void gatherPredicates(RegionNode *N);
 
-  void collectInfos();
+  void analyzeNode(RegionNode *N);
 
   void insertConditions(bool Loops);
 
@@ -258,7 +256,6 @@ class StructurizeCFG : public RegionPass {
     AU.addRequired<DivergenceAnalysis>();
     AU.addRequiredID(LowerSwitchID);
     AU.addRequired<DominatorTreeWrapperPass>();
-    AU.addRequired<LoopInfoWrapperPass>();
 
     AU.addPreserved<DominatorTreeWrapperPass>();
     RegionPass::getAnalysisUsage(AU);
@@ -292,55 +289,17 @@ bool StructurizeCFG::doInitialization(Region *R, RGPassManager &RGM) {
 
 /// \brief Build up the general order of nodes
 void StructurizeCFG::orderNodes() {
-  ReversePostOrderTraversal<Region*> RPOT(ParentRegion);
-  SmallDenseMap<Loop*, unsigned, 8> LoopBlocks;
+  assert(Visited.empty());
+  assert(Predicates.empty());
+  assert(Loops.empty());
+  assert(LoopPreds.empty());
 
-  // The reverse post-order traversal of the list gives us an ordering close
-  // to what we want. The only problem with it is that sometimes backedges
-  // for outer loops will be visited before backedges for inner loops.
-  for (RegionNode *RN : RPOT) {
-    BasicBlock *BB = RN->getEntry();
-    Loop *Loop = LI->getLoopFor(BB);
-    ++LoopBlocks[Loop];
+  // This must be RPO order for the back edge detection to work
+  for (RegionNode *RN : ReversePostOrderTraversal<Region*>(ParentRegion)) {
+    // FIXME: Is there a better order to use for structurization?
+    Order.push_back(RN);
+    analyzeNode(RN);
   }
-
-  unsigned CurrentLoopDepth = 0;
-  Loop *CurrentLoop = nullptr;
-  for (auto I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
-    BasicBlock *BB = (*I)->getEntry();
-    unsigned LoopDepth = LI->getLoopDepth(BB);
-
-    if (is_contained(Order, *I))
-      continue;
-
-    if (LoopDepth < CurrentLoopDepth) {
-      // Make sure we have visited all blocks in this loop before moving back to
-      // the outer loop.
-
-      auto LoopI = I;
-      while (unsigned &BlockCount = LoopBlocks[CurrentLoop]) {
-        LoopI++;
-        BasicBlock *LoopBB = (*LoopI)->getEntry();
-        if (LI->getLoopFor(LoopBB) == CurrentLoop) {
-          --BlockCount;
-          Order.push_back(*LoopI);
-        }
-      }
-    }
-
-    CurrentLoop = LI->getLoopFor(BB);
-    if (CurrentLoop)
-      LoopBlocks[CurrentLoop]--;
-
-    CurrentLoopDepth = LoopDepth;
-    Order.push_back(*I);
-  }
-
-  // This pass originally used a post-order traversal and then operated on
-  // the list in reverse. Now that we are using a reverse post-order traversal
-  // rather than re-working the whole pass to operate on the list in order,
-  // we just reverse the list and continue to operate on it in reverse.
-  std::reverse(Order.begin(), Order.end());
 }
 
 /// \brief Determine the end of the loops
@@ -466,32 +425,19 @@ void StructurizeCFG::gatherPredicates(RegionNode *N) {
 }
 
-/// \brief Collect various loop and predicate infos
-void StructurizeCFG::collectInfos() {
-  // Reset predicate
-  Predicates.clear();
+void StructurizeCFG::analyzeNode(RegionNode *RN) {
+  DEBUG(dbgs() << "Visiting: "
+               << (RN->isSubRegion() ? "SubRegion with entry: " : "")
+               << RN->getEntry()->getName() << '\n');
 
-  // and loop infos
-  Loops.clear();
-  LoopPreds.clear();
+  // Analyze all the conditions leading to a node
+  gatherPredicates(RN);
 
-  // Reset the visited nodes
-  Visited.clear();
+  // Remember that we've seen this node
+  Visited.insert(RN->getEntry());
 
-  for (RegionNode *RN : reverse(Order)) {
-    DEBUG(dbgs() << "Visiting: "
-                 << (RN->isSubRegion() ? "SubRegion with entry: " : "")
-                 << RN->getEntry()->getName() << " Loop Depth: "
-                 << LI->getLoopDepth(RN->getEntry()) << "\n");
-
-    // Analyze all the conditions leading to a node
-    gatherPredicates(RN);
-
-    // Remember that we've seen this node
-    Visited.insert(RN->getEntry());
-
-    // Find the last back edges
-    analyzeLoops(RN);
-  }
+  // Find the last back edges
+  analyzeLoops(RN);
 }
 
 /// \brief Insert the missing branch conditions
@@ -664,7 +610,7 @@ void StructurizeCFG::changeExit(RegionNode *Node, BasicBlock *NewExit,
 BasicBlock *StructurizeCFG::getNextFlow(BasicBlock *Dominator) {
   LLVMContext &Context = Func->getContext();
   BasicBlock *Insert = Order.empty() ? ParentRegion->getExit() :
-                       Order.back()->getEntry();
+                       Order.front()->getEntry();
   BasicBlock *Flow = BasicBlock::Create(Context, FlowBlockName,
                                         Func, Insert);
   DT->addNewBlock(Flow, Dominator);
@@ -744,7 +690,8 @@ bool StructurizeCFG::isPredictableTrue(RegionNode *Node) {
 /// Take one node from the order vector and wire it up
 void StructurizeCFG::wireFlow(bool ExitUseAllowed,
                               BasicBlock *LoopEnd) {
-  RegionNode *Node = Order.pop_back_val();
+  RegionNode *Node = Order.front();
+  Order.pop_front();
   Visited.insert(Node->getEntry());
 
   if (isPredictableTrue(Node)) {
@@ -768,7 +715,7 @@ void StructurizeCFG::wireFlow(bool ExitUseAllowed,
 
   PrevNode = Node;
   while (!Order.empty() && !Visited.count(LoopEnd) &&
-         dominatesPredicates(Entry, Order.back())) {
+         dominatesPredicates(Entry, Order.front())) {
     handleLoops(false, LoopEnd);
   }
 
@@ -779,7 +726,7 @@ void StructurizeCFG::wireFlow(bool ExitUseAllowed,
 
 void StructurizeCFG::handleLoops(bool ExitUseAllowed,
                                  BasicBlock *LoopEnd) {
-  RegionNode *Node = Order.back();
+  RegionNode *Node = Order.front();
   BasicBlock *LoopStart = Node->getEntry();
 
   if (!Loops.count(LoopStart)) {
@@ -924,10 +871,9 @@ bool StructurizeCFG::runOnRegion(Region *R, RGPassManager &RGM) {
   ParentRegion = R;
 
   DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
-  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
 
   orderNodes();
-  collectInfos();
 
   createFlow();
   insertConditions(false);
   insertConditions(true);
@@ -1347,7 +1347,6 @@ void BoUpSLP::buildTree(ArrayRef<Value *> Roots,
         DEBUG(dbgs() << "SLP: Need to extract: Extra arg from lane " <<
               Lane << " from " << *Scalar << ".\n");
         ExternalUses.emplace_back(Scalar, nullptr, Lane);
-        continue;
       }
       for (User *U : Scalar->users()) {
         DEBUG(dbgs() << "SLP: Checking user:" << *U << ".\n");
@ -549,6 +549,7 @@ def Aligned : InheritableAttr {
                   Keyword<"_Alignas">]>,
              Accessor<"isDeclspec",[Declspec<"align">]>];
  let Documentation = [Undocumented];
  let DuplicatesAllowedWhileMerging = 1;
}

def AlignValue : Attr {
@ -1357,15 +1357,15 @@ TARGET_BUILTIN(__builtin_ia32_vpshrdvw128_maskz, "V8sV8sV8sV8sUc", "", "avx512vl
TARGET_BUILTIN(__builtin_ia32_vpshrdvw256_maskz, "V16sV16sV16sV16sUs", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdvw512_maskz, "V32sV32sV32sV32sUi", "", "avx512vbmi2")

TARGET_BUILTIN(__builtin_ia32_vpshrdd128_mask, "V4iV4iV4iiV4iUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdd256_mask, "V8iV8iV8iiV8iUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdd512_mask, "V16iV16iV16iiV16iUs", "", "avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdq128_mask, "V2LLiV2LLiV2LLiiV2LLiUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdq256_mask, "V4LLiV4LLiV4LLiiV4LLiUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdq512_mask, "V8LLiV8LLiV8LLiiV8LLiUc", "", "avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdw128_mask, "V8sV8sV8siV8sUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdw256_mask, "V16sV16sV16siV16sUs", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdw512_mask, "V32sV32sV32siV32sUi", "", "avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdd128_mask, "V4iV4iV4iIiV4iUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdd256_mask, "V8iV8iV8iIiV8iUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdd512_mask, "V16iV16iV16iIiV16iUs", "", "avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdq128_mask, "V2LLiV2LLiV2LLiIiV2LLiUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdq256_mask, "V4LLiV4LLiV4LLiIiV4LLiUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdq512_mask, "V8LLiV8LLiV8LLiIiV8LLiUc", "", "avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdw128_mask, "V8sV8sV8sIiV8sUc", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdw256_mask, "V16sV16sV16sIiV16sUs", "", "avx512vl,avx512vbmi2")
TARGET_BUILTIN(__builtin_ia32_vpshrdw512_mask, "V32sV32sV32sIiV32sUi", "", "avx512vbmi2")

TARGET_BUILTIN(__builtin_ia32_pmovswb512_mask, "V32cV32sV32cUi", "", "avx512bw")
TARGET_BUILTIN(__builtin_ia32_pmovuswb512_mask, "V32cV32sV32cUi", "", "avx512bw")
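
The only change in these prototypes is "i" becoming "Ii": the shift-count
operand is now marked as requiring an integer constant expression. A hedged
sketch of a caller under that constraint (function name is illustrative; the
builtin and prototype are the ones listed above, compiled with
-mavx512vl -mavx512vbmi2):

    typedef int v4si __attribute__((vector_size(16)));

    // The third operand must now be a compile-time constant; a runtime
    // variable shift count would be rejected at compile time.
    v4si shrd_by_7(v4si a, v4si b, v4si src) {
      return __builtin_ia32_vpshrdd128_mask(a, b, 7, src, (unsigned char)0xF);
    }
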
@ -444,8 +444,7 @@ def TautologicalInRangeCompare : DiagGroup<"tautological-constant-in-range-compa
                                            TautologicalUnsignedEnumZeroCompare]>;
def TautologicalOutOfRangeCompare : DiagGroup<"tautological-constant-out-of-range-compare">;
def TautologicalConstantCompare : DiagGroup<"tautological-constant-compare",
                                            [TautologicalInRangeCompare,
                                             TautologicalOutOfRangeCompare]>;
                                            [TautologicalOutOfRangeCompare]>;
def TautologicalPointerCompare : DiagGroup<"tautological-pointer-compare">;
def TautologicalOverlapCompare : DiagGroup<"tautological-overlap-compare">;
def TautologicalUndefinedCompare : DiagGroup<"tautological-undefined-compare">;
@ -719,7 +718,6 @@ def IntToPointerCast : DiagGroup<"int-to-pointer-cast",
def Move : DiagGroup<"move", [PessimizingMove, RedundantMove, SelfMove]>;

def Extra : DiagGroup<"extra", [
    TautologicalInRangeCompare,
    MissingFieldInitializers,
    IgnoredQualifiers,
    InitializerOverrides,
@ -398,7 +398,6 @@ TYPE_TRAIT_2(__builtin_types_compatible_p, TypeCompatible, KEYNOCXX)
KEYWORD(__builtin_va_arg , KEYALL)
KEYWORD(__extension__ , KEYALL)
KEYWORD(__float128 , KEYALL)
ALIAS("_Float128", __float128 , KEYNOCXX)
KEYWORD(__imag , KEYALL)
KEYWORD(__int128 , KEYALL)
KEYWORD(__label__ , KEYALL)
@ -32,27 +32,39 @@ class BugType {
  const CheckName Check;
  const std::string Name;
  const std::string Category;
  bool SuppressonSink;
  const CheckerBase *Checker;
  bool SuppressOnSink;

  virtual void anchor();
public:
  BugType(class CheckName check, StringRef name, StringRef cat)
      : Check(check), Name(name), Category(cat), SuppressonSink(false) {}
  BugType(const CheckerBase *checker, StringRef name, StringRef cat)
      : Check(checker->getCheckName()), Name(name), Category(cat),
        SuppressonSink(false) {}
  virtual ~BugType() {}

  // FIXME: Should these be made strings as well?
public:
  BugType(CheckName Check, StringRef Name, StringRef Cat)
      : Check(Check), Name(Name), Category(Cat), Checker(nullptr),
        SuppressOnSink(false) {}
  BugType(const CheckerBase *Checker, StringRef Name, StringRef Cat)
      : Check(Checker->getCheckName()), Name(Name), Category(Cat),
        Checker(Checker), SuppressOnSink(false) {}
  virtual ~BugType() = default;

  StringRef getName() const { return Name; }
  StringRef getCategory() const { return Category; }
  StringRef getCheckName() const { return Check.getName(); }
  StringRef getCheckName() const {
    // FIXME: This is a workaround to ensure that the correct check name is used
    // The check names are set after the constructors are run.
    // In case the BugType object is initialized in the checker's ctor
    // the Check field will be empty. To circumvent this problem we use
    // CheckerBase whenever it is possible.
    StringRef CheckName =
        Checker ? Checker->getCheckName().getName() : Check.getName();
    assert(!CheckName.empty() && "Check name is not set properly.");
    return CheckName;
  }

  /// isSuppressOnSink - Returns true if bug reports associated with this bug
  /// type should be suppressed if the end node of the report is post-dominated
  /// by a sink node.
  bool isSuppressOnSink() const { return SuppressonSink; }
  void setSuppressOnSink(bool x) { SuppressonSink = x; }
  bool isSuppressOnSink() const { return SuppressOnSink; }
  void setSuppressOnSink(bool x) { SuppressOnSink = x; }

  virtual void FlushReports(BugReporter& BR);
};
@ -74,7 +86,7 @@ class BuiltinBug : public BugType {
  StringRef getDescription() const { return desc; }
};

} // end GR namespace
} // end ento namespace

} // end clang namespace
#endif
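
A minimal standalone sketch of the fallback the new getCheckName() performs
(stub types, not the real clang classes): prefer the live checker's name,
because the stored CheckName can still be empty when the BugType is
constructed inside the checker's own constructor:

    #include <cassert>
    #include <cstdio>
    #include <string>

    struct CheckerStub { std::string Name; };

    struct BugTypeStub {
      std::string Check;          // may still be "" if captured too early
      const CheckerStub *Checker; // richer source of the name, if available

      std::string getCheckName() const {
        std::string N = Checker ? Checker->Name : Check;
        assert(!N.empty() && "Check name is not set properly.");
        return N;
      }
    };

    int main() {
      CheckerStub C{"unix.Valist"};
      BugTypeStub B{"", &C}; // Check is empty; Checker supplies the name
      std::printf("%s\n", B.getCheckName().c_str());
      return 0;
    }
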
@ -891,12 +891,14 @@ bool Decl::AccessDeclContextSanity() const {
  // 4. the context is not a record
  // 5. it's invalid
  // 6. it's a C++0x static_assert.
  // 7. it's a block literal declaration
  if (isa<TranslationUnitDecl>(this) ||
      isa<TemplateTypeParmDecl>(this) ||
      isa<NonTypeTemplateParmDecl>(this) ||
      !isa<CXXRecordDecl>(getDeclContext()) ||
      isInvalidDecl() ||
      isa<StaticAssertDecl>(this) ||
      isa<BlockDecl>(this) ||
      // FIXME: a ParmVarDecl can have ClassTemplateSpecialization
      // as DeclContext (?).
      isa<ParmVarDecl>(this) ||
@ -478,6 +478,8 @@ void ODRHash::AddFunctionDecl(const FunctionDecl *Function) {

  // TODO: Fix hashing for class methods.
  if (isa<CXXMethodDecl>(Function)) return;
  // And friend functions.
  if (Function->getFriendObjectKind()) return;

  // Skip functions that are specializations or in specialization context.
  const DeclContext *DC = Function;
@ -915,7 +915,11 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
    Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
  }

  Result = CGF.Builder.CreateTrunc(UnsignedResult, ResTy);
  // Negate the product if it would be negative in infinite precision.
  Result = CGF.Builder.CreateSelect(
      IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);

  Result = CGF.Builder.CreateTrunc(Result, ResTy);
  }
  assert(Overflow && Result && "Missing overflow or result");
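
The fix in this hunk negates the product at full width and only then truncates
to the result type. A hedged standalone sketch of the same checked mixed-sign
multiply scheme (hypothetical helper; __builtin_mul_overflow is the GCC/Clang
builtin):

    #include <cassert>
    #include <cstdint>

    // Multiply a signed by an unsigned operand into a signed 32-bit result,
    // mirroring the IR sequence above: negate the full-width product first,
    // then truncate.
    static bool mixed_sign_mul(int64_t S, uint64_t U, int32_t &Out) {
      const bool IsNegative = S < 0;
      const uint64_t AbsS = IsNegative ? 0 - static_cast<uint64_t>(S)
                                       : static_cast<uint64_t>(S);
      uint64_t UnsignedResult;
      bool Overflow = __builtin_mul_overflow(AbsS, U, &UnsignedResult);
      const uint64_t Result = IsNegative ? 0 - UnsignedResult : UnsignedResult;
      if (IsNegative)
        Overflow |= UnsignedResult > (uint64_t{1} << 31); // below INT32_MIN
      else
        Overflow |= UnsignedResult > uint64_t{INT32_MAX}; // above INT32_MAX
      Out = static_cast<int32_t>(Result); // truncate last, as the fix does
      return Overflow;
    }

    int main() {
      int32_t R;
      assert(!mixed_sign_mul(-3, 5, R) && R == -15);
      assert(mixed_sign_mul(-1, 0x100000000ULL, R)); // exceeds int32_t
      return 0;
    }
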
@ -229,6 +229,11 @@ class PCHContainerGenerator : public ASTConsumer {
    Builder->getModuleDebugInfo()->completeRequiredType(RD);
  }

  void HandleImplicitImportDecl(ImportDecl *D) override {
    if (!D->getImportedOwningModule())
      Builder->getModuleDebugInfo()->EmitImportDecl(*D);
  }

  /// Emit a container holding the serialized AST.
  void HandleTranslationUnit(ASTContext &Ctx) override {
    assert(M && VMContext && Builder);
@ -817,10 +817,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
  DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat(), "F");
  DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat(), "");
  DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat(), "L");
  if (TI.hasFloat128Type())
    // FIXME: Switch away from the non-standard "Q" when we can
    DefineFloatMacros(Builder, "FLT128", &TI.getFloat128Format(), "Q");

  // Define a __POINTER_WIDTH__ macro for stdint.h.
  Builder.defineMacro("__POINTER_WIDTH__",
@ -2009,18 +2009,21 @@ bool Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
  const char *AfterLessPos = CurPtr;
  char C = getAndAdvanceChar(CurPtr, Result);
  while (C != '>') {
    // Skip escaped characters.
    if (C == '\\' && CurPtr < BufferEnd) {
      // Skip the escaped character.
      getAndAdvanceChar(CurPtr, Result);
    } else if (C == '\n' || C == '\r' || // Newline.
               (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
                           isCodeCompletionPoint(CurPtr-1)))) {
    // Skip escaped characters. Escaped newlines will already be processed by
    // getAndAdvanceChar.
    if (C == '\\')
      C = getAndAdvanceChar(CurPtr, Result);

    if (C == '\n' || C == '\r' || // Newline.
        (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
                    isCodeCompletionPoint(CurPtr-1)))) {
      // If the filename is unterminated, then it must just be a lone <
      // character. Return this as such.
      FormTokenWithChars(Result, AfterLessPos, tok::less);
      return true;
    } else if (C == 0) {
    }

    if (C == 0) {
      NulCharacter = CurPtr-1;
    }
    C = getAndAdvanceChar(CurPtr, Result);
@ -105,8 +105,10 @@ void Preprocessor::CachingLex(Token &Result) {
}

void Preprocessor::EnterCachingLexMode() {
  if (InCachingLexMode())
  if (InCachingLexMode()) {
    assert(CurLexerKind == CLK_CachingLexer && "Unexpected lexer kind");
    return;
  }

  PushIncludeMacroStack();
  CurLexerKind = CLK_CachingLexer;
@ -444,6 +444,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
  }

  CurPPLexer = nullptr;
  recomputeCurLexerKind();
  return true;
}
@ -143,72 +143,43 @@ void Scope::dumpImpl(raw_ostream &OS) const {
  if (HasFlags)
    OS << "Flags: ";

  while (Flags) {
    if (Flags & FnScope) {
      OS << "FnScope";
      Flags &= ~FnScope;
    } else if (Flags & BreakScope) {
      OS << "BreakScope";
      Flags &= ~BreakScope;
    } else if (Flags & ContinueScope) {
      OS << "ContinueScope";
      Flags &= ~ContinueScope;
    } else if (Flags & DeclScope) {
      OS << "DeclScope";
      Flags &= ~DeclScope;
    } else if (Flags & ControlScope) {
      OS << "ControlScope";
      Flags &= ~ControlScope;
    } else if (Flags & ClassScope) {
      OS << "ClassScope";
      Flags &= ~ClassScope;
    } else if (Flags & BlockScope) {
      OS << "BlockScope";
      Flags &= ~BlockScope;
    } else if (Flags & TemplateParamScope) {
      OS << "TemplateParamScope";
      Flags &= ~TemplateParamScope;
    } else if (Flags & FunctionPrototypeScope) {
      OS << "FunctionPrototypeScope";
      Flags &= ~FunctionPrototypeScope;
    } else if (Flags & FunctionDeclarationScope) {
      OS << "FunctionDeclarationScope";
      Flags &= ~FunctionDeclarationScope;
    } else if (Flags & AtCatchScope) {
      OS << "AtCatchScope";
      Flags &= ~AtCatchScope;
    } else if (Flags & ObjCMethodScope) {
      OS << "ObjCMethodScope";
      Flags &= ~ObjCMethodScope;
    } else if (Flags & SwitchScope) {
      OS << "SwitchScope";
      Flags &= ~SwitchScope;
    } else if (Flags & TryScope) {
      OS << "TryScope";
      Flags &= ~TryScope;
    } else if (Flags & FnTryCatchScope) {
      OS << "FnTryCatchScope";
      Flags &= ~FnTryCatchScope;
    } else if (Flags & SEHTryScope) {
      OS << "SEHTryScope";
      Flags &= ~SEHTryScope;
    } else if (Flags & SEHExceptScope) {
      OS << "SEHExceptScope";
      Flags &= ~SEHExceptScope;
    } else if (Flags & OpenMPDirectiveScope) {
      OS << "OpenMPDirectiveScope";
      Flags &= ~OpenMPDirectiveScope;
    } else if (Flags & OpenMPLoopDirectiveScope) {
      OS << "OpenMPLoopDirectiveScope";
      Flags &= ~OpenMPLoopDirectiveScope;
    } else if (Flags & OpenMPSimdDirectiveScope) {
      OS << "OpenMPSimdDirectiveScope";
      Flags &= ~OpenMPSimdDirectiveScope;
    }
  std::pair<unsigned, const char *> FlagInfo[] = {
      {FnScope, "FnScope"},
      {BreakScope, "BreakScope"},
      {ContinueScope, "ContinueScope"},
      {DeclScope, "DeclScope"},
      {ControlScope, "ControlScope"},
      {ClassScope, "ClassScope"},
      {BlockScope, "BlockScope"},
      {TemplateParamScope, "TemplateParamScope"},
      {FunctionPrototypeScope, "FunctionPrototypeScope"},
      {FunctionDeclarationScope, "FunctionDeclarationScope"},
      {AtCatchScope, "AtCatchScope"},
      {ObjCMethodScope, "ObjCMethodScope"},
      {SwitchScope, "SwitchScope"},
      {TryScope, "TryScope"},
      {FnTryCatchScope, "FnTryCatchScope"},
      {OpenMPDirectiveScope, "OpenMPDirectiveScope"},
      {OpenMPLoopDirectiveScope, "OpenMPLoopDirectiveScope"},
      {OpenMPSimdDirectiveScope, "OpenMPSimdDirectiveScope"},
      {EnumScope, "EnumScope"},
      {SEHTryScope, "SEHTryScope"},
      {SEHExceptScope, "SEHExceptScope"},
      {SEHFilterScope, "SEHFilterScope"},
      {CompoundStmtScope, "CompoundStmtScope"},
      {ClassInheritanceScope, "ClassInheritanceScope"}};

  if (Flags)
    OS << " | ";
  for (auto Info : FlagInfo) {
    if (Flags & Info.first) {
      OS << Info.second;
      Flags &= ~Info.first;
      if (Flags)
        OS << " | ";
    }
  }

  assert(Flags == 0 && "Unknown scope flags");

  if (HasFlags)
    OS << '\n';
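
The rewrite above replaces the long else-if chain with a (flag, name) table
and a single loop. The same idiom in a self-contained form (made-up flag
values, not clang's real Scope constants):

    #include <cstdio>
    #include <utility>

    enum ScopeFlags : unsigned { FnScope = 1, BreakScope = 2, ContinueScope = 4 };

    int main() {
      unsigned Flags = FnScope | ContinueScope;
      const std::pair<unsigned, const char *> FlagInfo[] = {
          {FnScope, "FnScope"},
          {BreakScope, "BreakScope"},
          {ContinueScope, "ContinueScope"}};
      for (auto Info : FlagInfo) {
        if (Flags & Info.first) {
          std::printf("%s", Info.second);
          Flags &= ~Info.first;
          if (Flags)
            std::printf(" | "); // separator only while flags remain
        }
      }
      std::printf("\n"); // prints: FnScope | ContinueScope
      return 0;
    }
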
@ -502,6 +502,10 @@ DeduceTemplateArguments(Sema &S,
                        SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
  assert(Arg.isCanonical() && "Argument type must be canonical");

  // Treat an injected-class-name as its underlying template-id.
  if (auto *Injected = dyn_cast<InjectedClassNameType>(Arg))
    Arg = Injected->getInjectedSpecializationType();

  // Check whether the template argument is a dependent template-id.
  if (const TemplateSpecializationType *SpecArg
        = dyn_cast<TemplateSpecializationType>(Arg)) {
|
@ -4160,7 +4160,8 @@ void Sema::BuildVariableInstantiation(
|
||||
// it right away if the type contains 'auto'.
|
||||
if ((!isa<VarTemplateSpecializationDecl>(NewVar) &&
|
||||
!InstantiatingVarTemplate &&
|
||||
!(OldVar->isInline() && OldVar->isThisDeclarationADefinition())) ||
|
||||
!(OldVar->isInline() && OldVar->isThisDeclarationADefinition() &&
|
||||
!NewVar->isThisDeclarationADefinition())) ||
|
||||
NewVar->getType()->isUndeducedType())
|
||||
InstantiateVariableInitializer(NewVar, OldVar, TemplateArgs);
@ -2900,8 +2900,13 @@ void ento::registerNewDeleteLeaksChecker(CheckerManager &mgr) {
        mgr.getCurrentCheckName();
  // We currently treat NewDeleteLeaks checker as a subchecker of NewDelete
  // checker.
  if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker])
  if (!checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker]) {
    checker->ChecksEnabled[MallocChecker::CK_NewDeleteChecker] = true;
    // FIXME: This does not set the correct name, but without this workaround
    // no name will be set at all.
    checker->CheckNames[MallocChecker::CK_NewDeleteChecker] =
        mgr.getCurrentCheckName();
  }
}

#define REGISTER_CHECKER(name) \
@ -64,7 +64,7 @@ class ValistChecker : public Checker<check::PreCall, check::PreStmt<VAArgExpr>,
                                  CheckerContext &C) const;
  void reportLeakedVALists(const RegionVector &LeakedVALists, StringRef Msg1,
                           StringRef Msg2, CheckerContext &C, ExplodedNode *N,
                           bool ForceReport = false) const;
                           bool ReportUninit = false) const;

  void checkVAListStartCall(const CallEvent &Call, CheckerContext &C,
                            bool IsCopy) const;
@ -267,15 +267,19 @@ void ValistChecker::reportUninitializedAccess(const MemRegion *VAList,
void ValistChecker::reportLeakedVALists(const RegionVector &LeakedVALists,
                                        StringRef Msg1, StringRef Msg2,
                                        CheckerContext &C, ExplodedNode *N,
                                        bool ForceReport) const {
                                        bool ReportUninit) const {
  if (!(ChecksEnabled[CK_Unterminated] ||
        (ChecksEnabled[CK_Uninitialized] && ForceReport)))
        (ChecksEnabled[CK_Uninitialized] && ReportUninit)))
    return;
  for (auto Reg : LeakedVALists) {
    if (!BT_leakedvalist) {
      BT_leakedvalist.reset(new BugType(CheckNames[CK_Unterminated],
                                        "Leaked va_list",
                                        categories::MemoryError));
      // FIXME: maybe creating a new check name for this type of bug is a better
      // solution.
      BT_leakedvalist.reset(
          new BugType(CheckNames[CK_Unterminated].getName().empty()
                          ? CheckNames[CK_Uninitialized]
                          : CheckNames[CK_Unterminated],
                      "Leaked va_list", categories::MemoryError));
      BT_leakedvalist->setSuppressOnSink(true);
    }
@ -375,7 +379,7 @@ void ValistChecker::checkVAListEndCall(const CallEvent &Call,

std::shared_ptr<PathDiagnosticPiece> ValistChecker::ValistBugVisitor::VisitNode(
    const ExplodedNode *N, const ExplodedNode *PrevN, BugReporterContext &BRC,
    BugReport &BR) {
    BugReport &) {
  ProgramStateRef State = N->getState();
  ProgramStateRef StatePrev = PrevN->getState();
@ -57,6 +57,7 @@ bool link(ArrayRef<const char *> Args, bool CanExitEarly, raw_ostream &Diag) {
  errorHandler().ErrorLimitExceededMsg =
      "too many errors emitted, stopping now"
      " (use /ERRORLIMIT:0 to see all errors)";
  errorHandler().ExitEarly = CanExitEarly;
  Config = make<Configuration>();
  Config->Argv = {Args.begin(), Args.end()};
  Config->CanExitEarly = CanExitEarly;
@ -1823,6 +1823,9 @@ void HashTableSection::finalizeContents() {
}

void HashTableSection::writeTo(uint8_t *Buf) {
  // See comment in GnuHashTableSection::writeTo.
  memset(Buf, 0, Size);

  unsigned NumSymbols = InX::DynSymTab->getNumSymbols();

  uint32_t *P = reinterpret_cast<uint32_t *>(Buf);
@ -39,6 +39,8 @@ class MachODumper : public ObjDumper {
  void printUnwindInfo() override;
  void printStackMap() const override;

  void printNeededLibraries() override;

  // MachO-specific.
  void printMachODataInCode() override;
  void printMachOVersionMin() override;
@ -675,6 +677,34 @@ void MachODumper::printStackMap() const {
        StackMapV2Parser<support::big>(StackMapContentsArray));
}

void MachODumper::printNeededLibraries() {
  ListScope D(W, "NeededLibraries");

  using LibsTy = std::vector<StringRef>;
  LibsTy Libs;

  for (const auto &Command : Obj->load_commands()) {
    if (Command.C.cmd == MachO::LC_LOAD_DYLIB ||
        Command.C.cmd == MachO::LC_ID_DYLIB ||
        Command.C.cmd == MachO::LC_LOAD_WEAK_DYLIB ||
        Command.C.cmd == MachO::LC_REEXPORT_DYLIB ||
        Command.C.cmd == MachO::LC_LAZY_LOAD_DYLIB ||
        Command.C.cmd == MachO::LC_LOAD_UPWARD_DYLIB) {
      MachO::dylib_command Dl = Obj->getDylibIDLoadCommand(Command);
      if (Dl.dylib.name < Dl.cmdsize) {
        auto *P = static_cast<const char*>(Command.Ptr) + Dl.dylib.name;
        Libs.push_back(P);
      }
    }
  }

  std::stable_sort(Libs.begin(), Libs.end());

  for (const auto &L : Libs) {
    outs() << " " << L << "\n";
  }
}

void MachODumper::printMachODataInCode() {
  for (const auto &Load : Obj->load_commands()) {
    if (Load.C.cmd == MachO::LC_DATA_IN_CODE) {
@ -8,4 +8,4 @@

#define CLANG_VENDOR "FreeBSD "

#define SVN_REVISION "321788"
#define SVN_REVISION "323338"
@ -4,5 +4,5 @@
#define LLD_VERSION_STRING "6.0.0"
#define LLD_VERSION_MAJOR 6
#define LLD_VERSION_MINOR 0
#define LLD_REVISION_STRING "321788"
#define LLD_REVISION_STRING "323338"
#define LLD_REPOSITORY_STRING "FreeBSD"
@ -1,2 +1,2 @@
/* $FreeBSD$ */
#define LLVM_REVISION "svn-r321788"
#define LLVM_REVISION "svn-r323338"