Vendor import of llvm trunk r308421:

https://llvm.org/svn/llvm-project/llvm/trunk@308421
This commit is contained in:
Dimitry Andric 2017-07-19 07:02:10 +00:00
parent ca089b24d4
commit 93c91e39b2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/vendor/llvm/dist/; revision=321184
svn path=/vendor/llvm/llvm-trunk-r308421/; revision=321185; tag=vendor/llvm/llvm-trunk-r308421
706 changed files with 40042 additions and 7075 deletions

View File

@ -41,14 +41,9 @@ E: hans@chromium.org
T: x86
O: Windows
N: Renato Golin
E: renato.golin@linaro.org
T: ARM
O: Linux
N: Diana Picus
E: diana.picus@linaro.org
T: AArch64
T: ARM, AArch64
O: Linux
N: Simon Dardis

View File

@ -132,7 +132,8 @@ The ``MayAlias`` response is used whenever the two pointers might refer to the
same object.
The ``PartialAlias`` response is used when the two memory objects are known to
be overlapping in some way, but do not start at the same address.
be overlapping in some way, regardless whether they start at the same address
or not.
The ``MustAlias`` response may only be returned if the two memory objects are
guaranteed to always start at exactly the same location. A ``MustAlias``

View File

@ -34,10 +34,10 @@ There are some conventions that are not uniformly followed in the code base
(e.g. the naming convention). This is because they are relatively new, and a
lot of code was written before they were put in place. Our long term goal is
for the entire codebase to follow the convention, but we explicitly *do not*
want patches that do large-scale reformating of existing code. On the other
want patches that do large-scale reformatting of existing code. On the other
hand, it is reasonable to rename the methods of a class if you're about to
change it in some other way. Just do the reformating as a separate commit from
the functionality change.
change it in some other way. Just do the reformatting as a separate commit
from the functionality change.
The ultimate goal of these guidelines is to increase the readability and
maintainability of our common source base. If you have suggestions for topics to

View File

@ -80,6 +80,13 @@ OUTPUT OPTIONS
Show more information on test failures, for example the entire test output
instead of just the test result.
.. option:: -vv, --echo-all-commands
Echo all commands to stdout, as they are being executed.
This can be valuable for debugging test failures, as the last echoed command
will be the one which has failed.
This option implies ``--verbose``.
.. option:: -a, --show-all
Show more information about all tests, for example the entire test

View File

@ -29,9 +29,9 @@ namespace llvm {
/// DominanceFrontierBase - Common base class for computing forward and inverse
/// dominance frontiers for a function.
///
template <class BlockT>
template <class BlockT, bool IsPostDom>
class DominanceFrontierBase {
public:
public:
typedef std::set<BlockT *> DomSetType; // Dom set for a bb
typedef std::map<BlockT *, DomSetType> DomSetMapType; // Dom set map
@ -40,10 +40,10 @@ class DominanceFrontierBase {
DomSetMapType Frontiers;
std::vector<BlockT *> Roots;
const bool IsPostDominators;
static constexpr bool IsPostDominators = IsPostDom;
public:
DominanceFrontierBase(bool isPostDom) : IsPostDominators(isPostDom) {}
public:
DominanceFrontierBase() {}
/// getRoots - Return the root blocks of the current CFG. This may include
/// multiple blocks if we are computing post dominators. For forward
@ -96,7 +96,7 @@ class DominanceFrontierBase {
/// compare - Return true if the other dominance frontier base matches
/// this dominance frontier base. Otherwise return false.
bool compare(DominanceFrontierBase<BlockT> &Other) const;
bool compare(DominanceFrontierBase &Other) const;
/// print - Convert to human readable form
///
@ -113,22 +113,21 @@ class DominanceFrontierBase {
/// used to compute a forward dominator frontiers.
///
template <class BlockT>
class ForwardDominanceFrontierBase : public DominanceFrontierBase<BlockT> {
private:
class ForwardDominanceFrontierBase
: public DominanceFrontierBase<BlockT, false> {
private:
typedef GraphTraits<BlockT *> BlockTraits;
public:
typedef DominatorTreeBase<BlockT> DomTreeT;
typedef DomTreeNodeBase<BlockT> DomTreeNodeT;
typedef typename DominanceFrontierBase<BlockT>::DomSetType DomSetType;
typedef DomTreeBase<BlockT> DomTreeT;
typedef DomTreeNodeBase<BlockT> DomTreeNodeT;
typedef typename DominanceFrontierBase<BlockT, false>::DomSetType DomSetType;
ForwardDominanceFrontierBase() : DominanceFrontierBase<BlockT>(false) {}
void analyze(DomTreeT &DT) {
this->Roots = DT.getRoots();
assert(this->Roots.size() == 1 &&
"Only one entry block for forward domfronts!");
calculate(DT, DT[this->Roots[0]]);
void analyze(DomTreeT &DT) {
this->Roots = DT.getRoots();
assert(this->Roots.size() == 1 &&
"Only one entry block for forward domfronts!");
calculate(DT, DT[this->Roots[0]]);
}
const DomSetType &calculate(const DomTreeT &DT, const DomTreeNodeT *Node);
@ -136,15 +135,16 @@ class ForwardDominanceFrontierBase : public DominanceFrontierBase<BlockT> {
class DominanceFrontier : public ForwardDominanceFrontierBase<BasicBlock> {
public:
typedef DominatorTreeBase<BasicBlock> DomTreeT;
typedef DomTreeNodeBase<BasicBlock> DomTreeNodeT;
typedef DominanceFrontierBase<BasicBlock>::DomSetType DomSetType;
typedef DominanceFrontierBase<BasicBlock>::iterator iterator;
typedef DominanceFrontierBase<BasicBlock>::const_iterator const_iterator;
typedef DomTreeBase<BasicBlock> DomTreeT;
typedef DomTreeNodeBase<BasicBlock> DomTreeNodeT;
typedef DominanceFrontierBase<BasicBlock, false>::DomSetType DomSetType;
typedef DominanceFrontierBase<BasicBlock, false>::iterator iterator;
typedef DominanceFrontierBase<BasicBlock, false>::const_iterator
const_iterator;
/// Handle invalidation explicitly.
bool invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &);
/// Handle invalidation explicitly.
bool invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &);
};
class DominanceFrontierWrapperPass : public FunctionPass {
@ -168,7 +168,8 @@ class DominanceFrontierWrapperPass : public FunctionPass {
void dump() const;
};
extern template class DominanceFrontierBase<BasicBlock>;
extern template class DominanceFrontierBase<BasicBlock, false>;
extern template class DominanceFrontierBase<BasicBlock, true>;
extern template class ForwardDominanceFrontierBase<BasicBlock>;
/// \brief Analysis pass which computes a \c DominanceFrontier.

View File

@ -39,33 +39,33 @@ class DFCalculateWorkObject {
const DomTreeNodeT *parentNode;
};
template <class BlockT>
void DominanceFrontierBase<BlockT>::removeBlock(BlockT *BB) {
template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::removeBlock(BlockT *BB) {
assert(find(BB) != end() && "Block is not in DominanceFrontier!");
for (iterator I = begin(), E = end(); I != E; ++I)
I->second.erase(BB);
Frontiers.erase(BB);
}
template <class BlockT>
void DominanceFrontierBase<BlockT>::addToFrontier(iterator I,
BlockT *Node) {
template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::addToFrontier(iterator I,
BlockT *Node) {
assert(I != end() && "BB is not in DominanceFrontier!");
assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
I->second.erase(Node);
}
template <class BlockT>
void DominanceFrontierBase<BlockT>::removeFromFrontier(iterator I,
BlockT *Node) {
template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::removeFromFrontier(
iterator I, BlockT *Node) {
assert(I != end() && "BB is not in DominanceFrontier!");
assert(I->second.count(Node) && "Node is not in DominanceFrontier of BB");
I->second.erase(Node);
}
template <class BlockT>
bool DominanceFrontierBase<BlockT>::compareDomSet(DomSetType &DS1,
const DomSetType &DS2) const {
template <class BlockT, bool IsPostDom>
bool DominanceFrontierBase<BlockT, IsPostDom>::compareDomSet(
DomSetType &DS1, const DomSetType &DS2) const {
std::set<BlockT *> tmpSet;
for (BlockT *BB : DS2)
tmpSet.insert(BB);
@ -88,9 +88,9 @@ bool DominanceFrontierBase<BlockT>::compareDomSet(DomSetType &DS1,
return false;
}
template <class BlockT>
bool DominanceFrontierBase<BlockT>::compare(
DominanceFrontierBase<BlockT> &Other) const {
template <class BlockT, bool IsPostDom>
bool DominanceFrontierBase<BlockT, IsPostDom>::compare(
DominanceFrontierBase<BlockT, IsPostDom> &Other) const {
DomSetMapType tmpFrontiers;
for (typename DomSetMapType::const_iterator I = Other.begin(),
E = Other.end();
@ -118,8 +118,8 @@ bool DominanceFrontierBase<BlockT>::compare(
return false;
}
template <class BlockT>
void DominanceFrontierBase<BlockT>::print(raw_ostream &OS) const {
template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::print(raw_ostream &OS) const {
for (const_iterator I = begin(), E = end(); I != E; ++I) {
OS << " DomFrontier for BB ";
if (I->first)
@ -142,8 +142,8 @@ void DominanceFrontierBase<BlockT>::print(raw_ostream &OS) const {
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
template <class BlockT>
void DominanceFrontierBase<BlockT>::dump() const {
template <class BlockT, bool IsPostDom>
void DominanceFrontierBase<BlockT, IsPostDom>::dump() const {
print(dbgs());
}
#endif

View File

@ -42,11 +42,11 @@ namespace llvm {
/// By default, liveness is not used to prune the IDF computation.
/// The template parameters should be either BasicBlock* or Inverse<BasicBlock
/// *>, depending on if you want the forward or reverse IDF.
template <class NodeTy>
template <class NodeTy, bool IsPostDom>
class IDFCalculator {
public:
IDFCalculator(DominatorTreeBase<BasicBlock> &DT) : DT(DT), useLiveIn(false) {}
public:
IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
: DT(DT), useLiveIn(false) {}
/// \brief Give the IDF calculator the set of blocks in which the value is
/// defined. This is equivalent to the set of starting blocks it should be
@ -84,12 +84,12 @@ class IDFCalculator {
void calculate(SmallVectorImpl<BasicBlock *> &IDFBlocks);
private:
DominatorTreeBase<BasicBlock> &DT;
bool useLiveIn;
const SmallPtrSetImpl<BasicBlock *> *LiveInBlocks;
const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
DominatorTreeBase<BasicBlock, IsPostDom> &DT;
bool useLiveIn;
const SmallPtrSetImpl<BasicBlock *> *LiveInBlocks;
const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
};
typedef IDFCalculator<BasicBlock *> ForwardIDFCalculator;
typedef IDFCalculator<Inverse<BasicBlock *>> ReverseIDFCalculator;
typedef IDFCalculator<BasicBlock *, false> ForwardIDFCalculator;
typedef IDFCalculator<Inverse<BasicBlock *>, true> ReverseIDFCalculator;
}
#endif

View File

@ -43,6 +43,7 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
@ -908,7 +909,7 @@ class LazyCallGraph {
/// This sets up the graph and computes all of the entry points of the graph.
/// No function definitions are scanned until their nodes in the graph are
/// requested during traversal.
LazyCallGraph(Module &M);
LazyCallGraph(Module &M, TargetLibraryInfo &TLI);
LazyCallGraph(LazyCallGraph &&G);
LazyCallGraph &operator=(LazyCallGraph &&RHS);
@ -966,6 +967,22 @@ class LazyCallGraph {
return insertInto(F, N);
}
/// Get the sequence of known and defined library functions.
///
/// These functions, because they are known to LLVM, can have calls
/// introduced out of thin air from arbitrary IR.
ArrayRef<Function *> getLibFunctions() const {
return LibFunctions.getArrayRef();
}
/// Test whether a function is a known and defined library function tracked by
/// the call graph.
///
/// Because these functions are known to LLVM they are specially modeled in
/// the call graph and even when all IR-level references have been removed
/// remain active and reachable.
bool isLibFunction(Function &F) const { return LibFunctions.count(&F); }
///@{
/// \name Pre-SCC Mutation API
///
@ -1100,6 +1117,11 @@ class LazyCallGraph {
/// These are all of the RefSCCs which have no children.
SmallVector<RefSCC *, 4> LeafRefSCCs;
/// Defined functions that are also known library functions which the
/// optimizer can reason about and therefore might introduce calls to out of
/// thin air.
SmallSetVector<Function *, 4> LibFunctions;
/// Helper to insert a new function, with an already looked-up entry in
/// the NodeMap.
Node &insertInto(Function &F, Node *&MappedN);
@ -1216,8 +1238,8 @@ class LazyCallGraphAnalysis : public AnalysisInfoMixin<LazyCallGraphAnalysis> {
///
/// This just builds the set of entry points to the call graph. The rest is
/// built lazily as it is walked.
LazyCallGraph run(Module &M, ModuleAnalysisManager &) {
return LazyCallGraph(M);
LazyCallGraph run(Module &M, ModuleAnalysisManager &AM) {
return LazyCallGraph(M, AM.getResult<TargetLibraryAnalysis>(M));
}
};

View File

@ -56,7 +56,8 @@ class Loop;
class MDNode;
class PHINode;
class raw_ostream;
template<class N> class DominatorTreeBase;
template <class N, bool IsPostDom>
class DominatorTreeBase;
template<class N, class M> class LoopInfoBase;
template<class N, class M> class LoopBase;
@ -663,12 +664,12 @@ class LoopInfoBase {
}
/// Create the loop forest using a stable algorithm.
void analyze(const DominatorTreeBase<BlockT> &DomTree);
void analyze(const DominatorTreeBase<BlockT, false> &DomTree);
// Debugging
void print(raw_ostream &OS) const;
void verify(const DominatorTreeBase<BlockT> &DomTree) const;
void verify(const DominatorTreeBase<BlockT, false> &DomTree) const;
};
// Implementation in LoopInfoImpl.h
@ -683,7 +684,7 @@ class LoopInfo : public LoopInfoBase<BasicBlock, Loop> {
LoopInfo(const LoopInfo &) = delete;
public:
LoopInfo() {}
explicit LoopInfo(const DominatorTreeBase<BasicBlock> &DomTree);
explicit LoopInfo(const DominatorTreeBase<BasicBlock, false> &DomTree);
LoopInfo(LoopInfo &&Arg) : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
LoopInfo &operator=(LoopInfo &&RHS) {

View File

@ -340,10 +340,10 @@ void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth,
/// Discover a subloop with the specified backedges such that: All blocks within
/// this loop are mapped to this loop or a subloop. And all subloops within this
/// loop have their parent loop set to this loop or a subloop.
template<class BlockT, class LoopT>
static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT*> Backedges,
LoopInfoBase<BlockT, LoopT> *LI,
const DominatorTreeBase<BlockT> &DomTree) {
template <class BlockT, class LoopT>
static void discoverAndMapSubloop(
LoopT *L, ArrayRef<BlockT *> Backedges, LoopInfoBase<BlockT, LoopT> *LI,
const DomTreeBase<BlockT> &DomTree) {
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
unsigned NumBlocks = 0;
@ -462,10 +462,9 @@ void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
///
/// The Block vectors are inclusive, so step 3 requires loop-depth number of
/// insertions per block.
template<class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::
analyze(const DominatorTreeBase<BlockT> &DomTree) {
template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::analyze(
const DomTreeBase<BlockT> &DomTree) {
// Postorder traversal of the dominator tree.
const DomTreeNodeBase<BlockT> *DomRoot = DomTree.getRootNode();
for (auto DomNode : post_order(DomRoot)) {
@ -607,7 +606,7 @@ static void compareLoops(const LoopT *L, const LoopT *OtherL,
template <class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::verify(
const DominatorTreeBase<BlockT> &DomTree) const {
const DomTreeBase<BlockT> &DomTree) const {
DenseSet<const LoopT*> Loops;
for (iterator I = begin(), E = end(); I != E; ++I) {
assert(!(*I)->getParentLoop() && "Top-level loop has a parent!");

View File

@ -22,10 +22,8 @@ namespace llvm {
/// PostDominatorTree Class - Concrete subclass of DominatorTree that is used to
/// compute the post-dominator tree.
///
struct PostDominatorTree : public DominatorTreeBase<BasicBlock> {
typedef DominatorTreeBase<BasicBlock> Base;
PostDominatorTree() : DominatorTreeBase<BasicBlock>(true) {}
struct PostDominatorTree : public PostDomTreeBase<BasicBlock> {
typedef PostDomTreeBase<BasicBlock> Base;
/// Handle invalidation explicitly.
bool invalidate(Function &F, const PreservedAnalyses &PA,

View File

@ -237,17 +237,15 @@ struct FoldingSetTrait<SCEVPredicate> : DefaultFoldingSetTrait<SCEVPredicate> {
};
/// This class represents an assumption that two SCEV expressions are equal,
/// and this can be checked at run-time. We assume that the left hand side is
/// a SCEVUnknown and the right hand side a constant.
/// and this can be checked at run-time.
class SCEVEqualPredicate final : public SCEVPredicate {
/// We assume that LHS == RHS, where LHS is a SCEVUnknown and RHS a
/// constant.
const SCEVUnknown *LHS;
const SCEVConstant *RHS;
/// We assume that LHS == RHS.
const SCEV *LHS;
const SCEV *RHS;
public:
SCEVEqualPredicate(const FoldingSetNodeIDRef ID, const SCEVUnknown *LHS,
const SCEVConstant *RHS);
SCEVEqualPredicate(const FoldingSetNodeIDRef ID, const SCEV *LHS,
const SCEV *RHS);
/// Implementation of the SCEVPredicate interface
bool implies(const SCEVPredicate *N) const override;
@ -256,10 +254,10 @@ class SCEVEqualPredicate final : public SCEVPredicate {
const SCEV *getExpr() const override;
/// Returns the left hand side of the equality.
const SCEVUnknown *getLHS() const { return LHS; }
const SCEV *getLHS() const { return LHS; }
/// Returns the right hand side of the equality.
const SCEVConstant *getRHS() const { return RHS; }
const SCEV *getRHS() const { return RHS; }
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const SCEVPredicate *P) {
@ -1241,6 +1239,14 @@ class ScalarEvolution {
SmallVector<const SCEV *, 4> NewOp(Operands.begin(), Operands.end());
return getAddRecExpr(NewOp, L, Flags);
}
/// Checks if \p SymbolicPHI can be rewritten as an AddRecExpr under some
/// Predicates. If successful return these <AddRecExpr, Predicates>;
/// The function is intended to be called from PSCEV (the caller will decide
/// whether to actually add the predicates and carry out the rewrites).
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI);
/// Returns an expression for a GEP
///
/// \p GEP The GEP. The indices contained in the GEP itself are ignored,
@ -1675,8 +1681,7 @@ class ScalarEvolution {
return F.getParent()->getDataLayout();
}
const SCEVPredicate *getEqualPredicate(const SCEVUnknown *LHS,
const SCEVConstant *RHS);
const SCEVPredicate *getEqualPredicate(const SCEV *LHS, const SCEV *RHS);
const SCEVPredicate *
getWrapPredicate(const SCEVAddRecExpr *AR,
@ -1692,6 +1697,19 @@ class ScalarEvolution {
SmallPtrSetImpl<const SCEVPredicate *> &Preds);
private:
/// Similar to createAddRecFromPHI, but with the additional flexibility of
/// suggesting runtime overflow checks in case casts are encountered.
/// If successful, the analysis records that for this loop, \p SymbolicPHI,
/// which is the UnknownSCEV currently representing the PHI, can be rewritten
/// into an AddRec, assuming some predicates; The function then returns the
/// AddRec and the predicates as a pair, and caches this pair in
/// PredicatedSCEVRewrites.
/// If the analysis is not successful, a mapping from the \p SymbolicPHI to
/// itself (with no predicates) is recorded, and a nullptr with an empty
/// predicates vector is returned as a pair.
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI);
/// Compute the backedge taken count knowing the interval difference, the
/// stride and presence of the equality in the comparison.
const SCEV *computeBECount(const SCEV *Delta, const SCEV *Stride,
@ -1722,6 +1740,12 @@ class ScalarEvolution {
FoldingSet<SCEVPredicate> UniquePreds;
BumpPtrAllocator SCEVAllocator;
/// Cache tentative mappings from UnknownSCEVs in a Loop, to a SCEV expression
/// they can be rewritten into under certain predicates.
DenseMap<std::pair<const SCEVUnknown *, const Loop *>,
std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
PredicatedSCEVRewrites;
/// The head of a linked list of all SCEVUnknown values that have been
/// allocated. This is used by releaseMemory to locate them all and call
/// their destructors.

View File

@ -155,6 +155,13 @@ class TargetTransformInfo {
int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) const;
/// \brief Estimate the cost of a EXT operation when lowered.
///
/// The contract for this function is the same as \c getOperationCost except
/// that it supports an interface that provides extra information specific to
/// the EXT operation.
int getExtCost(const Instruction *I, const Value *Src) const;
/// \brief Estimate the cost of a function call when lowered.
///
/// The contract for this is the same as \c getOperationCost except that it
@ -849,6 +856,7 @@ class TargetTransformInfo::Concept {
virtual int getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
virtual int getGEPCost(Type *PointeeType, const Value *Ptr,
ArrayRef<const Value *> Operands) = 0;
virtual int getExtCost(const Instruction *I, const Value *Src) = 0;
virtual int getCallCost(FunctionType *FTy, int NumArgs) = 0;
virtual int getCallCost(const Function *F, int NumArgs) = 0;
virtual int getCallCost(const Function *F,
@ -1022,6 +1030,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
ArrayRef<const Value *> Operands) override {
return Impl.getGEPCost(PointeeType, Ptr, Operands);
}
int getExtCost(const Instruction *I, const Value *Src) override {
return Impl.getExtCost(I, Src);
}
int getCallCost(FunctionType *FTy, int NumArgs) override {
return Impl.getCallCost(FTy, NumArgs);
}

View File

@ -120,6 +120,10 @@ class TargetTransformInfoImplBase {
return SI.getNumCases();
}
int getExtCost(const Instruction *I, const Value *Src) {
return TTI::TCC_Basic;
}
unsigned getCallCost(FunctionType *FTy, int NumArgs) {
assert(FTy && "FunctionType must be provided to this routine.");
@ -728,6 +732,8 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
// nop on most sane targets.
if (isa<CmpInst>(CI->getOperand(0)))
return TTI::TCC_Free;
if (isa<SExtInst>(CI) || isa<ZExtInst>(CI) || isa<FPExtInst>(CI))
return static_cast<T *>(this)->getExtCost(CI, Operands.back());
}
return static_cast<T *>(this)->getOperationCost(

View File

@ -155,6 +155,18 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
return BaseT::getGEPCost(PointeeType, Ptr, Operands);
}
int getExtCost(const Instruction *I, const Value *Src) {
if (getTLI()->isExtFree(I))
return TargetTransformInfo::TCC_Free;
if (isa<ZExtInst>(I) || isa<SExtInst>(I))
if (const LoadInst *LI = dyn_cast<LoadInst>(Src))
if (getTLI()->isExtLoad(LI, I, DL))
return TargetTransformInfo::TCC_Free;
return TargetTransformInfo::TCC_Basic;
}
unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) {
return BaseT::getIntrinsicCost(IID, RetTy, Arguments);

View File

@ -23,27 +23,24 @@ class MachineDominanceFrontier : public MachineFunctionPass {
ForwardDominanceFrontierBase<MachineBasicBlock> Base;
public:
using DomTreeT = DominatorTreeBase<MachineBasicBlock>;
using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
using DomSetType = DominanceFrontierBase<MachineBasicBlock>::DomSetType;
using iterator = DominanceFrontierBase<MachineBasicBlock>::iterator;
using const_iterator =
DominanceFrontierBase<MachineBasicBlock>::const_iterator;
using DomTreeT = DomTreeBase<MachineBasicBlock>;
using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
using DomSetType = DominanceFrontierBase<MachineBasicBlock, false>::DomSetType;
using iterator = DominanceFrontierBase<MachineBasicBlock, false>::iterator;
using const_iterator =
DominanceFrontierBase<MachineBasicBlock, false>::const_iterator;
MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
MachineDominanceFrontier &
operator=(const MachineDominanceFrontier &) = delete;
MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
MachineDominanceFrontier &operator=(const MachineDominanceFrontier &) = delete;
static char ID;
static char ID;
MachineDominanceFrontier();
MachineDominanceFrontier();
DominanceFrontierBase<MachineBasicBlock> &getBase() {
return Base;
}
DominanceFrontierBase<MachineBasicBlock, false> &getBase() { return Base; }
inline const std::vector<MachineBasicBlock*> &getRoots() const {
return Base.getRoots();
inline const std::vector<MachineBasicBlock *> &getRoots() const {
return Base.getRoots();
}
MachineBasicBlock *getRoot() const {
@ -98,7 +95,7 @@ class MachineDominanceFrontier : public MachineFunctionPass {
return Base.compareDomSet(DS1, DS2);
}
bool compare(DominanceFrontierBase<MachineBasicBlock> &Other) const {
bool compare(DominanceFrontierBase<MachineBasicBlock, false> &Other) const {
return Base.compare(Other);
}

View File

@ -28,13 +28,15 @@
namespace llvm {
template<>
inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB) {
template <>
inline void DominatorTreeBase<MachineBasicBlock, false>::addRoot(
MachineBasicBlock *MBB) {
this->Roots.push_back(MBB);
}
extern template class DomTreeNodeBase<MachineBasicBlock>;
extern template class DominatorTreeBase<MachineBasicBlock>;
extern template class DominatorTreeBase<MachineBasicBlock, false>; // DomTree
extern template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTree
using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
@ -65,7 +67,7 @@ class MachineDominatorTree : public MachineFunctionPass {
mutable SmallSet<MachineBasicBlock *, 32> NewBBs;
/// The DominatorTreeBase that is used to compute a normal dominator tree
std::unique_ptr<DominatorTreeBase<MachineBasicBlock>> DT;
std::unique_ptr<DomTreeBase<MachineBasicBlock>> DT;
/// \brief Apply all the recorded critical edges to the DT.
/// This updates the underlying DT information in a way that uses
@ -79,9 +81,8 @@ class MachineDominatorTree : public MachineFunctionPass {
MachineDominatorTree();
DominatorTreeBase<MachineBasicBlock> &getBase() {
if (!DT)
DT.reset(new DominatorTreeBase<MachineBasicBlock>(false));
DomTreeBase<MachineBasicBlock> &getBase() {
if (!DT) DT.reset(new DomTreeBase<MachineBasicBlock>());
applySplitCriticalEdges();
return *DT;
}

View File

@ -26,7 +26,7 @@ namespace llvm {
///
struct MachinePostDominatorTree : public MachineFunctionPass {
private:
DominatorTreeBase<MachineBasicBlock> *DT;
PostDomTreeBase<MachineBasicBlock> *DT;
public:
static char ID;

View File

@ -17,7 +17,6 @@
namespace llvm {
namespace codeview {
class TypeCollection;
class TypeServerHandler;
class TypeVisitorCallbacks;
enum VisitorDataSource {
@ -31,11 +30,9 @@ enum VisitorDataSource {
Error visitTypeRecord(CVType &Record, TypeIndex Index,
TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source = VDS_BytesPresent,
TypeServerHandler *TS = nullptr);
VisitorDataSource Source = VDS_BytesPresent);
Error visitTypeRecord(CVType &Record, TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source = VDS_BytesPresent,
TypeServerHandler *TS = nullptr);
VisitorDataSource Source = VDS_BytesPresent);
Error visitMemberRecord(CVMemberRecord Record, TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source = VDS_BytesPresent);
@ -46,12 +43,9 @@ Error visitMemberRecordStream(ArrayRef<uint8_t> FieldList,
TypeVisitorCallbacks &Callbacks);
Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source = VDS_BytesPresent,
TypeServerHandler *TS = nullptr);
Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS = nullptr);
Error visitTypeStream(TypeCollection &Types, TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS = nullptr);
VisitorDataSource Source = VDS_BytesPresent);
Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks);
Error visitTypeStream(TypeCollection &Types, TypeVisitorCallbacks &Callbacks);
} // end namespace codeview
} // end namespace llvm

View File

@ -84,7 +84,7 @@ class CodeViewRecordIO {
Error mapEncodedInteger(uint64_t &Value);
Error mapEncodedInteger(APSInt &Value);
Error mapStringZ(StringRef &Value);
Error mapGuid(StringRef &Guid);
Error mapGuid(GUID &Guid);
Error mapStringZVectorZ(std::vector<StringRef> &Value);

View File

@ -12,6 +12,7 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatVariadic.h"
@ -31,7 +32,7 @@ class GuidAdapter final : public FormatAdapter<ArrayRef<uint8_t>> {
explicit GuidAdapter(ArrayRef<uint8_t> Guid);
explicit GuidAdapter(StringRef Guid);
void format(raw_ostream &Stream, StringRef Style) override ;
void format(raw_ostream &Stream, StringRef Style) override;
};
} // end namespace detail
@ -60,6 +61,13 @@ template <> struct format_provider<codeview::TypeIndex> {
}
};
template <> struct format_provider<codeview::GUID> {
static void format(const codeview::GUID &V, llvm::raw_ostream &Stream,
StringRef Style) {
Stream << V;
}
};
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_FORMATTERS_H

View File

@ -0,0 +1,55 @@
//===- GUID.h ---------------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_GUID_H
#define LLVM_DEBUGINFO_CODEVIEW_GUID_H
#include <cstdint>
#include <cstring>
namespace llvm {
class raw_ostream;
namespace codeview {
/// This represents the 'GUID' type from windows.h.
struct GUID {
uint8_t Guid[16];
};
inline bool operator==(const GUID &LHS, const GUID &RHS) {
return 0 == ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid));
}
inline bool operator<(const GUID &LHS, const GUID &RHS) {
return ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid)) < 0;
}
inline bool operator<=(const GUID &LHS, const GUID &RHS) {
return ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid)) <= 0;
}
inline bool operator>(const GUID &LHS, const GUID &RHS) {
return !(LHS <= RHS);
}
inline bool operator>=(const GUID &LHS, const GUID &RHS) {
return !(LHS < RHS);
}
inline bool operator!=(const GUID &LHS, const GUID &RHS) {
return !(LHS == RHS);
}
raw_ostream &operator<<(raw_ostream &OS, const GUID &Guid);
} // namespace codeview
} // namespace llvm
#endif

View File

@ -848,7 +848,7 @@ class BuildInfoSym : public SymbolRecord {
: SymbolRecord(SymbolRecordKind::BuildInfoSym),
RecordOffset(RecordOffset) {}
uint32_t BuildId;
TypeIndex BuildId;
uint32_t RecordOffset;
};

View File

@ -18,6 +18,7 @@
#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Endian.h"
@ -539,15 +540,17 @@ class TypeServer2Record : public TypeRecord {
public:
TypeServer2Record() = default;
explicit TypeServer2Record(TypeRecordKind Kind) : TypeRecord(Kind) {}
TypeServer2Record(StringRef Guid, uint32_t Age, StringRef Name)
: TypeRecord(TypeRecordKind::TypeServer2), Guid(Guid), Age(Age),
Name(Name) {}
TypeServer2Record(StringRef GuidStr, uint32_t Age, StringRef Name)
: TypeRecord(TypeRecordKind::TypeServer2), Age(Age), Name(Name) {
assert(GuidStr.size() == 16 && "guid isn't 16 bytes");
::memcpy(Guid.Guid, GuidStr.data(), 16);
}
StringRef getGuid() const { return Guid; }
const GUID &getGuid() const { return Guid; }
uint32_t getAge() const { return Age; }
StringRef getName() const { return Name; }
StringRef Guid;
GUID Guid;
uint32_t Age;
StringRef Name;
};

View File

@ -1,38 +0,0 @@
//===- TypeServerHandler.h --------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPESERVERHANDLER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPESERVERHANDLER_H
#include "llvm/Support/Error.h"
namespace llvm {
namespace codeview {
class TypeServer2Record;
class TypeVisitorCallbacks;
class TypeServerHandler {
public:
virtual ~TypeServerHandler() = default;
/// Handle a TypeServer record. If the implementation returns true
/// the record will not be processed by the top-level visitor. If
/// it returns false, it will be processed. If it returns an Error,
/// then the top-level visitor will fail.
virtual Expected<bool> handle(TypeServer2Record &TS,
TypeVisitorCallbacks &Callbacks) {
return false;
}
};
} // end namespace codeview
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_TYPESERVERHANDLER_H

View File

@ -19,7 +19,6 @@ namespace llvm {
namespace codeview {
class TypeIndex;
class TypeServerHandler;
class TypeTableBuilder;
/// \brief Merge one set of type records into another. This method assumes
@ -31,16 +30,13 @@ class TypeTableBuilder;
/// type stream, that contains the index of the corresponding type record
/// in the destination stream.
///
/// \param Handler (optional) If non-null, an interface that gets invoked
/// to handle type server records.
///
/// \param Types The collection of types to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeTypeRecords(TypeTableBuilder &Dest,
SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler, const CVTypeArray &Types);
const CVTypeArray &Types);
/// \brief Merge one set of id records into another. This method assumes
/// that all records are id records, and there are no Type records present.
@ -65,7 +61,7 @@ Error mergeTypeRecords(TypeTableBuilder &Dest,
/// appropriate error code.
Error mergeIdRecords(TypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
SmallVectorImpl<TypeIndex> &SourceToDest,
const CVTypeArray &Ids);
const CVTypeArray &Ids);
/// \brief Merge a unified set of type and id records, splitting them into
/// separate output streams.
@ -78,9 +74,6 @@ Error mergeIdRecords(TypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
/// id stream, that contains the index of the corresponding id record
/// in the destination stream.
///
/// \param Handler (optional) If non-null, an interface that gets invoked
/// to handle type server records.
///
/// \param IdsAndTypes The collection of id records to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
@ -88,8 +81,7 @@ Error mergeIdRecords(TypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
Error mergeTypeAndIdRecords(TypeTableBuilder &DestIds,
TypeTableBuilder &DestTypes,
SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler,
const CVTypeArray &IdsAndTypes);
const CVTypeArray &IdsAndTypes);
} // end namespace codeview
} // end namespace llvm

View File

@ -238,6 +238,34 @@ class DWARFUnit {
uint8_t getUnitType() const { return UnitType; }
static bool isValidUnitType(uint8_t UnitType) {
return UnitType == dwarf::DW_UT_compile || UnitType == dwarf::DW_UT_type ||
UnitType == dwarf::DW_UT_partial ||
UnitType == dwarf::DW_UT_skeleton ||
UnitType == dwarf::DW_UT_split_compile ||
UnitType == dwarf::DW_UT_split_type;
}
/// \brief Return the number of bytes for the header of a unit of
/// UnitType type.
///
/// This function must be called with a valid unit type which in
/// DWARF5 is defined as one of the following six types.
static uint32_t getDWARF5HeaderSize(uint8_t UnitType) {
switch (UnitType) {
case dwarf::DW_UT_compile:
case dwarf::DW_UT_partial:
return 12;
case dwarf::DW_UT_skeleton:
case dwarf::DW_UT_split_compile:
return 20;
case dwarf::DW_UT_type:
case dwarf::DW_UT_split_type:
return 24;
}
llvm_unreachable("Invalid UnitType.");
}
uint64_t getBaseAddress() const { return BaseAddr; }
void setBaseAddress(uint64_t base_addr) {

View File

@ -21,6 +21,7 @@ class DWARFContext;
class DWARFDie;
class DWARFUnit;
class DWARFAcceleratorTable;
class DWARFDataExtractor;
/// A class that verifies DWARF debug information given a DWARF Context.
class DWARFVerifier {
@ -30,10 +31,35 @@ class DWARFVerifier {
/// can verify each reference points to a valid DIE and not an offset that
/// lies between to valid DIEs.
std::map<uint64_t, std::set<uint32_t>> ReferenceToDIEOffsets;
uint32_t NumDebugInfoErrors = 0;
uint32_t NumDebugLineErrors = 0;
uint32_t NumAppleNamesErrors = 0;
/// Verifies the header of a unit in the .debug_info section.
///
/// This function currently checks for:
/// - Unit is in 32-bit DWARF format. The function can be modified to
/// support 64-bit format.
/// - The DWARF version is valid
/// - The unit type is valid (if unit is in version >=5)
/// - The unit doesn't extend beyond .debug_info section
/// - The address size is valid
/// - The offset in the .debug_abbrev section is valid
///
/// \param DebugInfoData The .debug_info section data
/// \param Offset A reference to the offset start of the unit. The offset will
/// be updated to point to the next unit in .debug_info
/// \param UnitIndex The index of the unit to be verified
/// \param UnitType A reference to the type of the unit
/// \param isUnitDWARF64 A reference to a flag that shows whether the unit is
/// in 64-bit format.
///
/// \returns true if the header is verified successfully, false otherwise.
bool verifyUnitHeader(const DWARFDataExtractor DebugInfoData,
uint32_t *Offset, unsigned UnitIndex, uint8_t &UnitType,
bool &isUnitDWARF64);
bool verifyUnitContents(DWARFUnit Unit);
/// Verifies the attribute's DWARF attribute and its value.
///
/// This function currently checks for:
@ -42,7 +68,11 @@ class DWARFVerifier {
///
/// \param Die The DWARF DIE that owns the attribute value
/// \param AttrValue The DWARF attribute value to check
void verifyDebugInfoAttribute(const DWARFDie &Die, DWARFAttribute &AttrValue);
///
/// \returns NumErrors The number of errors occured during verification of
/// attributes' values in a .debug_info section unit
unsigned verifyDebugInfoAttribute(const DWARFDie &Die,
DWARFAttribute &AttrValue);
/// Verifies the attribute's DWARF form.
///
@ -53,7 +83,10 @@ class DWARFVerifier {
///
/// \param Die The DWARF DIE that owns the attribute value
/// \param AttrValue The DWARF attribute value to check
void verifyDebugInfoForm(const DWARFDie &Die, DWARFAttribute &AttrValue);
///
/// \returns NumErrors The number of errors occured during verification of
/// attributes' forms in a .debug_info section unit
unsigned verifyDebugInfoForm(const DWARFDie &Die, DWARFAttribute &AttrValue);
/// Verifies the all valid references that were found when iterating through
/// all of the DIE attributes.
@ -62,7 +95,10 @@ class DWARFVerifier {
/// offset matches. This helps to ensure if a DWARF link phase moved things
/// around, that it doesn't create invalid references by failing to relocate
/// CU relative and absolute references.
void verifyDebugInfoReferences();
///
/// \returns NumErrors The number of errors occured during verification of
/// references for the .debug_info section
unsigned verifyDebugInfoReferences();
/// Verify the the DW_AT_stmt_list encoding and value and ensure that no
/// compile units that have the same DW_AT_stmt_list value.

View File

@ -106,7 +106,7 @@ class DIARawSymbol : public IPDBRawSymbol {
getVirtualBaseTableType() const override;
PDB_DataKind getDataKind() const override;
PDB_SymType getSymTag() const override;
PDB_UniqueId getGuid() const override;
codeview::GUID getGuid() const override;
int32_t getOffset() const override;
int32_t getThisAdjust() const override;
int32_t getVirtualBasePointerOffset() const override;

View File

@ -19,6 +19,7 @@ namespace pdb {
enum class generic_error_code {
invalid_path = 1,
dia_sdk_not_present,
type_server_not_found,
unspecified,
};

View File

@ -118,7 +118,7 @@ class IPDBRawSymbol {
virtual uint32_t getVirtualTableShapeId() const = 0;
virtual PDB_DataKind getDataKind() const = 0;
virtual PDB_SymType getSymTag() const = 0;
virtual PDB_UniqueId getGuid() const = 0;
virtual codeview::GUID getGuid() const = 0;
virtual int32_t getOffset() const = 0;
virtual int32_t getThisAdjust() const = 0;
virtual int32_t getVirtualBasePointerOffset() const = 0;

View File

@ -23,13 +23,6 @@
break;
namespace llvm {
template <> struct format_provider<pdb::PDB_UniqueId> {
static void format(const pdb::PDB_UniqueId &V, llvm::raw_ostream &Stream,
StringRef Style) {
codeview::fmt_guid(V.Guid).format(Stream, Style);
}
};
template <> struct format_provider<pdb::PdbRaw_ImplVer> {
static void format(const pdb::PdbRaw_ImplVer &V, llvm::raw_ostream &Stream,
StringRef Style) {

View File

@ -12,6 +12,7 @@
#include "llvm/ADT/BitmaskEnum.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/NamedStreamMap.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
@ -39,7 +40,7 @@ class InfoStream {
PdbRaw_ImplVer getVersion() const;
uint32_t getSignature() const;
uint32_t getAge() const;
PDB_UniqueId getGuid() const;
codeview::GUID getGuid() const;
uint32_t getNamedStreamMapByteSize() const;
PdbRaw_Features getFeatures() const;
@ -71,7 +72,7 @@ class InfoStream {
// Due to the aforementioned limitations with `Signature`, this is a new
// signature present on VC70 and higher PDBs which is guaranteed to be
// universally unique.
PDB_UniqueId Guid;
codeview::GUID Guid;
BinarySubstreamRef SubNamedStreams;

View File

@ -37,7 +37,7 @@ class InfoStreamBuilder {
void setVersion(PdbRaw_ImplVer V);
void setSignature(uint32_t S);
void setAge(uint32_t A);
void setGuid(PDB_UniqueId G);
void setGuid(codeview::GUID G);
void addFeature(PdbRaw_FeatureSig Sig);
uint32_t finalize();
@ -54,7 +54,7 @@ class InfoStreamBuilder {
PdbRaw_ImplVer Ver;
uint32_t Sig;
uint32_t Age;
PDB_UniqueId Guid;
codeview::GUID Guid;
NamedStreamMap &NamedStreams;
};

View File

@ -27,7 +27,7 @@ class NativeExeSymbol : public NativeRawSymbol {
uint32_t getAge() const override;
std::string getSymbolsFileName() const override;
PDB_UniqueId getGuid() const override;
codeview::GUID getGuid() const override;
bool hasCTypes() const override;
bool hasPrivateSymbols() const override;

View File

@ -111,7 +111,7 @@ class NativeRawSymbol : public IPDBRawSymbol {
getVirtualBaseTableType() const override;
PDB_DataKind getDataKind() const override;
PDB_SymType getSymTag() const override;
PDB_UniqueId getGuid() const override;
codeview::GUID getGuid() const override;
int32_t getOffset() const override;
int32_t getThisAdjust() const override;
int32_t getVirtualBasePointerOffset() const override;

View File

@ -1,46 +0,0 @@
//===- PDBTypeServerHandler.h -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_PDB_PDBTYPESERVERHANDLER_H
#define LLVM_DEBUGINFO_PDB_PDBTYPESERVERHANDLER_H
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeServerHandler.h"
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include <memory>
#include <string>
namespace llvm {
namespace pdb {
class NativeSession;
class PDBTypeServerHandler : public codeview::TypeServerHandler {
public:
PDBTypeServerHandler(bool RevisitAlways = false);
void addSearchPath(StringRef Path);
Expected<bool> handle(codeview::TypeServer2Record &TS,
codeview::TypeVisitorCallbacks &Callbacks) override;
private:
Expected<bool> handleInternal(PDBFile &File,
codeview::TypeVisitorCallbacks &Callbacks);
bool RevisitAlways;
std::unique_ptr<NativeSession> Session;
StringSet<> SearchPaths;
};
}
}
#endif

View File

@ -10,6 +10,7 @@
#ifndef LLVM_DEBUGINFO_PDB_RAW_RAWTYPES_H
#define LLVM_DEBUGINFO_PDB_RAW_RAWTYPES_H
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Endian.h"
@ -268,17 +269,6 @@ struct PublicsStreamHeader {
support::ulittle32_t NumSections;
};
/// Defines a 128-bit unique identifier. This maps to a GUID on Windows, but
/// is abstracted here for the purposes of non-Windows platforms that don't have
/// the GUID structure defined.
struct PDB_UniqueId {
uint8_t Guid[16];
};
inline bool operator==(const PDB_UniqueId &LHS, const PDB_UniqueId &RHS) {
return 0 == ::memcmp(LHS.Guid, RHS.Guid, sizeof(LHS.Guid));
}
// The header preceeding the global TPI stream.
// This corresponds to `HDR` in PDB/dbi/tpi.h.
struct TpiStreamHeader {
@ -312,7 +302,7 @@ struct InfoStreamHeader {
support::ulittle32_t Version;
support::ulittle32_t Signature;
support::ulittle32_t Age;
PDB_UniqueId Guid;
codeview::GUID Guid;
};
/// The header preceeding the /names stream.

View File

@ -10,84 +10,13 @@
#ifndef LLVM_DEBUGINFO_PDB_TPIHASHING_H
#define LLVM_DEBUGINFO_PDB_TPIHASHING_H
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/DebugInfo/PDB/Native/RawError.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <string>
namespace llvm {
namespace pdb {
class TpiHashUpdater : public codeview::TypeVisitorCallbacks {
public:
TpiHashUpdater() = default;
#define TYPE_RECORD(EnumName, EnumVal, Name) \
virtual Error visitKnownRecord(codeview::CVType &CVR, \
codeview::Name##Record &Record) override { \
visitKnownRecordImpl(CVR, Record); \
return Error::success(); \
}
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD(EnumName, EnumVal, Name)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
private:
template <typename RecordKind>
void visitKnownRecordImpl(codeview::CVType &CVR, RecordKind &Record) {
CVR.Hash = 0;
}
void visitKnownRecordImpl(codeview::CVType &CVR,
codeview::UdtSourceLineRecord &Rec);
void visitKnownRecordImpl(codeview::CVType &CVR,
codeview::UdtModSourceLineRecord &Rec);
void visitKnownRecordImpl(codeview::CVType &CVR, codeview::ClassRecord &Rec);
void visitKnownRecordImpl(codeview::CVType &CVR, codeview::EnumRecord &Rec);
void visitKnownRecordImpl(codeview::CVType &CVR, codeview::UnionRecord &Rec);
};
class TpiHashVerifier : public codeview::TypeVisitorCallbacks {
public:
TpiHashVerifier(FixedStreamArray<support::ulittle32_t> &HashValues,
uint32_t NumHashBuckets)
: HashValues(HashValues), NumHashBuckets(NumHashBuckets) {}
Error visitKnownRecord(codeview::CVType &CVR,
codeview::UdtSourceLineRecord &Rec) override;
Error visitKnownRecord(codeview::CVType &CVR,
codeview::UdtModSourceLineRecord &Rec) override;
Error visitKnownRecord(codeview::CVType &CVR,
codeview::ClassRecord &Rec) override;
Error visitKnownRecord(codeview::CVType &CVR,
codeview::EnumRecord &Rec) override;
Error visitKnownRecord(codeview::CVType &CVR,
codeview::UnionRecord &Rec) override;
Error visitTypeBegin(codeview::CVType &CVR) override;
private:
Error verifySourceLine(codeview::TypeIndex TI);
Error errorInvalidHash() {
return make_error<RawError>(
raw_error_code::invalid_tpi_hash,
"Type index is 0x" +
utohexstr(codeview::TypeIndex::FirstNonSimpleIndex + Index));
}
FixedStreamArray<support::ulittle32_t> HashValues;
codeview::CVType RawRecord;
uint32_t NumHashBuckets;
uint32_t Index = -1;
};
Expected<uint32_t> hashTypeRecord(const llvm::codeview::CVType &Type);
} // end namespace pdb
} // end namespace llvm

View File

@ -32,7 +32,6 @@ raw_ostream &operator<<(raw_ostream &OS, const PDB_Checksum &Checksum);
raw_ostream &operator<<(raw_ostream &OS, const PDB_Lang &Lang);
raw_ostream &operator<<(raw_ostream &OS, const PDB_SymType &Tag);
raw_ostream &operator<<(raw_ostream &OS, const PDB_MemberAccess &Access);
raw_ostream &operator<<(raw_ostream &OS, const PDB_UniqueId &Guid);
raw_ostream &operator<<(raw_ostream &OS, const PDB_UdtType &Type);
raw_ostream &operator<<(raw_ostream &OS, const PDB_Machine &Machine);

View File

@ -22,6 +22,7 @@
#include "llvm/ExecutionEngine/JITSymbol.h"
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
#include "llvm/ExecutionEngine/Orc/OrcError.h"
#include "llvm/ExecutionEngine/RuntimeDyld.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
@ -289,21 +290,21 @@ class CompileOnDemandLayer {
// FIXME: We should track and free associated resources (unused compile
// callbacks, uncompiled IR, and no-longer-needed/reachable function
// implementations).
// FIXME: Return Error once the JIT APIs are Errorized.
bool updatePointer(std::string FuncName, JITTargetAddress FnBodyAddr) {
Error updatePointer(std::string FuncName, JITTargetAddress FnBodyAddr) {
//Find out which logical dylib contains our symbol
auto LDI = LogicalDylibs.begin();
for (auto LDE = LogicalDylibs.end(); LDI != LDE; ++LDI) {
if (auto LMResources = LDI->getLogicalModuleResourcesForSymbol(FuncName, false)) {
if (auto LMResources =
LDI->getLogicalModuleResourcesForSymbol(FuncName, false)) {
Module &SrcM = LMResources->SourceModule->getResource();
std::string CalledFnName = mangle(FuncName, SrcM.getDataLayout());
if (auto EC = LMResources->StubsMgr->updatePointer(CalledFnName, FnBodyAddr))
return false;
else
return true;
if (auto Err = LMResources->StubsMgr->updatePointer(CalledFnName,
FnBodyAddr))
return Err;
return Error::success();
}
}
return false;
return make_error<JITSymbolNotFound>(FuncName);
}
private:
@ -363,11 +364,8 @@ class CompileOnDemandLayer {
});
}
auto EC = LD.StubsMgr->createStubs(StubInits);
(void)EC;
// FIXME: This should be propagated back to the user. Stub creation may
// fail for remote JITs.
assert(!EC && "Error generating stubs");
if (auto Err = LD.StubsMgr->createStubs(StubInits))
return Err;
}
// If this module doesn't contain any globals, aliases, or module flags then

View File

@ -135,12 +135,13 @@ class RTDyldMemoryManager : public MCJITMemoryManager,
virtual void *getPointerToNamedFunction(const std::string &Name,
bool AbortOnFailure = true);
private:
protected:
struct EHFrame {
uint8_t *Addr;
size_t Size;
};
std::vector<EHFrame> EHFrames;
typedef std::vector<EHFrame> EHFrameInfos;
EHFrameInfos EHFrames;
};
// Create wrappers for C Binding types (see CBindingWrapping.h).

View File

@ -143,11 +143,15 @@ namespace CallingConv {
/// System V ABI, used on most non-Windows systems.
X86_64_SysV = 78,
/// \brief The C convention as implemented on Windows/x86-64. This
/// convention differs from the more common \c X86_64_SysV convention
/// in a number of ways, most notably in that XMM registers used to pass
/// arguments are shadowed by GPRs, and vice versa.
X86_64_Win64 = 79,
/// \brief The C convention as implemented on Windows/x86-64 and
/// AArch64. This convention differs from the more common
/// \c X86_64_SysV convention in a number of ways, most notably in
/// that XMM registers used to pass arguments are shadowed by GPRs,
/// and vice versa.
/// On AArch64, this is identical to the normal C (AAPCS) calling
/// convention for normal functions, but floats are passed in integer
/// registers to variadic functions.
Win64 = 79,
/// \brief MSVC calling convention that passes vectors and vector aggregates
/// in SSE registers.

View File

@ -598,6 +598,10 @@ class ConstantDataSequential : public ConstantData {
/// specified element in the low bits of a uint64_t.
uint64_t getElementAsInteger(unsigned i) const;
/// If this is a sequential container of integers (of any size), return the
/// specified element as an APInt.
APInt getElementAsAPInt(unsigned i) const;
/// If this is a sequential container of floating point type, return the
/// specified element as an APFloat.
APFloat getElementAsAPFloat(unsigned i) const;
@ -761,6 +765,10 @@ class ConstantDataVector final : public ConstantDataSequential {
/// i32/i64/float/double) and must be a ConstantFP or ConstantInt.
static Constant *getSplat(unsigned NumElts, Constant *Elt);
/// Returns true if this is a splat constant, meaning that all elements have
/// the same value.
bool isSplat() const;
/// If this is a splat constant, meaning that all of the elements have the
/// same value, return that value. Otherwise return NULL.
Constant *getSplatValue() const;

View File

@ -674,32 +674,37 @@ namespace llvm {
/// Create a descriptor for an imported module.
/// \param Context The scope this module is imported into
/// \param NS The namespace being imported here
/// \param Line Line number
/// \param NS The namespace being imported here.
/// \param File File where the declaration is located.
/// \param Line Line number of the declaration.
DIImportedEntity *createImportedModule(DIScope *Context, DINamespace *NS,
unsigned Line);
DIFile *File, unsigned Line);
/// Create a descriptor for an imported module.
/// \param Context The scope this module is imported into
/// \param NS An aliased namespace
/// \param Line Line number
/// \param Context The scope this module is imported into.
/// \param NS An aliased namespace.
/// \param File File where the declaration is located.
/// \param Line Line number of the declaration.
DIImportedEntity *createImportedModule(DIScope *Context,
DIImportedEntity *NS, unsigned Line);
DIImportedEntity *NS, DIFile *File,
unsigned Line);
/// Create a descriptor for an imported module.
/// \param Context The scope this module is imported into
/// \param M The module being imported here
/// \param Line Line number
/// \param Context The scope this module is imported into.
/// \param M The module being imported here
/// \param File File where the declaration is located.
/// \param Line Line number of the declaration.
DIImportedEntity *createImportedModule(DIScope *Context, DIModule *M,
unsigned Line);
DIFile *File, unsigned Line);
/// Create a descriptor for an imported function.
/// \param Context The scope this module is imported into
/// \param Decl The declaration (or definition) of a function, type, or
/// variable
/// \param Line Line number
/// \param Context The scope this module is imported into.
/// \param Decl The declaration (or definition) of a function, type, or
/// variable.
/// \param File File where the declaration is located.
/// \param Line Line number of the declaration.
DIImportedEntity *createImportedDeclaration(DIScope *Context, DINode *Decl,
unsigned Line,
DIFile *File, unsigned Line,
StringRef Name = "");
/// Insert a new llvm.dbg.declare intrinsic call.

View File

@ -435,10 +435,10 @@ class DIScope : public DINode {
/// Return the raw underlying file.
///
/// A \a DIFile is a \a DIScope, but it doesn't point at a separate file
/// (it\em is the file). If \c this is an \a DIFile, we need to return \c
/// this. Otherwise, return the first operand, which is where all other
/// subclasses store their file pointer.
/// A \a DIFile is a \a DIScope, but it doesn't point at a separate file (it
/// \em is the file). If \c this is an \a DIFile, we need to return \c this.
/// Otherwise, return the first operand, which is where all other subclasses
/// store their file pointer.
Metadata *getRawFile() const {
return isa<DIFile>(this) ? const_cast<DIScope *>(this)
: static_cast<Metadata *>(getOperand(0));
@ -2551,32 +2551,32 @@ class DIImportedEntity : public DINode {
static DIImportedEntity *getImpl(LLVMContext &Context, unsigned Tag,
DIScope *Scope, DINodeRef Entity,
unsigned Line, StringRef Name,
DIFile *File, unsigned Line, StringRef Name,
StorageType Storage,
bool ShouldCreate = true) {
return getImpl(Context, Tag, Scope, Entity, Line,
return getImpl(Context, Tag, Scope, Entity, File, Line,
getCanonicalMDString(Context, Name), Storage, ShouldCreate);
}
static DIImportedEntity *getImpl(LLVMContext &Context, unsigned Tag,
Metadata *Scope, Metadata *Entity,
unsigned Line, MDString *Name,
StorageType Storage,
Metadata *File, unsigned Line,
MDString *Name, StorageType Storage,
bool ShouldCreate = true);
TempDIImportedEntity cloneImpl() const {
return getTemporary(getContext(), getTag(), getScope(), getEntity(),
getLine(), getName());
getFile(), getLine(), getName());
}
public:
DEFINE_MDNODE_GET(DIImportedEntity,
(unsigned Tag, DIScope *Scope, DINodeRef Entity,
unsigned Line, StringRef Name = ""),
(Tag, Scope, Entity, Line, Name))
DIFile *File, unsigned Line, StringRef Name = ""),
(Tag, Scope, Entity, File, Line, Name))
DEFINE_MDNODE_GET(DIImportedEntity,
(unsigned Tag, Metadata *Scope, Metadata *Entity,
unsigned Line, MDString *Name),
(Tag, Scope, Entity, Line, Name))
Metadata *File, unsigned Line, MDString *Name),
(Tag, Scope, Entity, File, Line, Name))
TempDIImportedEntity clone() const { return cloneImpl(); }
@ -2584,10 +2584,12 @@ class DIImportedEntity : public DINode {
DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
DINodeRef getEntity() const { return DINodeRef(getRawEntity()); }
StringRef getName() const { return getStringOperand(2); }
DIFile *getFile() const { return cast_or_null<DIFile>(getRawFile()); }
Metadata *getRawScope() const { return getOperand(0); }
Metadata *getRawEntity() const { return getOperand(1); }
MDString *getRawName() const { return getOperandAs<MDString>(2); }
Metadata *getRawFile() const { return getOperand(3); }
static bool classof(const Metadata *MD) {
return MD->getMetadataID() == DIImportedEntityKind;

View File

@ -34,22 +34,31 @@ class Module;
class raw_ostream;
extern template class DomTreeNodeBase<BasicBlock>;
extern template class DominatorTreeBase<BasicBlock>;
extern template class DominatorTreeBase<BasicBlock, false>; // DomTree
extern template class DominatorTreeBase<BasicBlock, true>; // PostDomTree
namespace DomTreeBuilder {
extern template void Calculate<Function, BasicBlock *>(
DominatorTreeBaseByGraphTraits<GraphTraits<BasicBlock *>> &DT, Function &F);
using BBDomTree = DomTreeBase<BasicBlock>;
using BBPostDomTree = PostDomTreeBase<BasicBlock>;
extern template void Calculate<Function, Inverse<BasicBlock *>>(
DominatorTreeBaseByGraphTraits<GraphTraits<Inverse<BasicBlock *>>> &DT,
Function &F);
extern template void Calculate<BBDomTree, Function>(BBDomTree &DT, Function &F);
extern template void Calculate<BBPostDomTree, Function>(BBPostDomTree &DT,
Function &F);
extern template bool Verify<BasicBlock *>(
const DominatorTreeBaseByGraphTraits<GraphTraits<BasicBlock *>> &DT);
extern template void InsertEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
BasicBlock *To);
extern template void InsertEdge<BBPostDomTree>(BBPostDomTree &DT,
BasicBlock *From,
BasicBlock *To);
extern template bool Verify<Inverse<BasicBlock *>>(
const DominatorTreeBaseByGraphTraits<GraphTraits<Inverse<BasicBlock *>>>
&DT);
extern template void DeleteEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
BasicBlock *To);
extern template void DeleteEdge<BBPostDomTree>(BBPostDomTree &DT,
BasicBlock *From,
BasicBlock *To);
extern template bool Verify<BBDomTree>(const BBDomTree &DT);
extern template bool Verify<BBPostDomTree>(const BBPostDomTree &DT);
} // namespace DomTreeBuilder
using DomTreeNode = DomTreeNodeBase<BasicBlock>;
@ -122,14 +131,12 @@ template <> struct DenseMapInfo<BasicBlockEdge> {
/// the dominator tree is initially constructed may still exist in the tree,
/// even if the tree is properly updated. Calling code should not rely on the
/// preceding statements; this is stated only to assist human understanding.
class DominatorTree : public DominatorTreeBase<BasicBlock> {
public:
using Base = DominatorTreeBase<BasicBlock>;
class DominatorTree : public DominatorTreeBase<BasicBlock, false> {
public:
using Base = DominatorTreeBase<BasicBlock, false>;
DominatorTree() : DominatorTreeBase<BasicBlock>(false) {}
explicit DominatorTree(Function &F) : DominatorTreeBase<BasicBlock>(false) {
recalculate(F);
}
DominatorTree() = default;
explicit DominatorTree(Function &F) { recalculate(F); }
/// Handle invalidation explicitly.
bool invalidate(Function &F, const PreservedAnalyses &PA,

View File

@ -32,16 +32,6 @@ class Hexagon_qi_mem_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_i1_ty], [llvm_ptr_ty],
[IntrNoMem]>;
//
// DEF_FUNCTION_TYPE_1(void_ftype_SI,BT_VOID,BT_INT) ->
// Hexagon_void_si_Intrinsic<string GCCIntSuffix>
//
class Hexagon_void_si_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[], [llvm_ptr_ty],
[]>;
//
// DEF_FUNCTION_TYPE_1(HI_ftype_SI,BT_I16,BT_INT) ->
// Hexagon_hi_si_Intrinsic<string GCCIntSuffix>
@ -4959,11 +4949,25 @@ Hexagon_di_di_Intrinsic<"HEXAGON_S2_interleave">;
//
def int_hexagon_S2_deinterleave :
Hexagon_di_di_Intrinsic<"HEXAGON_S2_deinterleave">;
//
// BUILTIN_INFO(HEXAGON.dcfetch_A,v_ftype_DI*,1)
//
def int_hexagon_prefetch :
Hexagon_void_si_Intrinsic<"HEXAGON_prefetch">;
Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;
def int_hexagon_Y2_dccleana :
Hexagon_Intrinsic<"HEXAGON_Y2_dccleana", [], [llvm_ptr_ty], []>;
def int_hexagon_Y2_dccleaninva :
Hexagon_Intrinsic<"HEXAGON_Y2_dccleaninva", [], [llvm_ptr_ty], []>;
def int_hexagon_Y2_dcinva :
Hexagon_Intrinsic<"HEXAGON_Y2_dcinva", [], [llvm_ptr_ty], []>;
def int_hexagon_Y2_dczeroa :
Hexagon_Intrinsic<"HEXAGON_Y2_dczeroa", [], [llvm_ptr_ty],
[IntrWriteMem, IntrArgMemOnly, IntrHasSideEffects]>;
def int_hexagon_Y4_l2fetch :
Hexagon_Intrinsic<"HEXAGON_Y4_l2fetch", [], [llvm_ptr_ty, llvm_i32_ty], []>;
def int_hexagon_Y5_l2fetch :
Hexagon_Intrinsic<"HEXAGON_Y5_l2fetch", [], [llvm_ptr_ty, llvm_i64_ty], []>;
def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;

View File

@ -373,6 +373,49 @@ let TargetPrefix = "s390" in {
def int_s390_vfidb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
// Instructions from the Vector Enhancements Facility 1
def int_s390_vbperm : SystemZBinaryConv<"vbperm", llvm_v2i64_ty,
llvm_v16i8_ty>;
def int_s390_vmslg : GCCBuiltin<"__builtin_s390_vmslg">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_v16i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_s390_vfmaxdb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_s390_vfmindb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_s390_vfmaxsb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_s390_vfminsb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_s390_vfcesbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vfchsbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vfchesbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vftcisb : SystemZBinaryConvIntCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vfisb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
// Instructions from the Vector Packed Decimal Facility
def int_s390_vlrl : GCCBuiltin<"__builtin_s390_vlrl">,
Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
def int_s390_vstrl : GCCBuiltin<"__builtin_s390_vstrl">,
Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
// In fact write-only but there's no property
// for that.
[IntrArgMemOnly]>;
}
//===----------------------------------------------------------------------===//

View File

@ -75,6 +75,9 @@ namespace llvm {
static LaneBitmask getNone() { return LaneBitmask(0); }
static LaneBitmask getAll() { return ~LaneBitmask(0); }
static LaneBitmask getLane(unsigned Lane) {
return LaneBitmask(Type(1) << Lane);
}
private:
Type Mask = 0;

View File

@ -69,7 +69,7 @@ class MCFixup {
/// an instruction or an assembler directive.
const MCExpr *Value;
/// The byte index of start of the relocation inside the encoded instruction.
/// The byte index of start of the relocation inside the MCFragment.
uint32_t Offset;
/// The target dependent kind of fixup item this is. The kind is used to

View File

@ -209,6 +209,15 @@ class MCInstrDesc {
/// well.
unsigned getNumOperands() const { return NumOperands; }
using const_opInfo_iterator = const MCOperandInfo *;
const_opInfo_iterator opInfo_begin() const { return OpInfo; }
const_opInfo_iterator opInfo_end() const { return OpInfo + NumOperands; }
iterator_range<const_opInfo_iterator> operands() const {
return make_range(opInfo_begin(), opInfo_end());
}
/// \brief Return the number of MachineOperands that are register
/// definitions. Register definitions always occur at the start of the
/// machine operand list. This is the number of "outs" in the .td file,

View File

@ -95,7 +95,7 @@ struct COFFShortExport {
}
};
std::error_code writeImportLibrary(StringRef DLLName,
std::error_code writeImportLibrary(StringRef ImportName,
StringRef Path,
ArrayRef<COFFShortExport> Exports,
COFF::MachineTypes Machine);

View File

@ -16,7 +16,6 @@
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_COFF_MODULE_DEFINITION_H
#define LLVM_OBJECT_COFF_MODULE_DEFINITION_H
@ -29,6 +28,7 @@ namespace object {
struct COFFModuleDefinition {
std::vector<COFFShortExport> Exports;
std::string OutputFile;
std::string ImportName;
uint64_t ImageBase = 0;
uint64_t StackReserve = 0;
uint64_t StackCommit = 0;
@ -40,8 +40,12 @@ struct COFFModuleDefinition {
uint32_t MinorOSVersion = 0;
};
// mingw and wine def files do not mangle _ for x86 which
// is a consequence of legacy binutils' dlltool functionality.
// This MingwDef flag should be removed once mingw stops this practice.
Expected<COFFModuleDefinition>
parseCOFFModuleDefinition(MemoryBufferRef MB, COFF::MachineTypes Machine);
parseCOFFModuleDefinition(MemoryBufferRef MB, COFF::MachineTypes Machine,
bool MingwDef = false);
} // End namespace object.
} // End namespace llvm.

View File

@ -60,6 +60,8 @@ ArrayRef<uint8_t> toDebugT(ArrayRef<LeafRecord>, BumpPtrAllocator &Alloc);
} // end namespace llvm
LLVM_YAML_DECLARE_SCALAR_TRAITS(codeview::GUID, true)
LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::LeafRecord)
LLVM_YAML_DECLARE_MAPPING_TRAITS(CodeViewYAML::MemberRecord)

View File

@ -43,8 +43,9 @@ AARCH64_ARCH_EXT_NAME("crypto", AArch64::AEK_CRYPTO, "+crypto","-crypto")
AARCH64_ARCH_EXT_NAME("fp", AArch64::AEK_FP, "+fp-armv8", "-fp-armv8")
AARCH64_ARCH_EXT_NAME("simd", AArch64::AEK_SIMD, "+neon", "-neon")
AARCH64_ARCH_EXT_NAME("fp16", AArch64::AEK_FP16, "+fullfp16", "-fullfp16")
AARCH64_ARCH_EXT_NAME("profile", AArch64::AEK_PROFILE, "+spe", "-spe")
AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
AARCH64_ARCH_EXT_NAME("profile", AArch64::AEK_PROFILE, "+spe", "-spe")
AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
AARCH64_ARCH_EXT_NAME("sve", AArch64::AEK_SVE, "+sve", "-sve")
#undef AARCH64_ARCH_EXT_NAME
#ifndef AARCH64_CPU_NAME

View File

@ -62,32 +62,45 @@ class BinaryItemStream : public BinaryStream {
return Error::success();
}
void setItems(ArrayRef<T> ItemArray) { Items = ItemArray; }
void setItems(ArrayRef<T> ItemArray) {
Items = ItemArray;
computeItemOffsets();
}
uint32_t getLength() override {
uint32_t Size = 0;
for (const auto &Item : Items)
Size += Traits::length(Item);
return Size;
return ItemEndOffsets.empty() ? 0 : ItemEndOffsets.back();
}
private:
Expected<uint32_t> translateOffsetIndex(uint32_t Offset) const {
void computeItemOffsets() {
ItemEndOffsets.clear();
ItemEndOffsets.reserve(Items.size());
uint32_t CurrentOffset = 0;
uint32_t CurrentIndex = 0;
for (const auto &Item : Items) {
if (CurrentOffset >= Offset)
break;
CurrentOffset += Traits::length(Item);
++CurrentIndex;
uint32_t Len = Traits::length(Item);
assert(Len > 0 && "no empty items");
CurrentOffset += Len;
ItemEndOffsets.push_back(CurrentOffset);
}
if (CurrentOffset != Offset)
}
Expected<uint32_t> translateOffsetIndex(uint32_t Offset) {
// Make sure the offset is somewhere in our items array.
if (Offset >= getLength())
return make_error<BinaryStreamError>(stream_error_code::stream_too_short);
return CurrentIndex;
++Offset;
auto Iter =
std::lower_bound(ItemEndOffsets.begin(), ItemEndOffsets.end(), Offset);
size_t Idx = std::distance(ItemEndOffsets.begin(), Iter);
assert(Idx < Items.size() && "binary search for offset failed");
return Idx;
}
llvm::support::endianness Endian;
ArrayRef<T> Items;
// Sorted vector of offsets to accelerate lookup.
std::vector<uint32_t> ItemEndOffsets;
};
} // end namespace llvm

View File

@ -125,30 +125,39 @@ inline format_object<Ts...> format(const char *Fmt, const Ts &... Vals) {
return format_object<Ts...>(Fmt, Vals...);
}
/// This is a helper class used for left_justify() and right_justify().
/// This is a helper class for left_justify, right_justify, and center_justify.
class FormattedString {
public:
enum Justification { JustifyNone, JustifyLeft, JustifyRight, JustifyCenter };
FormattedString(StringRef S, unsigned W, Justification J)
: Str(S), Width(W), Justify(J) {}
private:
StringRef Str;
unsigned Width;
bool RightJustify;
Justification Justify;
friend class raw_ostream;
public:
FormattedString(StringRef S, unsigned W, bool R)
: Str(S), Width(W), RightJustify(R) { }
};
/// left_justify - append spaces after string so total output is
/// \p Width characters. If \p Str is larger than \p Width, full string
/// is written with no padding.
inline FormattedString left_justify(StringRef Str, unsigned Width) {
return FormattedString(Str, Width, false);
return FormattedString(Str, Width, FormattedString::JustifyLeft);
}
/// right_justify - add spaces before string so total output is
/// \p Width characters. If \p Str is larger than \p Width, full string
/// is written with no padding.
inline FormattedString right_justify(StringRef Str, unsigned Width) {
return FormattedString(Str, Width, true);
return FormattedString(Str, Width, FormattedString::JustifyRight);
}
/// center_justify - add spaces before and after string so total output is
/// \p Width characters. If \p Str is larger than \p Width, full string
/// is written with no padding.
inline FormattedString center_justify(StringRef Str, unsigned Width) {
return FormattedString(Str, Width, FormattedString::JustifyCenter);
}
/// This is a helper class used for format_hex() and format_decimal().

View File

@ -41,27 +41,21 @@
namespace llvm {
template <class NodeT> class DominatorTreeBase;
template <typename NodeT, bool IsPostDom>
class DominatorTreeBase;
namespace detail {
template <typename GT> struct DominatorTreeBaseTraits {
static_assert(std::is_pointer<typename GT::NodeRef>::value,
"Currently NodeRef must be a pointer type.");
using type = DominatorTreeBase<
typename std::remove_pointer<typename GT::NodeRef>::type>;
};
} // end namespace detail
template <typename GT>
using DominatorTreeBaseByGraphTraits =
typename detail::DominatorTreeBaseTraits<GT>::type;
namespace DomTreeBuilder {
template <class DomTreeT>
struct SemiNCAInfo;
} // namespace DomTreeBuilder
/// \brief Base class for the actual dominator tree node.
template <class NodeT> class DomTreeNodeBase {
friend struct PostDominatorTree;
template <class N> friend class DominatorTreeBase;
friend class DominatorTreeBase<NodeT, false>;
friend class DominatorTreeBase<NodeT, true>;
friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, false>>;
friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase<NodeT, true>>;
NodeT *TheBB;
DomTreeNodeBase *IDom;
@ -192,58 +186,69 @@ void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &O,
}
namespace DomTreeBuilder {
template <class NodeT>
struct SemiNCAInfo;
// The routines below are provided in a separate header but referenced here.
template <typename DomTreeT, typename FuncT>
void Calculate(DomTreeT &DT, FuncT &F);
// The calculate routine is provided in a separate header but referenced here.
template <class FuncT, class N>
void Calculate(DominatorTreeBaseByGraphTraits<GraphTraits<N>> &DT, FuncT &F);
template <class DomTreeT>
void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
typename DomTreeT::NodePtr To);
// The verify function is provided in a separate header but referenced here.
template <class N>
bool Verify(const DominatorTreeBaseByGraphTraits<GraphTraits<N>> &DT);
template <class DomTreeT>
void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
typename DomTreeT::NodePtr To);
template <typename DomTreeT>
bool Verify(const DomTreeT &DT);
} // namespace DomTreeBuilder
/// \brief Core dominator tree base class.
///
/// This class is a generic template over graph nodes. It is instantiated for
/// various graphs in the LLVM IR or in the code generator.
template <class NodeT> class DominatorTreeBase {
template <typename NodeT, bool IsPostDom>
class DominatorTreeBase {
protected:
std::vector<NodeT *> Roots;
bool IsPostDominators;
using DomTreeNodeMapType =
DenseMap<NodeT *, std::unique_ptr<DomTreeNodeBase<NodeT>>>;
DomTreeNodeMapType DomTreeNodes;
DomTreeNodeBase<NodeT> *RootNode;
using ParentPtr = decltype(std::declval<NodeT *>()->getParent());
ParentPtr Parent = nullptr;
mutable bool DFSInfoValid = false;
mutable unsigned int SlowQueries = 0;
friend struct DomTreeBuilder::SemiNCAInfo<NodeT>;
using SNCAInfoTy = DomTreeBuilder::SemiNCAInfo<NodeT>;
friend struct DomTreeBuilder::SemiNCAInfo<DominatorTreeBase>;
public:
explicit DominatorTreeBase(bool isPostDom) : IsPostDominators(isPostDom) {}
static_assert(std::is_pointer<typename GraphTraits<NodeT *>::NodeRef>::value,
"Currently DominatorTreeBase supports only pointer nodes");
using NodeType = NodeT;
using NodePtr = NodeT *;
static constexpr bool IsPostDominator = IsPostDom;
DominatorTreeBase() {}
DominatorTreeBase(DominatorTreeBase &&Arg)
: Roots(std::move(Arg.Roots)),
IsPostDominators(Arg.IsPostDominators),
DomTreeNodes(std::move(Arg.DomTreeNodes)),
RootNode(std::move(Arg.RootNode)),
DFSInfoValid(std::move(Arg.DFSInfoValid)),
SlowQueries(std::move(Arg.SlowQueries)) {
RootNode(Arg.RootNode),
Parent(Arg.Parent),
DFSInfoValid(Arg.DFSInfoValid),
SlowQueries(Arg.SlowQueries) {
Arg.wipe();
}
DominatorTreeBase &operator=(DominatorTreeBase &&RHS) {
Roots = std::move(RHS.Roots);
IsPostDominators = RHS.IsPostDominators;
DomTreeNodes = std::move(RHS.DomTreeNodes);
RootNode = std::move(RHS.RootNode);
DFSInfoValid = std::move(RHS.DFSInfoValid);
SlowQueries = std::move(RHS.SlowQueries);
RootNode = RHS.RootNode;
Parent = RHS.Parent;
DFSInfoValid = RHS.DFSInfoValid;
SlowQueries = RHS.SlowQueries;
RHS.wipe();
return *this;
}
@ -259,11 +264,12 @@ template <class NodeT> class DominatorTreeBase {
/// isPostDominator - Returns true if analysis based of postdoms
///
bool isPostDominator() const { return IsPostDominators; }
bool isPostDominator() const { return IsPostDominator; }
/// compare - Return false if the other dominator tree base matches this
/// dominator tree base. Otherwise return true.
bool compare(const DominatorTreeBase &Other) const {
if (Parent != Other.Parent) return true;
const DomTreeNodeMapType &OtherDomTreeNodes = Other.DomTreeNodes;
if (DomTreeNodes.size() != OtherDomTreeNodes.size())
@ -443,10 +449,50 @@ template <class NodeT> class DominatorTreeBase {
const_cast<NodeT *>(B));
}
bool isVirtualRoot(const DomTreeNodeBase<NodeT> *A) const {
return isPostDominator() && !A->getBlock();
}
//===--------------------------------------------------------------------===//
// API to update (Post)DominatorTree information based on modifications to
// the CFG...
/// Inform the dominator tree about a CFG edge insertion and update the tree.
///
/// This function has to be called just before or just after making the update
/// on the actual CFG. There cannot be any other updates that the dominator
/// tree doesn't know about.
///
/// Note that for postdominators it automatically takes care of inserting
/// a reverse edge internally (so there's no need to swap the parameters).
///
void insertEdge(NodeT *From, NodeT *To) {
assert(From);
assert(To);
assert(From->getParent() == Parent);
assert(To->getParent() == Parent);
DomTreeBuilder::InsertEdge(*this, From, To);
}
/// Inform the dominator tree about a CFG edge deletion and update the tree.
///
/// This function has to be called just after making the update
/// on the actual CFG. There cannot be any other updates that the dominator
/// tree doesn't know about. The only exception is when the deletion that the
/// tree is informed about makes some (dominator) subtree unreachable -- in
/// this case, it is fine to perform deletions within this subtree.
///
/// Note that for postdominators it automatically takes care of deleting
/// a reverse edge internally (so there's no need to swap the parameters).
///
void deleteEdge(NodeT *From, NodeT *To) {
assert(From);
assert(To);
assert(From->getParent() == Parent);
assert(To->getParent() == Parent);
DomTreeBuilder::DeleteEdge(*this, From, To);
}
/// Add a new node to the dominator tree information.
///
/// This creates a new node as a child of DomBB dominator node, linking it
@ -530,7 +576,7 @@ template <class NodeT> class DominatorTreeBase {
/// splitBlock - BB is split and now it has one successor. Update dominator
/// tree to reflect this change.
void splitBlock(NodeT *NewBB) {
if (this->IsPostDominators)
if (IsPostDominator)
Split<Inverse<NodeT *>>(NewBB);
else
Split<NodeT *>(NewBB);
@ -607,37 +653,33 @@ template <class NodeT> class DominatorTreeBase {
template <class FT> void recalculate(FT &F) {
using TraitsTy = GraphTraits<FT *>;
reset();
Parent = &F;
if (!this->IsPostDominators) {
if (!IsPostDominator) {
// Initialize root
NodeT *entry = TraitsTy::getEntryNode(&F);
addRoot(entry);
DomTreeBuilder::Calculate<FT, NodeT *>(*this, F);
} else {
// Initialize the roots list
for (auto *Node : nodes(&F))
if (TraitsTy::child_begin(Node) == TraitsTy::child_end(Node))
addRoot(Node);
DomTreeBuilder::Calculate<FT, Inverse<NodeT *>>(*this, F);
}
DomTreeBuilder::Calculate(*this, F);
}
/// verify - check parent and sibling property
bool verify() const {
return this->isPostDominator()
? DomTreeBuilder::Verify<Inverse<NodeT *>>(*this)
: DomTreeBuilder::Verify<NodeT *>(*this);
}
bool verify() const { return DomTreeBuilder::Verify(*this); }
protected:
void addRoot(NodeT *BB) { this->Roots.push_back(BB); }
void reset() {
DomTreeNodes.clear();
this->Roots.clear();
Roots.clear();
RootNode = nullptr;
Parent = nullptr;
DFSInfoValid = false;
SlowQueries = 0;
}
@ -719,13 +761,21 @@ template <class NodeT> class DominatorTreeBase {
void wipe() {
DomTreeNodes.clear();
RootNode = nullptr;
Parent = nullptr;
}
};
template <typename T>
using DomTreeBase = DominatorTreeBase<T, false>;
template <typename T>
using PostDomTreeBase = DominatorTreeBase<T, true>;
// These two functions are declared out of line as a workaround for building
// with old (< r147295) versions of clang because of pr11642.
template <class NodeT>
bool DominatorTreeBase<NodeT>::dominates(const NodeT *A, const NodeT *B) const {
template <typename NodeT, bool IsPostDom>
bool DominatorTreeBase<NodeT, IsPostDom>::dominates(const NodeT *A,
const NodeT *B) const {
if (A == B)
return true;
@ -735,9 +785,9 @@ bool DominatorTreeBase<NodeT>::dominates(const NodeT *A, const NodeT *B) const {
return dominates(getNode(const_cast<NodeT *>(A)),
getNode(const_cast<NodeT *>(B)));
}
template <class NodeT>
bool DominatorTreeBase<NodeT>::properlyDominates(const NodeT *A,
const NodeT *B) const {
template <typename NodeT, bool IsPostDom>
bool DominatorTreeBase<NodeT, IsPostDom>::properlyDominates(
const NodeT *A, const NodeT *B) const {
if (A == B)
return false;

View File

@ -20,15 +20,28 @@
/// out that the theoretically slower O(n*log(n)) implementation is actually
/// faster than the almost-linear O(n*alpha(n)) version, even for large CFGs.
///
/// The file uses the Depth Based Search algorithm to perform incremental
/// updates (insertions and deletions). The implemented algorithm is based on this
/// publication:
///
/// An Experimental Study of Dynamic Dominators
/// Loukas Georgiadis, et al., April 12 2016, pp. 5-7, 9-10:
/// https://arxiv.org/pdf/1604.02711.pdf
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
#define LLVM_SUPPORT_GENERICDOMTREECONSTRUCTION_H
#include <queue>
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTree.h"
#define DEBUG_TYPE "dom-tree-builder"
namespace llvm {
namespace DomTreeBuilder {
@ -46,13 +59,14 @@ struct ChildrenGetter<NodePtr, true> {
}
};
// Information record used by Semi-NCA during tree construction.
template <typename NodeT>
template <typename DomTreeT>
struct SemiNCAInfo {
using NodePtr = NodeT *;
using DomTreeT = DominatorTreeBase<NodeT>;
using NodePtr = typename DomTreeT::NodePtr;
using NodeT = typename DomTreeT::NodeType;
using TreeNodePtr = DomTreeNodeBase<NodeT> *;
static constexpr bool IsPostDom = DomTreeT::IsPostDominator;
// Information record used by Semi-NCA during tree construction.
struct InfoRec {
unsigned DFSNum = 0;
unsigned Parent = 0;
@ -62,11 +76,13 @@ struct SemiNCAInfo {
SmallVector<NodePtr, 2> ReverseChildren;
};
std::vector<NodePtr> NumToNode;
// Number to node mapping is 1-based. Initialize the mapping to start with
// a dummy element.
std::vector<NodePtr> NumToNode = {nullptr};
DenseMap<NodePtr, InfoRec> NodeToInfo;
void clear() {
NumToNode.clear();
NumToNode = {nullptr}; // Restore to initial state with a dummy start node.
NodeToInfo.clear();
}
@ -90,12 +106,28 @@ struct SemiNCAInfo {
// Add a new tree node for this NodeT, and link it as a child of
// IDomNode
return (DT.DomTreeNodes[BB] = IDomNode->addChild(
llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode)))
llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode)))
.get();
}
static bool AlwaysDescend(NodePtr, NodePtr) { return true; }
struct BlockNamePrinter {
NodePtr N;
BlockNamePrinter(NodePtr Block) : N(Block) {}
BlockNamePrinter(TreeNodePtr TN) : N(TN ? TN->getBlock() : nullptr) {}
friend raw_ostream &operator<<(raw_ostream &O, const BlockNamePrinter &BP) {
if (!BP.N)
O << "nullptr";
else
BP.N->printAsOperand(O, false);
return O;
}
};
// Custom DFS implementation which can skip nodes based on a provided
// predicate. It also collects ReverseChildren so that we don't have to spend
// time getting predecessors in SemiNCA.
@ -177,44 +209,42 @@ struct SemiNCAInfo {
return VInInfo.Label;
}
template <typename NodeType>
void runSemiNCA(DomTreeT &DT, unsigned NumBlocks) {
// Step #1: Number blocks in depth-first order and initialize variables used
// in later stages of the algorithm.
const unsigned N = doFullDFSWalk(DT, AlwaysDescend);
// It might be that some blocks did not get a DFS number (e.g., blocks of
// infinite loops). In these cases an artificial exit node is required.
const bool MultipleRoots =
DT.Roots.size() > 1 || (DT.isPostDominator() && N != NumBlocks);
// This function requires DFS to be run before calling it.
void runSemiNCA(DomTreeT &DT, const unsigned MinLevel = 0) {
const unsigned NextDFSNum(NumToNode.size());
// Initialize IDoms to spanning tree parents.
for (unsigned i = 1; i <= N; ++i) {
for (unsigned i = 1; i < NextDFSNum; ++i) {
const NodePtr V = NumToNode[i];
auto &VInfo = NodeToInfo[V];
VInfo.IDom = NumToNode[VInfo.Parent];
}
// Step #2: Calculate the semidominators of all vertices.
for (unsigned i = N; i >= 2; --i) {
// Step #1: Calculate the semidominators of all vertices.
for (unsigned i = NextDFSNum - 1; i >= 2; --i) {
NodePtr W = NumToNode[i];
auto &WInfo = NodeToInfo[W];
// Initialize the semi dominator to point to the parent node.
WInfo.Semi = WInfo.Parent;
for (const auto &N : WInfo.ReverseChildren)
if (NodeToInfo.count(N)) { // Only if this predecessor is reachable!
unsigned SemiU = NodeToInfo[eval(N, i + 1)].Semi;
if (SemiU < WInfo.Semi)
WInfo.Semi = SemiU;
}
for (const auto &N : WInfo.ReverseChildren) {
if (NodeToInfo.count(N) == 0) // Skip unreachable predecessors.
continue;
const TreeNodePtr TN = DT.getNode(N);
// Skip predecessors whose level is above the subtree we are processing.
if (TN && TN->getLevel() < MinLevel)
continue;
unsigned SemiU = NodeToInfo[eval(N, i + 1)].Semi;
if (SemiU < WInfo.Semi) WInfo.Semi = SemiU;
}
}
// Step #3: Explicitly define the immediate dominator of each vertex.
// Step #2: Explicitly define the immediate dominator of each vertex.
// IDom[i] = NCA(SDom[i], SpanningTreeParent(i)).
// Note that the parents were stored in IDoms and later got invalidated
// during path compression in Eval.
for (unsigned i = 2; i <= N; ++i) {
for (unsigned i = 2; i < NextDFSNum; ++i) {
const NodePtr W = NumToNode[i];
auto &WInfo = NodeToInfo[W];
const unsigned SDomNum = NodeToInfo[NumToNode[WInfo.Semi]].DFSNum;
@ -224,46 +254,11 @@ struct SemiNCAInfo {
WInfo.IDom = WIDomCandidate;
}
if (DT.Roots.empty()) return;
// Add a node for the root. This node might be the actual root, if there is
// one exit block, or it may be the virtual exit (denoted by
// (BasicBlock *)0) which postdominates all real exits if there are multiple
// exit blocks, or an infinite loop.
NodePtr Root = !MultipleRoots ? DT.Roots[0] : nullptr;
DT.RootNode =
(DT.DomTreeNodes[Root] =
llvm::make_unique<DomTreeNodeBase<NodeT>>(Root, nullptr))
.get();
// Loop over all of the reachable blocks in the function...
for (unsigned i = 2; i <= N; ++i) {
NodePtr W = NumToNode[i];
// Don't replace this with 'count', the insertion side effect is important
if (DT.DomTreeNodes[W])
continue; // Haven't calculated this node yet?
NodePtr ImmDom = getIDom(W);
assert(ImmDom || DT.DomTreeNodes[nullptr]);
// Get or calculate the node for the immediate dominator
TreeNodePtr IDomNode = getNodeForBlock(ImmDom, DT);
// Add a new tree node for this BasicBlock, and link it as a child of
// IDomNode
DT.DomTreeNodes[W] = IDomNode->addChild(
llvm::make_unique<DomTreeNodeBase<NodeT>>(W, IDomNode));
}
}
template <typename DescendCondition>
unsigned doFullDFSWalk(const DomTreeT &DT, DescendCondition DC) {
unsigned Num = 0;
NumToNode.push_back(nullptr);
if (DT.Roots.size() > 1) {
auto &BBInfo = NodeToInfo[nullptr];
@ -283,11 +278,257 @@ struct SemiNCAInfo {
return Num;
}
static void PrintBlockOrNullptr(raw_ostream &O, NodePtr Obj) {
if (!Obj)
O << "nullptr";
void calculateFromScratch(DomTreeT &DT, const unsigned NumBlocks) {
// Step #0: Number blocks in depth-first order and initialize variables used
// in later stages of the algorithm.
const unsigned LastDFSNum = doFullDFSWalk(DT, AlwaysDescend);
runSemiNCA(DT);
if (DT.Roots.empty()) return;
// Add a node for the root. This node might be the actual root, if there is
// one exit block, or it may be the virtual exit (denoted by
// (BasicBlock *)0) which postdominates all real exits if there are multiple
// exit blocks, or an infinite loop.
// It might be that some blocks did not get a DFS number (e.g., blocks of
// infinite loops). In these cases an artificial exit node is required.
const bool MultipleRoots = DT.Roots.size() > 1 || (DT.isPostDominator() &&
LastDFSNum != NumBlocks);
NodePtr Root = !MultipleRoots ? DT.Roots[0] : nullptr;
DT.RootNode = (DT.DomTreeNodes[Root] =
llvm::make_unique<DomTreeNodeBase<NodeT>>(Root, nullptr))
.get();
attachNewSubtree(DT, DT.RootNode);
}
void attachNewSubtree(DomTreeT& DT, const TreeNodePtr AttachTo) {
// Attach the first unreachable block to AttachTo.
NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
// Loop over all of the discovered blocks in the function...
for (size_t i = 1, e = NumToNode.size(); i != e; ++i) {
NodePtr W = NumToNode[i];
DEBUG(dbgs() << "\tdiscovered a new reachable node "
<< BlockNamePrinter(W) << "\n");
// Don't replace this with 'count', the insertion side effect is important
if (DT.DomTreeNodes[W]) continue; // Haven't calculated this node yet?
NodePtr ImmDom = getIDom(W);
// Get or calculate the node for the immediate dominator
TreeNodePtr IDomNode = getNodeForBlock(ImmDom, DT);
// Add a new tree node for this BasicBlock, and link it as a child of
// IDomNode
DT.DomTreeNodes[W] = IDomNode->addChild(
llvm::make_unique<DomTreeNodeBase<NodeT>>(W, IDomNode));
}
}
void reattachExistingSubtree(DomTreeT &DT, const TreeNodePtr AttachTo) {
NodeToInfo[NumToNode[1]].IDom = AttachTo->getBlock();
for (size_t i = 1, e = NumToNode.size(); i != e; ++i) {
const NodePtr N = NumToNode[i];
const TreeNodePtr TN = DT.getNode(N);
assert(TN);
const TreeNodePtr NewIDom = DT.getNode(NodeToInfo[N].IDom);
TN->setIDom(NewIDom);
}
}
// Helper struct used during edge insertions.
struct InsertionInfo {
using BucketElementTy = std::pair<unsigned, TreeNodePtr>;
struct DecreasingLevel {
bool operator()(const BucketElementTy &First,
const BucketElementTy &Second) const {
return First.first > Second.first;
}
};
std::priority_queue<BucketElementTy, SmallVector<BucketElementTy, 8>,
DecreasingLevel>
Bucket; // Queue of tree nodes sorted by level in descending order.
SmallDenseSet<TreeNodePtr, 8> Affected;
SmallDenseSet<TreeNodePtr, 8> Visited;
SmallVector<TreeNodePtr, 8> AffectedQueue;
SmallVector<TreeNodePtr, 8> VisitedNotAffectedQueue;
};
static void InsertEdge(DomTreeT &DT, const NodePtr From, const NodePtr To) {
assert(From && To && "Cannot connect nullptrs");
DEBUG(dbgs() << "Inserting edge " << BlockNamePrinter(From) << " -> "
<< BlockNamePrinter(To) << "\n");
const TreeNodePtr FromTN = DT.getNode(From);
// Ignore edges from unreachable nodes.
if (!FromTN) return;
DT.DFSInfoValid = false;
const TreeNodePtr ToTN = DT.getNode(To);
if (!ToTN)
InsertUnreachable(DT, FromTN, To);
else
Obj->printAsOperand(O, false);
InsertReachable(DT, FromTN, ToTN);
}
// Handles insertion to a node already in the dominator tree.
static void InsertReachable(DomTreeT &DT, const TreeNodePtr From,
const TreeNodePtr To) {
DEBUG(dbgs() << "\tReachable " << BlockNamePrinter(From->getBlock())
<< " -> " << BlockNamePrinter(To->getBlock()) << "\n");
const NodePtr NCDBlock =
DT.findNearestCommonDominator(From->getBlock(), To->getBlock());
assert(NCDBlock || DT.isPostDominator());
const TreeNodePtr NCD = DT.getNode(NCDBlock);
assert(NCD);
DEBUG(dbgs() << "\t\tNCA == " << BlockNamePrinter(NCD) << "\n");
const TreeNodePtr ToIDom = To->getIDom();
// Nothing affected -- NCA property holds.
// (Based on the lemma 2.5 from the second paper.)
if (NCD == To || NCD == ToIDom) return;
// Identify and collect affected nodes.
InsertionInfo II;
DEBUG(dbgs() << "Marking " << BlockNamePrinter(To) << " as affected\n");
II.Affected.insert(To);
const unsigned ToLevel = To->getLevel();
DEBUG(dbgs() << "Putting " << BlockNamePrinter(To) << " into a Bucket\n");
II.Bucket.push({ToLevel, To});
while (!II.Bucket.empty()) {
const TreeNodePtr CurrentNode = II.Bucket.top().second;
II.Bucket.pop();
DEBUG(dbgs() << "\tAdding to Visited and AffectedQueue: "
<< BlockNamePrinter(CurrentNode) << "\n");
II.Visited.insert(CurrentNode);
II.AffectedQueue.push_back(CurrentNode);
// Discover and collect affected successors of the current node.
VisitInsertion(DT, CurrentNode, CurrentNode->getLevel(), NCD, II);
}
// Finish by updating immediate dominators and levels.
UpdateInsertion(DT, NCD, II);
}
// Visits an affected node and collect its affected successors.
static void VisitInsertion(DomTreeT &DT, const TreeNodePtr TN,
const unsigned RootLevel, const TreeNodePtr NCD,
InsertionInfo &II) {
const unsigned NCDLevel = NCD->getLevel();
DEBUG(dbgs() << "Visiting " << BlockNamePrinter(TN) << "\n");
assert(TN->getBlock());
for (const NodePtr Succ :
ChildrenGetter<NodePtr, IsPostDom>::Get(TN->getBlock())) {
const TreeNodePtr SuccTN = DT.getNode(Succ);
assert(SuccTN && "Unreachable successor found at reachable insertion");
const unsigned SuccLevel = SuccTN->getLevel();
DEBUG(dbgs() << "\tSuccessor " << BlockNamePrinter(Succ)
<< ", level = " << SuccLevel << "\n");
// Succ dominated by subtree From -- not affected.
// (Based on the lemma 2.5 from the second paper.)
if (SuccLevel > RootLevel) {
DEBUG(dbgs() << "\t\tDominated by subtree From\n");
if (II.Visited.count(SuccTN) != 0) continue;
DEBUG(dbgs() << "\t\tMarking visited not affected "
<< BlockNamePrinter(Succ) << "\n");
II.Visited.insert(SuccTN);
II.VisitedNotAffectedQueue.push_back(SuccTN);
VisitInsertion(DT, SuccTN, RootLevel, NCD, II);
} else if ((SuccLevel > NCDLevel + 1) && II.Affected.count(SuccTN) == 0) {
DEBUG(dbgs() << "\t\tMarking affected and adding "
<< BlockNamePrinter(Succ) << " to a Bucket\n");
II.Affected.insert(SuccTN);
II.Bucket.push({SuccLevel, SuccTN});
}
}
}
// Updates immediate dominators and levels after insertion.
static void UpdateInsertion(DomTreeT &DT, const TreeNodePtr NCD,
InsertionInfo &II) {
DEBUG(dbgs() << "Updating NCD = " << BlockNamePrinter(NCD) << "\n");
for (const TreeNodePtr TN : II.AffectedQueue) {
DEBUG(dbgs() << "\tIDom(" << BlockNamePrinter(TN)
<< ") = " << BlockNamePrinter(NCD) << "\n");
TN->setIDom(NCD);
}
UpdateLevelsAfterInsertion(II);
}
static void UpdateLevelsAfterInsertion(InsertionInfo &II) {
DEBUG(dbgs() << "Updating levels for visited but not affected nodes\n");
for (const TreeNodePtr TN : II.VisitedNotAffectedQueue) {
DEBUG(dbgs() << "\tlevel(" << BlockNamePrinter(TN) << ") = ("
<< BlockNamePrinter(TN->getIDom()) << ") "
<< TN->getIDom()->getLevel() << " + 1\n");
TN->UpdateLevel();
}
}
// Handles insertion to previously unreachable nodes.
static void InsertUnreachable(DomTreeT &DT, const TreeNodePtr From,
const NodePtr To) {
DEBUG(dbgs() << "Inserting " << BlockNamePrinter(From)
<< " -> (unreachable) " << BlockNamePrinter(To) << "\n");
// Collect discovered edges to already reachable nodes.
SmallVector<std::pair<NodePtr, TreeNodePtr>, 8> DiscoveredEdgesToReachable;
// Discover and connect nodes that became reachable with the insertion.
ComputeUnreachableDominators(DT, To, From, DiscoveredEdgesToReachable);
DEBUG(dbgs() << "Inserted " << BlockNamePrinter(From)
<< " -> (prev unreachable) " << BlockNamePrinter(To) << "\n");
DEBUG(DT.print(dbgs()));
// Use the discovered edges and insert discovered connecting (incoming)
// edges.
for (const auto &Edge : DiscoveredEdgesToReachable) {
DEBUG(dbgs() << "\tInserting discovered connecting edge "
<< BlockNamePrinter(Edge.first) << " -> "
<< BlockNamePrinter(Edge.second) << "\n");
InsertReachable(DT, DT.getNode(Edge.first), Edge.second);
}
}
// Connects nodes that become reachable with an insertion.
static void ComputeUnreachableDominators(
DomTreeT &DT, const NodePtr Root, const TreeNodePtr Incoming,
SmallVectorImpl<std::pair<NodePtr, TreeNodePtr>>
&DiscoveredConnectingEdges) {
assert(!DT.getNode(Root) && "Root must not be reachable");
// Visit only previously unreachable nodes.
auto UnreachableDescender = [&DT, &DiscoveredConnectingEdges](NodePtr From,
NodePtr To) {
const TreeNodePtr ToTN = DT.getNode(To);
if (!ToTN) return true;
DiscoveredConnectingEdges.push_back({From, ToTN});
return false;
};
SemiNCAInfo SNCA;
SNCA.runDFS<IsPostDom>(Root, 0, UnreachableDescender, 0);
SNCA.runSemiNCA(DT);
SNCA.attachNewSubtree(DT, Incoming);
DEBUG(dbgs() << "After adding unreachable nodes\n");
DEBUG(DT.print(dbgs()));
}
// Checks if the tree contains all reachable nodes in the input graph.
@ -298,12 +539,23 @@ struct SemiNCAInfo {
for (auto &NodeToTN : DT.DomTreeNodes) {
const TreeNodePtr TN = NodeToTN.second.get();
const NodePtr BB = TN->getBlock();
if (!BB) continue;
// Virtual root has a corresponding virtual CFG node.
if (DT.isVirtualRoot(TN)) continue;
if (NodeToInfo.count(BB) == 0) {
errs() << "DomTree node ";
PrintBlockOrNullptr(errs(), BB);
errs() << " not found by DFS walk!\n";
errs() << "DomTree node " << BlockNamePrinter(BB)
<< " not found by DFS walk!\n";
errs().flush();
return false;
}
}
for (const NodePtr N : NumToNode) {
if (N && !DT.getNode(N)) {
errs() << "CFG node " << BlockNamePrinter(N)
<< " not found in the DomTree!\n";
errs().flush();
return false;
@ -313,6 +565,215 @@ struct SemiNCAInfo {
return true;
}
// Updates the dominator tree after deleting the CFG edge From -> To.
// Dispatches to DeleteReachable or DeleteUnreachable depending on whether
// To stays reachable after the deletion. The edge must already have been
// removed from the CFG before this is called (asserted in debug builds).
static void DeleteEdge(DomTreeT &DT, const NodePtr From, const NodePtr To) {
  assert(From && To && "Cannot disconnect nullptrs");
  DEBUG(dbgs() << "Deleting edge " << BlockNamePrinter(From) << " -> "
               << BlockNamePrinter(To) << "\n");

#ifndef NDEBUG
  // Ensure that the edge was in fact deleted from the CFG before informing
  // the DomTree about it.
  // The check is O(N), so run it only in debug configuration.
  auto IsSuccessor = [](const NodePtr SuccCandidate, const NodePtr Of) {
    auto Successors = ChildrenGetter<NodePtr, IsPostDom>::Get(Of);
    return llvm::find(Successors, SuccCandidate) != Successors.end();
  };
  (void)IsSuccessor;
  assert(!IsSuccessor(To, From) && "Deleted edge still exists in the CFG!");
#endif

  const TreeNodePtr FromTN = DT.getNode(From);
  // Deletion in an unreachable subtree -- nothing to do.
  if (!FromTN) return;

  const TreeNodePtr ToTN = DT.getNode(To);
  assert(ToTN && "To already unreachable -- there is no edge to delete");
  const NodePtr NCDBlock = DT.findNearestCommonDominator(From, To);
  const TreeNodePtr NCD = DT.getNode(NCDBlock);

  // To dominates From -- nothing to do.
  if (ToTN == NCD) return;

  const TreeNodePtr ToIDom = ToTN->getIDom();
  DEBUG(dbgs() << "\tNCD " << BlockNamePrinter(NCD) << ", ToIDom "
               << BlockNamePrinter(ToIDom) << "\n");

  // To remains reachable after deletion.
  // (Based on the caption under Figure 4. from the second paper.)
  if (FromTN != ToIDom || HasProperSupport(DT, ToTN))
    DeleteReachable(DT, FromTN, ToTN);
  else
    DeleteUnreachable(DT, ToTN);
}
// Handles deletions that leave destination nodes reachable.
// Rebuilds only the subtree rooted at NCD(From, To); if that turns out to
// be the (virtual) root, the whole tree is recalculated from scratch.
static void DeleteReachable(DomTreeT &DT, const TreeNodePtr FromTN,
                            const TreeNodePtr ToTN) {
  DEBUG(dbgs() << "Deleting reachable " << BlockNamePrinter(FromTN) << " -> "
               << BlockNamePrinter(ToTN) << "\n");
  DEBUG(dbgs() << "\tRebuilding subtree\n");

  // Find the top of the subtree that needs to be rebuilt.
  // (Based on the lemma 2.6 from the second paper.)
  const NodePtr ToIDom =
      DT.findNearestCommonDominator(FromTN->getBlock(), ToTN->getBlock());
  assert(ToIDom || DT.isPostDominator());
  const TreeNodePtr ToIDomTN = DT.getNode(ToIDom);
  assert(ToIDomTN);
  const TreeNodePtr PrevIDomSubTree = ToIDomTN->getIDom();
  // Top of the subtree to rebuild is the root node. Rebuild the tree from
  // scratch.
  if (!PrevIDomSubTree) {
    DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
    DT.recalculate(*DT.Parent);
    return;
  }

  // Only visit nodes in the subtree starting at To, i.e. strictly below the
  // subtree root's level.
  const unsigned Level = ToIDomTN->getLevel();
  auto DescendBelow = [Level, &DT](NodePtr, NodePtr To) {
    return DT.getNode(To)->getLevel() > Level;
  };

  DEBUG(dbgs() << "\tTop of subtree: " << BlockNamePrinter(ToIDomTN) << "\n");

  SemiNCAInfo SNCA;
  SNCA.runDFS<IsPostDom>(ToIDom, 0, DescendBelow, 0);
  DEBUG(dbgs() << "\tRunning Semi-NCA\n");
  SNCA.runSemiNCA(DT, Level);
  SNCA.reattachExistingSubtree(DT, PrevIDomSubTree);
}
// Checks if a node has proper support, as defined on the page 3 and later
// explained on the page 7 of the second paper.
// Returns true iff some reachable predecessor of TN has a nearest common
// dominator with TN that is not TN itself -- i.e. TN can still be reached
// without going through the deleted edge.
static bool HasProperSupport(DomTreeT &DT, const TreeNodePtr TN) {
  DEBUG(dbgs() << "IsReachableFromIDom " << BlockNamePrinter(TN) << "\n");
  // Walk TN's CFG predecessors (successors in postdom mode).
  for (const NodePtr Pred :
       ChildrenGetter<NodePtr, !IsPostDom>::Get(TN->getBlock())) {
    DEBUG(dbgs() << "\tPred " << BlockNamePrinter(Pred) << "\n");
    // Skip unreachable predecessors.
    if (!DT.getNode(Pred)) continue;

    const NodePtr Support =
        DT.findNearestCommonDominator(TN->getBlock(), Pred);
    DEBUG(dbgs() << "\tSupport " << BlockNamePrinter(Support) << "\n");
    if (Support != TN->getBlock()) {
      DEBUG(dbgs() << "\t" << BlockNamePrinter(TN)
                   << " is reachable from support "
                   << BlockNamePrinter(Support) << "\n");
      return true;
    }
  }

  return false;
}
// Handle deletions that make destination node unreachable.
// (Based on the lemma 2.7 from the second paper.)
// Erases the subtree that became unreachable, then rebuilds the smallest
// affected subtree (rooted at the NCD of all affected nodes).
static void DeleteUnreachable(DomTreeT &DT, const TreeNodePtr ToTN) {
  DEBUG(dbgs() << "Deleting unreachable subtree " << BlockNamePrinter(ToTN)
               << "\n");
  assert(ToTN);
  assert(ToTN->getBlock());

  SmallVector<NodePtr, 16> AffectedQueue;
  const unsigned Level = ToTN->getLevel();

  // Traverse destination node's descendants with greater level in the tree
  // and collect visited nodes. Nodes at level <= Level are the affected
  // frontier: record them but do not descend further.
  auto DescendAndCollect = [Level, &AffectedQueue, &DT](NodePtr, NodePtr To) {
    const TreeNodePtr TN = DT.getNode(To);
    assert(TN);
    if (TN->getLevel() > Level) return true;
    if (llvm::find(AffectedQueue, To) == AffectedQueue.end())
      AffectedQueue.push_back(To);

    return false;
  };

  SemiNCAInfo SNCA;
  unsigned LastDFSNum =
      SNCA.runDFS<IsPostDom>(ToTN->getBlock(), 0, DescendAndCollect, 0);

  TreeNodePtr MinNode = ToTN;

  // Identify the top of the subtree to rebuild by finding the NCD of all
  // the affected nodes.
  for (const NodePtr N : AffectedQueue) {
    const TreeNodePtr TN = DT.getNode(N);
    const NodePtr NCDBlock =
        DT.findNearestCommonDominator(TN->getBlock(), ToTN->getBlock());
    assert(NCDBlock || DT.isPostDominator());
    const TreeNodePtr NCD = DT.getNode(NCDBlock);
    assert(NCD);

    DEBUG(dbgs() << "Processing affected node " << BlockNamePrinter(TN)
                 << " with NCD = " << BlockNamePrinter(NCD)
                 << ", MinNode =" << BlockNamePrinter(MinNode) << "\n");
    if (NCD != TN && NCD->getLevel() < MinNode->getLevel()) MinNode = NCD;
  }

  // Root reached, rebuild the whole tree from scratch.
  if (!MinNode->getIDom()) {
    DEBUG(dbgs() << "The entire tree needs to be rebuilt\n");
    DT.recalculate(*DT.Parent);
    return;
  }

  // Erase the unreachable subtree in reverse preorder to process all children
  // before deleting their parent.
  for (unsigned i = LastDFSNum; i > 0; --i) {
    const NodePtr N = SNCA.NumToNode[i];
    const TreeNodePtr TN = DT.getNode(N);
    DEBUG(dbgs() << "Erasing node " << BlockNamePrinter(TN) << "\n");

    EraseNode(DT, TN);
  }

  // The affected subtree starts at the To node -- there's no extra work to do.
  if (MinNode == ToTN) return;

  DEBUG(dbgs() << "DeleteUnreachable: running DFS with MinNode = "
               << BlockNamePrinter(MinNode) << "\n");
  const unsigned MinLevel = MinNode->getLevel();
  const TreeNodePtr PrevIDom = MinNode->getIDom();
  assert(PrevIDom);
  // Reuse the SemiNCAInfo for the rebuild pass; clear leftover DFS state.
  SNCA.clear();

  // Identify nodes that remain in the affected subtree.
  auto DescendBelow = [MinLevel, &DT](NodePtr, NodePtr To) {
    const TreeNodePtr ToTN = DT.getNode(To);
    return ToTN && ToTN->getLevel() > MinLevel;
  };
  SNCA.runDFS<IsPostDom>(MinNode->getBlock(), 0, DescendBelow, 0);

  DEBUG(dbgs() << "Previous IDom(MinNode) = " << BlockNamePrinter(PrevIDom)
               << "\nRunning Semi-NCA\n");

  // Rebuild the remaining part of affected subtree.
  SNCA.runSemiNCA(DT, MinLevel);
  SNCA.reattachExistingSubtree(DT, PrevIDom);
}
// Removes a leaf tree node from the dominator tree.
// The node must have no children; it is unlinked from its parent's child
// list (swap-and-pop, so sibling order is not preserved) and its entry is
// dropped from the block-to-node map.
static void EraseNode(DomTreeT &DT, const TreeNodePtr TN) {
  assert(TN);
  assert(TN->getNumChildren() == 0 && "Not a tree leaf");

  const TreeNodePtr Parent = TN->getIDom();
  assert(Parent);

  auto &Siblings = Parent->Children;
  auto It = llvm::find(Siblings, TN);
  assert(It != Siblings.end());
  // Swap the node to the end and pop it -- O(1) removal.
  std::swap(*It, Siblings.back());
  Siblings.pop_back();

  DT.DomTreeNodes.erase(TN->getBlock());
}
//~~
//===--------------- DomTree correctness verification ---------------------===
//~~
// Check if for every parent with a level L in the tree all of its children
// have level L + 1.
static bool VerifyLevels(const DomTreeT &DT) {
@ -323,20 +784,18 @@ struct SemiNCAInfo {
const TreeNodePtr IDom = TN->getIDom();
if (!IDom && TN->getLevel() != 0) {
errs() << "Node without an IDom ";
PrintBlockOrNullptr(errs(), BB);
errs() << " has a nonzero level " << TN->getLevel() << "!\n";
errs() << "Node without an IDom " << BlockNamePrinter(BB)
<< " has a nonzero level " << TN->getLevel() << "!\n";
errs().flush();
return false;
}
if (IDom && TN->getLevel() != IDom->getLevel() + 1) {
errs() << "Node ";
PrintBlockOrNullptr(errs(), BB);
errs() << " has level " << TN->getLevel() << " while it's IDom ";
PrintBlockOrNullptr(errs(), IDom->getBlock());
errs() << " has level " << IDom->getLevel() << "!\n";
errs() << "Node " << BlockNamePrinter(BB) << " has level "
<< TN->getLevel() << " while its IDom "
<< BlockNamePrinter(IDom->getBlock()) << " has level "
<< IDom->getLevel() << "!\n";
errs().flush();
return false;
@ -363,18 +822,14 @@ struct SemiNCAInfo {
assert(ToTN);
const NodePtr NCD = DT.findNearestCommonDominator(From, To);
const TreeNodePtr NCDTN = NCD ? DT.getNode(NCD) : nullptr;
const TreeNodePtr NCDTN = DT.getNode(NCD);
const TreeNodePtr ToIDom = ToTN->getIDom();
if (NCDTN != ToTN && NCDTN != ToIDom) {
errs() << "NearestCommonDominator verification failed:\n\tNCD(From:";
PrintBlockOrNullptr(errs(), From);
errs() << ", To:";
PrintBlockOrNullptr(errs(), To);
errs() << ") = ";
PrintBlockOrNullptr(errs(), NCD);
errs() << ",\t (should be To or IDom[To]: ";
PrintBlockOrNullptr(errs(), ToIDom ? ToIDom->getBlock() : nullptr);
errs() << ")\n";
errs() << "NearestCommonDominator verification failed:\n\tNCD(From:"
<< BlockNamePrinter(From) << ", To:" << BlockNamePrinter(To)
<< ") = " << BlockNamePrinter(NCD)
<< ",\t (should be To or IDom[To]: " << BlockNamePrinter(ToIDom)
<< ")\n";
errs().flush();
return false;
@ -440,11 +895,9 @@ struct SemiNCAInfo {
for (TreeNodePtr Child : TN->getChildren())
if (NodeToInfo.count(Child->getBlock()) != 0) {
errs() << "Child ";
PrintBlockOrNullptr(errs(), Child->getBlock());
errs() << " reachable after its parent ";
PrintBlockOrNullptr(errs(), BB);
errs() << " is removed!\n";
errs() << "Child " << BlockNamePrinter(Child)
<< " reachable after its parent " << BlockNamePrinter(BB)
<< " is removed!\n";
errs().flush();
return false;
@ -477,11 +930,9 @@ struct SemiNCAInfo {
if (S == N) continue;
if (NodeToInfo.count(S->getBlock()) == 0) {
errs() << "Node ";
PrintBlockOrNullptr(errs(), S->getBlock());
errs() << " not reachable when its sibling ";
PrintBlockOrNullptr(errs(), N->getBlock());
errs() << " is removed!\n";
errs() << "Node " << BlockNamePrinter(S)
<< " not reachable when its sibling " << BlockNamePrinter(N)
<< " is removed!\n";
errs().flush();
return false;
@ -494,23 +945,30 @@ struct SemiNCAInfo {
}
};
template <class FuncT, class NodeT>
void Calculate(DominatorTreeBaseByGraphTraits<GraphTraits<NodeT>> &DT,
FuncT &F) {
using NodePtr = typename GraphTraits<NodeT>::NodeRef;
static_assert(std::is_pointer<NodePtr>::value,
"NodePtr should be a pointer type");
SemiNCAInfo<typename std::remove_pointer<NodePtr>::type> SNCA;
SNCA.template runSemiNCA<NodeT>(DT, GraphTraits<FuncT *>::size(&F));
template <class DomTreeT, class FuncT>
void Calculate(DomTreeT &DT, FuncT &F) {
SemiNCAInfo<DomTreeT> SNCA;
SNCA.calculateFromScratch(DT, GraphTraits<FuncT *>::size(&F));
}
template <class NodeT>
bool Verify(const DominatorTreeBaseByGraphTraits<GraphTraits<NodeT>> &DT) {
using NodePtr = typename GraphTraits<NodeT>::NodeRef;
static_assert(std::is_pointer<NodePtr>::value,
"NodePtr should be a pointer type");
SemiNCAInfo<typename std::remove_pointer<NodePtr>::type> SNCA;
// Incrementally updates \p DT after the CFG edge From -> To was added.
template <class DomTreeT>
void InsertEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
                typename DomTreeT::NodePtr To) {
  // Postdominator trees are computed on the reversed graph, so the edge
  // direction has to be flipped before handing off to the implementation.
  if (DT.isPostDominator())
    std::swap(From, To);
  SemiNCAInfo<DomTreeT>::InsertEdge(DT, From, To);
}
// Incrementally updates \p DT after the CFG edge From -> To was removed.
template <class DomTreeT>
void DeleteEdge(DomTreeT &DT, typename DomTreeT::NodePtr From,
                typename DomTreeT::NodePtr To) {
  // Postdominator trees are computed on the reversed graph, so the edge
  // direction has to be flipped before handing off to the implementation.
  if (DT.isPostDominator())
    std::swap(From, To);
  SemiNCAInfo<DomTreeT>::DeleteEdge(DT, From, To);
}
template <class DomTreeT>
bool Verify(const DomTreeT &DT) {
SemiNCAInfo<DomTreeT> SNCA;
return SNCA.verifyReachability(DT) && SNCA.VerifyLevels(DT) &&
SNCA.verifyNCD(DT) && SNCA.verifyParentProperty(DT) &&
SNCA.verifySiblingProperty(DT);
@ -519,4 +977,6 @@ bool Verify(const DominatorTreeBaseByGraphTraits<GraphTraits<NodeT>> &DT) {
} // namespace DomTreeBuilder
} // namespace llvm
#undef DEBUG_TYPE
#endif

View File

@ -85,6 +85,7 @@ enum ArchExtKind : unsigned {
AEK_DSP = 0x400,
AEK_FP16 = 0x800,
AEK_RAS = 0x1000,
AEK_SVE = 0x2000,
// Unsupported extensions.
AEK_OS = 0x8000000,
AEK_IWMMXT = 0x10000000,
@ -166,7 +167,8 @@ enum ArchExtKind : unsigned {
AEK_FP16 = 0x20,
AEK_PROFILE = 0x40,
AEK_RAS = 0x80,
AEK_LSE = 0x100
AEK_LSE = 0x100,
AEK_SVE = 0x200
};
StringRef getCanonicalArchName(StringRef Arch);

View File

@ -1114,6 +1114,10 @@ class Input : public IO {
void *Ctxt = nullptr,
SourceMgr::DiagHandlerTy DiagHandler = nullptr,
void *DiagHandlerCtxt = nullptr);
Input(MemoryBufferRef Input,
void *Ctxt = nullptr,
SourceMgr::DiagHandlerTy DiagHandler = nullptr,
void *DiagHandlerCtxt = nullptr);
~Input() override;
// Check if there was an syntax or semantic error during parsing.

View File

@ -58,6 +58,7 @@ def : GINodeEquiv<G_SITOFP, sint_to_fp>;
def : GINodeEquiv<G_UITOFP, uint_to_fp>;
def : GINodeEquiv<G_FADD, fadd>;
def : GINodeEquiv<G_FSUB, fsub>;
def : GINodeEquiv<G_FMA, fma>;
def : GINodeEquiv<G_FMUL, fmul>;
def : GINodeEquiv<G_FDIV, fdiv>;
def : GINodeEquiv<G_FREM, frem>;

View File

@ -2012,6 +2012,35 @@ class TargetLoweringBase {
return isExtFreeImpl(I);
}
/// Return true if \p Load and \p Ext can form an ExtLoad.
/// For example, in AArch64
///   %L = load i8, i8* %ptr
///   %E = zext i8 %L to i32
/// can be lowered into one load instruction
///   ldrb w0, [x0]
/// \param Load the load instruction feeding the extension.
/// \param Ext the extension user; must be a zext or sext (asserted).
/// \returns true if the target can legally fold the extension into the load.
bool isExtLoad(const LoadInst *Load, const Instruction *Ext,
               const DataLayout &DL) const {
  EVT VT = getValueType(DL, Ext->getType());
  EVT LoadVT = getValueType(DL, Load->getType());

  // If the load has other users and the truncate is not free, the ext
  // probably isn't free.
  if (!Load->hasOneUse() && (isTypeLegal(LoadVT) || !isTypeLegal(VT)) &&
      !isTruncateFree(Ext->getType(), Load->getType()))
    return false;

  // Check whether the target supports casts folded into loads.
  unsigned LType;
  if (isa<ZExtInst>(Ext))
    LType = ISD::ZEXTLOAD;
  else {
    assert(isa<SExtInst>(Ext) && "Unexpected ext type!");
    LType = ISD::SEXTLOAD;
  }

  return isLoadExtLegal(LType, VT, LoadVT);
}
/// Return true if any actual instruction that defines a value of type FromTy
/// implicitly zero-extends the value to ToTy in the result register.
///

View File

@ -0,0 +1,24 @@
//===- DlltoolDriver.h - dlltool.exe-compatible driver ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Defines an interface to a dlltool.exe-compatible driver.
// Used by llvm-dlltool.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TOOLDRIVERS_LLVM_DLLTOOL_DLLTOOLDRIVER_H
#define LLVM_TOOLDRIVERS_LLVM_DLLTOOL_DLLTOOLDRIVER_H
namespace llvm {
template <typename T> class ArrayRef;
int dlltoolDriverMain(ArrayRef<const char *> ArgsArr);
} // namespace llvm
#endif

View File

@ -433,7 +433,7 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
if (Visited.insert(C).second)
Worklist.push_back(C);
LazyCallGraph::visitReferences(Worklist, Visited, [&](Function &Referee) {
auto VisitRef = [&](Function &Referee) {
Node &RefereeN = *G.lookup(Referee);
Edge *E = N->lookup(RefereeN);
// FIXME: Similarly to new calls, we also currently preclude
@ -444,7 +444,12 @@ LazyCallGraph::SCC &llvm::updateCGAndAnalysisManagerForFunctionPass(
RetainedEdges.insert(&RefereeN);
if (E->isCall())
DemotedCallTargets.insert(&RefereeN);
});
};
LazyCallGraph::visitReferences(Worklist, Visited, VisitRef);
// Include synthetic reference edges to known, defined lib functions.
for (auto *F : G.getLibFunctions())
VisitRef(*F);
// First remove all of the edges that are no longer present in this function.
// We have to build a list of dead targets first and then remove them as the

View File

@ -14,7 +14,8 @@
using namespace llvm;
namespace llvm {
template class DominanceFrontierBase<BasicBlock>;
template class DominanceFrontierBase<BasicBlock, false>;
template class DominanceFrontierBase<BasicBlock, true>;
template class ForwardDominanceFrontierBase<BasicBlock>;
}

View File

@ -26,7 +26,6 @@ using namespace llvm;
STATISTIC(TotalInsts , "Number of instructions (of all types)");
STATISTIC(TotalBlocks, "Number of basic blocks");
STATISTIC(TotalFuncs , "Number of non-external functions");
STATISTIC(TotalMemInst, "Number of memory instructions");
#define HANDLE_INST(N, OPCODE, CLASS) \
STATISTIC(Num ## OPCODE ## Inst, "Number of " #OPCODE " insts");
@ -75,13 +74,6 @@ FunctionPass *llvm::createInstCountPass() { return new InstCount(); }
// function.
//
bool InstCount::runOnFunction(Function &F) {
unsigned StartMemInsts =
NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst +
NumInvokeInst + NumAllocaInst;
visit(F);
unsigned EndMemInsts =
NumGetElementPtrInst + NumLoadInst + NumStoreInst + NumCallInst +
NumInvokeInst + NumAllocaInst;
TotalMemInst += EndMemInsts-StartMemInsts;
return false;
}

View File

@ -1745,14 +1745,11 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return Constant::getNullValue(Op0->getType());
// (A | ?) & A = A
Value *A = nullptr, *B = nullptr;
if (match(Op0, m_Or(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
return Op1;
// A & (A | ?) = A
if (match(Op1, m_Or(m_Value(A), m_Value(B))) &&
(A == Op0 || B == Op0))
if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
return Op0;
// A mask that only clears known zeros of a shifted value is a no-op.
@ -1852,26 +1849,22 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return Constant::getAllOnesValue(Op0->getType());
// (A & ?) | A = A
Value *A = nullptr, *B = nullptr;
if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
(A == Op1 || B == Op1))
if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
return Op1;
// A | (A & ?) = A
if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
(A == Op0 || B == Op0))
if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
return Op0;
// ~(A & ?) | A = -1
if (match(Op0, m_Not(m_And(m_Value(A), m_Value(B)))) &&
(A == Op1 || B == Op1))
if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
return Constant::getAllOnesValue(Op1->getType());
// A | ~(A & ?) = -1
if (match(Op1, m_Not(m_And(m_Value(A), m_Value(B)))) &&
(A == Op0 || B == Op0))
if (match(Op1, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
return Constant::getAllOnesValue(Op0->getType());
Value *A, *B;
// (A & ~B) | (A ^ B) -> (A ^ B)
// (~B & A) | (A ^ B) -> (A ^ B)
// (A & ~B) | (B ^ A) -> (B ^ A)

View File

@ -17,8 +17,8 @@
#include <queue>
namespace llvm {
template <class NodeTy>
void IDFCalculator<NodeTy>::calculate(
template <class NodeTy, bool IsPostDom>
void IDFCalculator<NodeTy, IsPostDom>::calculate(
SmallVectorImpl<BasicBlock *> &PHIBlocks) {
// Use a priority queue keyed on dominator tree level so that inserted nodes
// are handled from the bottom of the dominator tree upwards.
@ -88,6 +88,6 @@ void IDFCalculator<NodeTy>::calculate(
}
}
template class IDFCalculator<BasicBlock *>;
template class IDFCalculator<Inverse<BasicBlock *>>;
template class IDFCalculator<BasicBlock *, false>;
template class IDFCalculator<Inverse<BasicBlock *>, true>;
}

View File

@ -106,6 +106,13 @@ LazyCallGraph::EdgeSequence &LazyCallGraph::Node::populateSlow() {
LazyCallGraph::Edge::Ref);
});
// Add implicit reference edges to any defined libcall functions (if we
// haven't found an explicit edge).
for (auto *F : G->LibFunctions)
if (!Visited.count(F))
addEdge(Edges->Edges, Edges->EdgeIndexMap, G->get(*F),
LazyCallGraph::Edge::Ref);
return *Edges;
}
@ -120,15 +127,34 @@ LLVM_DUMP_METHOD void LazyCallGraph::Node::dump() const {
}
#endif
LazyCallGraph::LazyCallGraph(Module &M) {
// Returns true if \p F is a library function known to TLI -- either a
// recognized libcall or a function TLI considers "vectorizable". Such
// functions get synthetic reference edges in the lazy call graph because
// LLVM may introduce calls to them out of thin air.
static bool isKnownLibFunction(Function &F, TargetLibraryInfo &TLI) {
  LibFunc LF;

  // Either this is a normal library function or a "vectorizable" function.
  return TLI.getLibFunc(F, LF) || TLI.isFunctionVectorizable(F.getName());
}
LazyCallGraph::LazyCallGraph(Module &M, TargetLibraryInfo &TLI) {
DEBUG(dbgs() << "Building CG for module: " << M.getModuleIdentifier()
<< "\n");
for (Function &F : M)
if (!F.isDeclaration() && !F.hasLocalLinkage()) {
DEBUG(dbgs() << " Adding '" << F.getName()
<< "' to entry set of the graph.\n");
addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
}
for (Function &F : M) {
if (F.isDeclaration())
continue;
// If this function is a known lib function to LLVM then we want to
// synthesize reference edges to it to model the fact that LLVM can turn
// arbitrary code into a library function call.
if (isKnownLibFunction(F, TLI))
LibFunctions.insert(&F);
if (F.hasLocalLinkage())
continue;
// External linkage defined functions have edges to them from other
// modules.
DEBUG(dbgs() << " Adding '" << F.getName()
<< "' to entry set of the graph.\n");
addEdge(EntryEdges.Edges, EntryEdges.EdgeIndexMap, get(F), Edge::Ref);
}
// Now add entry nodes for functions reachable via initializers to globals.
SmallVector<Constant *, 16> Worklist;
@ -149,7 +175,8 @@ LazyCallGraph::LazyCallGraph(Module &M) {
LazyCallGraph::LazyCallGraph(LazyCallGraph &&G)
: BPA(std::move(G.BPA)), NodeMap(std::move(G.NodeMap)),
EntryEdges(std::move(G.EntryEdges)), SCCBPA(std::move(G.SCCBPA)),
SCCMap(std::move(G.SCCMap)), LeafRefSCCs(std::move(G.LeafRefSCCs)) {
SCCMap(std::move(G.SCCMap)), LeafRefSCCs(std::move(G.LeafRefSCCs)),
LibFunctions(std::move(G.LibFunctions)) {
updateGraphPtrs();
}
@ -160,6 +187,7 @@ LazyCallGraph &LazyCallGraph::operator=(LazyCallGraph &&G) {
SCCBPA = std::move(G.SCCBPA);
SCCMap = std::move(G.SCCMap);
LeafRefSCCs = std::move(G.LeafRefSCCs);
LibFunctions = std::move(G.LibFunctions);
updateGraphPtrs();
return *this;
}
@ -1580,6 +1608,11 @@ void LazyCallGraph::removeDeadFunction(Function &F) {
assert(F.use_empty() &&
"This routine should only be called on trivially dead functions!");
// We shouldn't remove library functions as they are never really dead while
// the call graph is in use -- every function definition refers to them.
assert(!isLibFunction(F) &&
"Must not remove lib functions from the call graph!");
auto NI = NodeMap.find(&F);
if (NI == NodeMap.end())
// Not in the graph at all!

View File

@ -609,7 +609,7 @@ Loop *UnloopUpdater::getNearestLoop(BasicBlock *BB, Loop *BBLoop) {
return NearLoop;
}
LoopInfo::LoopInfo(const DominatorTreeBase<BasicBlock> &DomTree) {
// Constructs LoopInfo by immediately running loop analysis over the given
// (forward) dominator tree.
LoopInfo::LoopInfo(const DomTreeBase<BasicBlock> &DomTree) {
  analyze(DomTree);
}

View File

@ -39,7 +39,6 @@
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>
#define DEBUG_TYPE "memoryssa"

View File

@ -23,6 +23,8 @@ using namespace llvm;
#define DEBUG_TYPE "postdomtree"
template class llvm::DominatorTreeBase<BasicBlock, true>; // PostDomTreeBase
//===----------------------------------------------------------------------===//
// PostDominatorTree Implementation
//===----------------------------------------------------------------------===//

View File

@ -4173,6 +4173,319 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
return None;
}
/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
/// Helper function to createAddRecFromPHIWithCasts. We have a phi
/// node whose symbolic (unknown) SCEV is \p SymbolicPHI, which is updated via
/// the loop backedge by a SCEVAddExpr, possibly also with a few casts on the
/// way. This function checks if \p Op, an operand of this SCEVAddExpr,
/// follows one of the following patterns:
/// Op == (SExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// Op == (ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy)
/// If the SCEV expression of \p Op conforms with one of the expected patterns
/// we return the type of the truncation operation, and indicate whether the
/// truncated type should be treated as signed/unsigned by setting
/// \p Signed to true/false, respectively.
/// \returns the truncation type on a match, or nullptr if \p Op does not
/// match either pattern (in which case \p Signed is left unmodified).
static Type *isSimpleCastedPHI(const SCEV *Op, const SCEVUnknown *SymbolicPHI,
                               bool &Signed, ScalarEvolution &SE) {
  // The case where Op == SymbolicPHI (that is, with no type conversions on
  // the way) is handled by the regular add recurrence creating logic and
  // would have already been triggered in createAddRecForPHI. Reaching it here
  // means that createAddRecFromPHI had failed for this PHI before (e.g.,
  // because one of the other operands of the SCEVAddExpr updating this PHI is
  // not invariant).
  //
  // Here we look for the case where Op = (ext(trunc(SymbolicPHI))), and in
  // this case predicates that allow us to prove that Op == SymbolicPHI will
  // be added.
  if (Op == SymbolicPHI)
    return nullptr;

  // The ext(trunc(...)) round trip only preserves the bit width, so the
  // pattern cannot match if the outer type differs from the phi's type.
  unsigned SourceBits = SE.getTypeSizeInBits(SymbolicPHI->getType());
  unsigned NewBits = SE.getTypeSizeInBits(Op->getType());
  if (SourceBits != NewBits)
    return nullptr;

  const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(Op);
  const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(Op);
  if (!SExt && !ZExt)
    return nullptr;
  const SCEVTruncateExpr *Trunc =
      SExt ? dyn_cast<SCEVTruncateExpr>(SExt->getOperand())
           : dyn_cast<SCEVTruncateExpr>(ZExt->getOperand());
  if (!Trunc)
    return nullptr;
  // The truncation must apply directly to the phi itself.
  const SCEV *X = Trunc->getOperand();
  if (X != SymbolicPHI)
    return nullptr;
  Signed = SExt != nullptr;
  return Trunc->getType();
}
// Returns the loop for which \p PN is the integer-typed header phi, or
// nullptr if \p PN is not an integer phi sitting in a loop header.
static const Loop *isIntegerLoopHeaderPHI(const PHINode *PN, LoopInfo &LI) {
  // Non-integer phis cannot form the induction patterns we care about.
  if (!PN->getType()->isIntegerTy())
    return nullptr;

  const auto *BB = PN->getParent();
  const Loop *L = LI.getLoopFor(BB);
  // Only header phis of the innermost containing loop qualify.
  return (L && L->getHeader() == BB) ? L : nullptr;
}
// Analyze \p SymbolicPHI, a SCEV expression of a phi node, and check if the
// computation that updates the phi follows the following pattern:
// (SExt/ZExt ix (Trunc iy (%SymbolicPHI) to ix) to iy) + InvariantAccum
// which correspond to a phi->trunc->sext/zext->add->phi update chain.
// If so, try to see if it can be rewritten as an AddRecExpr under some
// Predicates. If successful, return them as a pair. Also cache the results
// of the analysis.
//
// Example usage scenario:
// Say the Rewriter is called for the following SCEV:
// 8 * ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
// where:
// %X = phi i64 (%Start, %BEValue)
// It will visitMul->visitAdd->visitSExt->visitTrunc->visitUnknown(%X),
// and call this function with %SymbolicPHI = %X.
//
// The analysis will find that the value coming around the backedge has
// the following SCEV:
// BEValue = ((sext i32 (trunc i64 %X to i32) to i64) + %Step)
// Upon concluding that this matches the desired pattern, the function
// will return the pair {NewAddRec, SmallPredsVec} where:
// NewAddRec = {%Start,+,%Step}
// SmallPredsVec = {P1, P2, P3} as follows:
// P1(WrapPred): AR: {trunc(%Start),+,(trunc %Step)}<nsw> Flags: <nssw>
// P2(EqualPred): %Start == (sext i32 (trunc i64 %Start to i32) to i64)
// P3(EqualPred): %Step == (sext i32 (trunc i64 %Step to i32) to i64)
// The returned pair means that SymbolicPHI can be rewritten into NewAddRec
// under the predicates {P1,P2,P3}.
// This predicated rewrite will be cached in PredicatedSCEVRewrites:
// PredicatedSCEVRewrites[{%X,L}] = {NewAddRec, {P1,P2,P3)}
//
// TODO's:
//
// 1) Extend the Induction descriptor to also support inductions that involve
// casts: When needed (namely, when we are called in the context of the
// vectorizer induction analysis), a Set of cast instructions will be
// populated by this method, and provided back to isInductionPHI. This is
// needed to allow the vectorizer to properly record them to be ignored by
// the cost model and to avoid vectorizing them (otherwise these casts,
// which are redundant under the runtime overflow checks, will be
// vectorized, which can be costly).
//
// 2) Support additional induction/PHISCEV patterns: We also want to support
// inductions where the sext-trunc / zext-trunc operations (partly) occur
// after the induction update operation (the induction increment):
//
// (Trunc iy (SExt/ZExt ix (%SymbolicPHI + InvariantAccum) to iy) to ix)
// which correspond to a phi->add->trunc->sext/zext->phi update chain.
//
// (Trunc iy ((SExt/ZExt ix (%SymbolicPhi) to iy) + InvariantAccum) to ix)
// which correspond to a phi->trunc->add->sext/zext->phi update chain.
//
// 3) Outline common code with createAddRecFromPHI to avoid duplication.
//
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCastsImpl(const SCEVUnknown *SymbolicPHI) {
SmallVector<const SCEVPredicate *, 3> Predicates;
// *** Part1: Analyze if we have a phi-with-cast pattern for which we can
// return an AddRec expression under some predicate.
auto *PN = cast<PHINode>(SymbolicPHI->getValue());
const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
assert (L && "Expecting an integer loop header phi");
// The loop may have multiple entrances or multiple exits; we can analyze
// this phi as an addrec if it has a unique entry value and a unique
// backedge value.
Value *BEValueV = nullptr, *StartValueV = nullptr;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *V = PN->getIncomingValue(i);
if (L->contains(PN->getIncomingBlock(i))) {
if (!BEValueV) {
BEValueV = V;
} else if (BEValueV != V) {
BEValueV = nullptr;
break;
}
} else if (!StartValueV) {
StartValueV = V;
} else if (StartValueV != V) {
StartValueV = nullptr;
break;
}
}
if (!BEValueV || !StartValueV)
return None;
const SCEV *BEValue = getSCEV(BEValueV);
// If the value coming around the backedge is an add with the symbolic
// value we just inserted, possibly with casts that we can ignore under
// an appropriate runtime guard, then we found a simple induction variable!
const auto *Add = dyn_cast<SCEVAddExpr>(BEValue);
if (!Add)
return None;
// If there is a single occurrence of the symbolic value, possibly
// casted, replace it with a recurrence.
unsigned FoundIndex = Add->getNumOperands();
Type *TruncTy = nullptr;
bool Signed;
for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
if ((TruncTy =
isSimpleCastedPHI(Add->getOperand(i), SymbolicPHI, Signed, *this)))
if (FoundIndex == e) {
FoundIndex = i;
break;
}
if (FoundIndex == Add->getNumOperands())
return None;
// Create an add with everything but the specified operand.
SmallVector<const SCEV *, 8> Ops;
for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
if (i != FoundIndex)
Ops.push_back(Add->getOperand(i));
const SCEV *Accum = getAddExpr(Ops);
// The runtime checks will not be valid if the step amount is
// varying inside the loop.
if (!isLoopInvariant(Accum, L))
return None;
// *** Part2: Create the predicates
// Analysis was successful: we have a phi-with-cast pattern for which we
// can return an AddRec expression under the following predicates:
//
// P1: A Wrap predicate that guarantees that Trunc(Start) + i*Trunc(Accum)
// fits within the truncated type (does not overflow) for i = 0 to n-1.
// P2: An Equal predicate that guarantees that
// Start = (Ext ix (Trunc iy (Start) to ix) to iy)
// P3: An Equal predicate that guarantees that
// Accum = (Ext ix (Trunc iy (Accum) to ix) to iy)
//
// As we next prove, the above predicates guarantee that:
// Start + i*Accum = (Ext ix (Trunc iy ( Start + i*Accum ) to ix) to iy)
//
//
// More formally, we want to prove that:
// Expr(i+1) = Start + (i+1) * Accum
// = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
//
// Given that:
// 1) Expr(0) = Start
// 2) Expr(1) = Start + Accum
// = (Ext ix (Trunc iy (Start) to ix) to iy) + Accum :: from P2
// 3) Induction hypothesis (step i):
// Expr(i) = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum
//
// Proof:
// Expr(i+1) =
// = Start + (i+1)*Accum
// = (Start + i*Accum) + Accum
// = Expr(i) + Accum
// = (Ext ix (Trunc iy (Expr(i-1)) to ix) to iy) + Accum + Accum
// :: from step i
//
// = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy) + Accum + Accum
//
// = (Ext ix (Trunc iy (Start + (i-1)*Accum) to ix) to iy)
// + (Ext ix (Trunc iy (Accum) to ix) to iy)
// + Accum :: from P3
//
// = (Ext ix (Trunc iy ((Start + (i-1)*Accum) + Accum) to ix) to iy)
// + Accum :: from P1: Ext(x)+Ext(y)=>Ext(x+y)
//
// = (Ext ix (Trunc iy (Start + i*Accum) to ix) to iy) + Accum
// = (Ext ix (Trunc iy (Expr(i)) to ix) to iy) + Accum
//
// By induction, the same applies to all iterations 1<=i<n:
//
// Create a truncated addrec for which we will add a no overflow check (P1).
const SCEV *StartVal = getSCEV(StartValueV);
const SCEV *PHISCEV =
getAddRecExpr(getTruncateExpr(StartVal, TruncTy),
getTruncateExpr(Accum, TruncTy), L, SCEV::FlagAnyWrap);
const auto *AR = cast<SCEVAddRecExpr>(PHISCEV);
SCEVWrapPredicate::IncrementWrapFlags AddedFlags =
Signed ? SCEVWrapPredicate::IncrementNSSW
: SCEVWrapPredicate::IncrementNUSW;
const SCEVPredicate *AddRecPred = getWrapPredicate(AR, AddedFlags);
Predicates.push_back(AddRecPred);
// Create the Equal Predicates P2,P3:
auto AppendPredicate = [&](const SCEV *Expr) -> void {
assert (isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
const SCEV *ExtendedExpr =
Signed ? getSignExtendExpr(TruncatedExpr, Expr->getType())
: getZeroExtendExpr(TruncatedExpr, Expr->getType());
if (Expr != ExtendedExpr &&
!isKnownPredicate(ICmpInst::ICMP_EQ, Expr, ExtendedExpr)) {
const SCEVPredicate *Pred = getEqualPredicate(Expr, ExtendedExpr);
DEBUG (dbgs() << "Added Predicate: " << *Pred);
Predicates.push_back(Pred);
}
};
AppendPredicate(StartVal);
AppendPredicate(Accum);
// *** Part3: Predicates are ready. Now go ahead and create the new addrec in
// which the casts had been folded away. The caller can rewrite SymbolicPHI
// into NewAR if it will also add the runtime overflow checks specified in
// Predicates.
auto *NewAR = getAddRecExpr(StartVal, Accum, L, SCEV::FlagAnyWrap);
std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> PredRewrite =
std::make_pair(NewAR, Predicates);
  // Remember the result of the analysis for this SCEV at this location.
PredicatedSCEVRewrites[{SymbolicPHI, L}] = PredRewrite;
return PredRewrite;
}
/// Public entry point: try to express \p SymbolicPHI as an AddRec, possibly
/// guarded by runtime predicates. Results (both successes and failures) are
/// memoized in PredicatedSCEVRewrites; a cached entry that maps the PHI to
/// itself is the sentinel for "analysis already failed".
Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
ScalarEvolution::createAddRecFromPHIWithCasts(const SCEVUnknown *SymbolicPHI) {
  auto *PN = cast<PHINode>(SymbolicPHI->getValue());
  const Loop *L = isIntegerLoopHeaderPHI(PN, LI);
  if (!L)
    return None;

  // Consult the cache before doing any work.
  auto It = PredicatedSCEVRewrites.find({SymbolicPHI, L});
  if (It != PredicatedSCEVRewrites.end()) {
    std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>> Cached =
        It->second;
    // The sentinel (PHI mapped to itself) records an earlier failure.
    if (Cached.first == SymbolicPHI)
      return None;
    // A prior run produced a predicated AddRec; hand it back as-is.
    assert(isa<SCEVAddRecExpr>(Cached.first) && "Expected an AddRec");
    assert(!(Cached.second).empty() && "Expected to find Predicates");
    return Cached;
  }

  // Nothing cached: run the analysis now. The impl memoizes successes itself.
  Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      Rewrite = createAddRecFromPHIWithCastsImpl(SymbolicPHI);
  if (Rewrite)
    return Rewrite;

  // Analysis failed; memoize that fact with the sentinel entry so the work is
  // not redone on the next query for this (PHI, loop) pair.
  SmallVector<const SCEVPredicate *, 3> NoPredicates;
  PredicatedSCEVRewrites[{SymbolicPHI, L}] = {SymbolicPHI, NoPredicates};
  return None;
}
/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
@ -5904,6 +6217,16 @@ void ScalarEvolution::forgetLoop(const Loop *L) {
RemoveLoopFromBackedgeMap(BackedgeTakenCounts);
RemoveLoopFromBackedgeMap(PredicatedBackedgeTakenCounts);
// Drop information about predicated SCEV rewrites for this loop.
for (auto I = PredicatedSCEVRewrites.begin();
I != PredicatedSCEVRewrites.end();) {
std::pair<const SCEV *, const Loop *> Entry = I->first;
if (Entry.second == L)
PredicatedSCEVRewrites.erase(I++);
else
++I;
}
// Drop information about expressions based on loop-header PHIs.
SmallVector<Instruction *, 16> Worklist;
PushLoopPHIs(L, Worklist);
@ -10062,6 +10385,7 @@ ScalarEvolution::ScalarEvolution(ScalarEvolution &&Arg)
UniqueSCEVs(std::move(Arg.UniqueSCEVs)),
UniquePreds(std::move(Arg.UniquePreds)),
SCEVAllocator(std::move(Arg.SCEVAllocator)),
PredicatedSCEVRewrites(std::move(Arg.PredicatedSCEVRewrites)),
FirstUnknown(Arg.FirstUnknown) {
Arg.FirstUnknown = nullptr;
}
@ -10462,6 +10786,15 @@ void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
HasRecMap.erase(S);
MinTrailingZerosCache.erase(S);
for (auto I = PredicatedSCEVRewrites.begin();
I != PredicatedSCEVRewrites.end();) {
std::pair<const SCEV *, const Loop *> Entry = I->first;
if (Entry.first == S)
PredicatedSCEVRewrites.erase(I++);
else
++I;
}
auto RemoveSCEVFromBackedgeMap =
[S, this](DenseMap<const Loop *, BackedgeTakenInfo> &Map) {
for (auto I = Map.begin(), E = Map.end(); I != E;) {
@ -10621,10 +10954,11 @@ void ScalarEvolutionWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}
const SCEVPredicate *
ScalarEvolution::getEqualPredicate(const SCEVUnknown *LHS,
const SCEVConstant *RHS) {
const SCEVPredicate *ScalarEvolution::getEqualPredicate(const SCEV *LHS,
const SCEV *RHS) {
FoldingSetNodeID ID;
assert(LHS->getType() == RHS->getType() &&
"Type mismatch between LHS and RHS");
// Unique this node based on the arguments
ID.AddInteger(SCEVPredicate::P_Equal);
ID.AddPointer(LHS);
@ -10687,8 +11021,7 @@ class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
if (IPred->getLHS() == Expr)
return IPred->getRHS();
}
return Expr;
return convertToAddRecWithPreds(Expr);
}
const SCEV *visitZeroExtendExpr(const SCEVZeroExtendExpr *Expr) {
@ -10724,17 +11057,41 @@ class SCEVPredicateRewriter : public SCEVRewriteVisitor<SCEVPredicateRewriter> {
}
private:
bool addOverflowAssumption(const SCEVAddRecExpr *AR,
SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
auto *A = SE.getWrapPredicate(AR, AddedFlags);
bool addOverflowAssumption(const SCEVPredicate *P) {
if (!NewPreds) {
// Check if we've already made this assumption.
return Pred && Pred->implies(A);
return Pred && Pred->implies(P);
}
NewPreds->insert(A);
NewPreds->insert(P);
return true;
}
  // Convenience overload: build a SCEVWrapPredicate for \p AR with the given
  // increment-wrap flags and record (or verify) it through the
  // SCEVPredicate-based overload.
  bool addOverflowAssumption(const SCEVAddRecExpr *AR,
                             SCEVWrapPredicate::IncrementWrapFlags AddedFlags) {
    auto *A = SE.getWrapPredicate(AR, AddedFlags);
    return addOverflowAssumption(A);
  }
  // If \p Expr represents a PHINode, ask ScalarEvolution for a rewrite of it
  // as an AddRec, possibly valid only under a set of predicates (see
  // createAddRecFromPHIWithCasts). The AddRec is returned only if every one
  // of those predicates can be added as a runtime overflow check; otherwise
  // (not a PHI node, no AddRec could be created, or a predicate could not be
  // added) \p Expr is returned unchanged.
  const SCEV *convertToAddRecWithPreds(const SCEVUnknown *Expr) {
    // Only unknowns wrapping a PHI node are candidates.
    if (!isa<PHINode>(Expr->getValue()))
      return Expr;
    Optional<std::pair<const SCEV *, SmallVector<const SCEVPredicate *, 3>>>
      PredicatedRewrite = SE.createAddRecFromPHIWithCasts(Expr);
    if (!PredicatedRewrite)
      return Expr;
    // Every predicate must be accepted (or already implied); bail out to the
    // original expression on the first one that is not.
    for (auto *P : PredicatedRewrite->second){
      if (!addOverflowAssumption(P))
        return Expr;
    }
    return PredicatedRewrite->first;
  }
SmallPtrSetImpl<const SCEVPredicate *> *NewPreds;
SCEVUnionPredicate *Pred;
const Loop *L;
@ -10771,9 +11128,11 @@ SCEVPredicate::SCEVPredicate(const FoldingSetNodeIDRef ID,
: FastID(ID), Kind(Kind) {}
SCEVEqualPredicate::SCEVEqualPredicate(const FoldingSetNodeIDRef ID,
const SCEVUnknown *LHS,
const SCEVConstant *RHS)
: SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {}
const SCEV *LHS, const SCEV *RHS)
: SCEVPredicate(ID, P_Equal), LHS(LHS), RHS(RHS) {
assert(LHS->getType() == RHS->getType() && "LHS and RHS types don't match");
assert(LHS != RHS && "LHS and RHS are the same SCEV");
}
bool SCEVEqualPredicate::implies(const SCEVPredicate *N) const {
const auto *Op = dyn_cast<SCEVEqualPredicate>(N);

View File

@ -82,6 +82,11 @@ int TargetTransformInfo::getGEPCost(Type *PointeeType, const Value *Ptr,
return TTIImpl->getGEPCost(PointeeType, Ptr, Operands);
}
// Forward the getExtCost query (cost of the extension instruction \p I of
// value \p Src) to the target-specific TTI implementation.
int TargetTransformInfo::getExtCost(const Instruction *I,
                                    const Value *Src) const {
  return TTIImpl->getExtCost(I, Src);
}
int TargetTransformInfo::getIntrinsicCost(
Intrinsic::ID IID, Type *RetTy, ArrayRef<const Value *> Arguments) const {
int Cost = TTIImpl->getIntrinsicCost(IID, RetTy, Arguments);

View File

@ -588,7 +588,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(spir_func);
KEYWORD(intel_ocl_bicc);
KEYWORD(x86_64_sysvcc);
KEYWORD(x86_64_win64cc);
KEYWORD(win64cc);
KEYWORD(x86_regcallcc);
KEYWORD(webkit_jscc);
KEYWORD(swiftcc);

View File

@ -1670,7 +1670,7 @@ void LLParser::ParseOptionalDLLStorageClass(unsigned &Res) {
/// ::= 'spir_func'
/// ::= 'spir_kernel'
/// ::= 'x86_64_sysvcc'
/// ::= 'x86_64_win64cc'
/// ::= 'win64cc'
/// ::= 'webkit_jscc'
/// ::= 'anyregcc'
/// ::= 'preserve_mostcc'
@ -1712,7 +1712,7 @@ bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
case lltok::kw_spir_func: CC = CallingConv::SPIR_FUNC; break;
case lltok::kw_intel_ocl_bicc: CC = CallingConv::Intel_OCL_BI; break;
case lltok::kw_x86_64_sysvcc: CC = CallingConv::X86_64_SysV; break;
case lltok::kw_x86_64_win64cc: CC = CallingConv::X86_64_Win64; break;
case lltok::kw_win64cc: CC = CallingConv::Win64; break;
case lltok::kw_webkit_jscc: CC = CallingConv::WebKit_JS; break;
case lltok::kw_anyregcc: CC = CallingConv::AnyReg; break;
case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
@ -4411,13 +4411,15 @@ bool LLParser::ParseDIImportedEntity(MDNode *&Result, bool IsDistinct) {
REQUIRED(tag, DwarfTagField, ); \
REQUIRED(scope, MDField, ); \
OPTIONAL(entity, MDField, ); \
OPTIONAL(file, MDField, ); \
OPTIONAL(line, LineField, ); \
OPTIONAL(name, MDStringField, );
PARSE_MD_FIELDS();
#undef VISIT_MD_FIELDS
Result = GET_OR_DISTINCT(DIImportedEntity, (Context, tag.Val, scope.Val,
entity.Val, line.Val, name.Val));
Result = GET_OR_DISTINCT(
DIImportedEntity,
(Context, tag.Val, scope.Val, entity.Val, file.Val, line.Val, name.Val));
return false;
}

View File

@ -141,7 +141,7 @@ enum Kind {
kw_spir_kernel,
kw_spir_func,
kw_x86_64_sysvcc,
kw_x86_64_win64cc,
kw_win64cc,
kw_webkit_jscc,
kw_anyregcc,
kw_swiftcc,

View File

@ -1671,15 +1671,17 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
break;
}
case bitc::METADATA_IMPORTED_ENTITY: {
if (Record.size() != 6)
if (Record.size() != 6 && Record.size() != 7)
return error("Invalid record");
IsDistinct = Record[0];
bool HasFile = (Record.size() == 7);
MetadataList.assignValue(
GET_OR_DISTINCT(DIImportedEntity,
(Context, Record[1], getMDOrNull(Record[2]),
getDITypeRefOrNull(Record[3]), Record[4],
getMDString(Record[5]))),
getDITypeRefOrNull(Record[3]),
HasFile ? getMDOrNull(Record[6]) : nullptr,
HasFile ? Record[4] : 0, getMDString(Record[5]))),
NextMetadataNo);
NextMetadataNo++;
break;

View File

@ -1718,6 +1718,7 @@ void ModuleBitcodeWriter::writeDIImportedEntity(
Record.push_back(VE.getMetadataOrNullID(N->getEntity()));
Record.push_back(N->getLine());
Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
Record.push_back(VE.getMetadataOrNullID(N->getRawFile()));
Stream.EmitRecord(bitc::METADATA_IMPORTED_ENTITY, Record, Abbrev);
Record.clear();

View File

@ -664,8 +664,9 @@ DIE *DwarfCompileUnit::constructImportedEntityDIE(
else
EntityDie = getDIE(Entity);
assert(EntityDie);
addSourceLine(*IMDie, Module->getLine(), Module->getScope()->getFilename(),
Module->getScope()->getDirectory());
auto *File = Module->getFile();
addSourceLine(*IMDie, Module->getLine(), File ? File->getFilename() : "",
File ? File->getDirectory() : "");
addDIEEntry(*IMDie, dwarf::DW_AT_import, *EntityDie);
StringRef Name = Module->getName();
if (!Name.empty())

View File

@ -4267,9 +4267,7 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// Use a worklist to iteratively look through PHI nodes, and ensure that
// the addressing mode obtained from the non-PHI roots of the graph
// are equivalent.
Value *Consensus = nullptr;
unsigned NumUsesConsensus = 0;
bool IsNumUsesConsensusValid = false;
bool AddrModeFound = false;
bool PhiSeen = false;
SmallVector<Instruction*, 16> AddrModeInsts;
ExtAddrMode AddrMode;
@ -4280,11 +4278,17 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Value *V = worklist.back();
worklist.pop_back();
// Break use-def graph loops.
if (!Visited.insert(V).second) {
Consensus = nullptr;
break;
}
// We allow traversing cyclic Phi nodes.
// In case of success after this loop we ensure that traversing through
// Phi nodes ends up with all cases to compute address of the form
// BaseGV + Base + Scale * Index + Offset
// where Scale and Offset are constans and BaseGV, Base and Index
// are exactly the same Values in all cases.
// It means that BaseGV, Scale and Offset dominate our memory instruction
// and have the same value as they had in address computation represented
// as Phi. So we can safely sink address computation to memory instruction.
if (!Visited.insert(V).second)
continue;
// For a PHI node, push all of its incoming values.
if (PHINode *P = dyn_cast<PHINode>(V)) {
@ -4297,47 +4301,26 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// For non-PHIs, determine the addressing mode being computed. Note that
// the result may differ depending on what other uses our candidate
// addressing instructions might have.
SmallVector<Instruction*, 16> NewAddrModeInsts;
AddrModeInsts.clear();
ExtAddrMode NewAddrMode = AddressingModeMatcher::Match(
V, AccessTy, AddrSpace, MemoryInst, NewAddrModeInsts, *TLI, *TRI,
InsertedInsts, PromotedInsts, TPT);
V, AccessTy, AddrSpace, MemoryInst, AddrModeInsts, *TLI, *TRI,
InsertedInsts, PromotedInsts, TPT);
// This check is broken into two cases with very similar code to avoid using
// getNumUses() as much as possible. Some values have a lot of uses, so
// calling getNumUses() unconditionally caused a significant compile-time
// regression.
if (!Consensus) {
Consensus = V;
if (!AddrModeFound) {
AddrModeFound = true;
AddrMode = NewAddrMode;
AddrModeInsts = NewAddrModeInsts;
continue;
} else if (NewAddrMode == AddrMode) {
if (!IsNumUsesConsensusValid) {
NumUsesConsensus = Consensus->getNumUses();
IsNumUsesConsensusValid = true;
}
// Ensure that the obtained addressing mode is equivalent to that obtained
// for all other roots of the PHI traversal. Also, when choosing one
// such root as representative, select the one with the most uses in order
// to keep the cost modeling heuristics in AddressingModeMatcher
// applicable.
unsigned NumUses = V->getNumUses();
if (NumUses > NumUsesConsensus) {
Consensus = V;
NumUsesConsensus = NumUses;
AddrModeInsts = NewAddrModeInsts;
}
continue;
}
if (NewAddrMode == AddrMode)
continue;
Consensus = nullptr;
AddrModeFound = false;
break;
}
// If the addressing mode couldn't be determined, or if multiple different
// ones were determined, bail out now.
if (!Consensus) {
if (!AddrModeFound) {
TPT.rollback(LastKnownGood);
return false;
}
@ -4847,25 +4830,7 @@ bool CodeGenPrepare::canFormExtLd(
if (!HasPromoted && LI->getParent() == Inst->getParent())
return false;
EVT VT = TLI->getValueType(*DL, Inst->getType());
EVT LoadVT = TLI->getValueType(*DL, LI->getType());
// If the load has other users and the truncate is not free, this probably
// isn't worthwhile.
if (!LI->hasOneUse() && (TLI->isTypeLegal(LoadVT) || !TLI->isTypeLegal(VT)) &&
!TLI->isTruncateFree(Inst->getType(), LI->getType()))
return false;
// Check whether the target supports casts folded into loads.
unsigned LType;
if (isa<ZExtInst>(Inst))
LType = ISD::ZEXTLOAD;
else {
assert(isa<SExtInst>(Inst) && "Unexpected ext type!");
LType = ISD::SEXTLOAD;
}
return TLI->isLoadExtLegal(LType, VT, LoadVT);
return TLI->isExtLoad(LI, Inst, *DL);
}
/// Move a zext or sext fed by a load into the same basic block as the load,

View File

@ -433,9 +433,12 @@ LegalizerHelper::widenScalar(MachineInstr &MI, unsigned TypeIdx, LLT WideTy) {
}
case TargetOpcode::G_SDIV:
case TargetOpcode::G_UDIV:
case TargetOpcode::G_SREM:
case TargetOpcode::G_UREM:
case TargetOpcode::G_ASHR:
case TargetOpcode::G_LSHR: {
unsigned ExtOp = MI.getOpcode() == TargetOpcode::G_SDIV ||
MI.getOpcode() == TargetOpcode::G_SREM ||
MI.getOpcode() == TargetOpcode::G_ASHR
? TargetOpcode::G_SEXT
: TargetOpcode::G_ZEXT;

View File

@ -11,8 +11,6 @@
// instructions do not lengthen the critical path or the resource depth.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "machine-combiner"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
@ -32,6 +30,8 @@
using namespace llvm;
#define DEBUG_TYPE "machine-combiner"
STATISTIC(NumInstCombined, "Number of machineinst combined");
namespace {

View File

@ -15,7 +15,8 @@
using namespace llvm;
namespace llvm {
template class DominanceFrontierBase<MachineBasicBlock>;
template class DominanceFrontierBase<MachineBasicBlock, false>;
template class DominanceFrontierBase<MachineBasicBlock, true>;
template class ForwardDominanceFrontierBase<MachineBasicBlock>;
}

View File

@ -31,7 +31,7 @@ static cl::opt<bool, true> VerifyMachineDomInfoX(
namespace llvm {
template class DomTreeNodeBase<MachineBasicBlock>;
template class DominatorTreeBase<MachineBasicBlock>;
template class DominatorTreeBase<MachineBasicBlock, false>; // DomTreeBase
}
char MachineDominatorTree::ID = 0;
@ -49,7 +49,7 @@ void MachineDominatorTree::getAnalysisUsage(AnalysisUsage &AU) const {
bool MachineDominatorTree::runOnMachineFunction(MachineFunction &F) {
CriticalEdgesToSplit.clear();
NewBBs.clear();
DT.reset(new DominatorTreeBase<MachineBasicBlock>(false));
DT.reset(new DomTreeBase<MachineBasicBlock>());
DT->recalculate(F);
return false;
}
@ -144,7 +144,7 @@ void MachineDominatorTree::verifyDomTree() const {
return;
MachineFunction &F = *getRoot()->getParent();
DominatorTreeBase<MachineBasicBlock> OtherDT(false);
DomTreeBase<MachineBasicBlock> OtherDT;
OtherDT.recalculate(F);
if (getRootNode()->getBlock() != OtherDT.getRootNode()->getBlock() ||
DT->compare(OtherDT)) {

View File

@ -16,6 +16,10 @@
using namespace llvm;
namespace llvm {
template class DominatorTreeBase<MachineBasicBlock, true>; // PostDomTreeBase
}
char MachinePostDominatorTree::ID = 0;
//declare initializeMachinePostDominatorTreePass
@ -24,8 +28,7 @@ INITIALIZE_PASS(MachinePostDominatorTree, "machinepostdomtree",
MachinePostDominatorTree::MachinePostDominatorTree() : MachineFunctionPass(ID) {
initializeMachinePostDominatorTreePass(*PassRegistry::getPassRegistry());
DT = new DominatorTreeBase<MachineBasicBlock>(true); //true indicate
// postdominator
DT = new PostDomTreeBase<MachineBasicBlock>();
}
FunctionPass *

View File

@ -3889,9 +3889,8 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
// Note: the SimplifyDemandedBits fold below can make an information-losing
// transform, and then we have no way to find this better fold.
if (N1C && N1C->isOne() && N0.getOpcode() == ISD::SUB) {
ConstantSDNode *SubLHS = isConstOrConstSplat(N0.getOperand(0));
SDValue SubRHS = N0.getOperand(1);
if (SubLHS && SubLHS->isNullValue()) {
if (isNullConstantOrNullSplatConstant(N0.getOperand(0))) {
SDValue SubRHS = N0.getOperand(1);
if (SubRHS.getOpcode() == ISD::ZERO_EXTEND &&
SubRHS.getOperand(0).getScalarValueSizeInBits() == 1)
return SubRHS;
@ -4586,6 +4585,20 @@ SDNode *DAGCombiner::MatchRotatePosNeg(SDValue Shifted, SDValue Pos,
return nullptr;
}
// Returns true when shift amounts \p Left and \p Right are both constant (or
// splat-constant vectors) and their sum equals \p Sum.
static bool sumMatchConstant(SDValue Left, SDValue Right, unsigned Sum,
                             SelectionDAG &DAG, const SDLoc &DL) {
  EVT AmtVT = Left.getValueType();
  if (AmtVT != Right.getValueType())
    return false;
  // Constant-fold the addition; this yields a null SDValue whenever either
  // operand is not a constant / constant splat.
  SDValue Folded = DAG.FoldConstantArithmetic(ISD::ADD, DL, AmtVT,
                                              Left.getNode(), Right.getNode());
  if (!Folded)
    return false;
  ConstantSDNode *Total = isConstOrConstSplat(Folded);
  return Total && Total->getZExtValue() == Sum;
}
// MatchRotate - Handle an 'or' of two operands. If this is one of the many
// idioms for rotate, and if the target supports rotation instructions, generate
// a rot[lr].
@ -4631,30 +4644,24 @@ SDNode *DAGCombiner::MatchRotate(SDValue LHS, SDValue RHS, const SDLoc &DL) {
// fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
// fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
if (isConstOrConstSplat(LHSShiftAmt) && isConstOrConstSplat(RHSShiftAmt)) {
uint64_t LShVal = isConstOrConstSplat(LHSShiftAmt)->getZExtValue();
uint64_t RShVal = isConstOrConstSplat(RHSShiftAmt)->getZExtValue();
if ((LShVal + RShVal) != EltSizeInBits)
return nullptr;
if (sumMatchConstant(LHSShiftAmt, RHSShiftAmt, EltSizeInBits, DAG, DL)) {
SDValue Rot = DAG.getNode(HasROTL ? ISD::ROTL : ISD::ROTR, DL, VT,
LHSShiftArg, HasROTL ? LHSShiftAmt : RHSShiftAmt);
// If there is an AND of either shifted operand, apply it to the result.
if (LHSMask.getNode() || RHSMask.getNode()) {
SDValue Mask = DAG.getAllOnesConstant(DL, VT);
SDValue AllOnes = DAG.getAllOnesConstant(DL, VT);
SDValue Mask = AllOnes;
if (LHSMask.getNode()) {
APInt RHSBits = APInt::getLowBitsSet(EltSizeInBits, LShVal);
SDValue RHSBits = DAG.getNode(ISD::SRL, DL, VT, AllOnes, RHSShiftAmt);
Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
DAG.getNode(ISD::OR, DL, VT, LHSMask,
DAG.getConstant(RHSBits, DL, VT)));
DAG.getNode(ISD::OR, DL, VT, LHSMask, RHSBits));
}
if (RHSMask.getNode()) {
APInt LHSBits = APInt::getHighBitsSet(EltSizeInBits, RShVal);
SDValue LHSBits = DAG.getNode(ISD::SHL, DL, VT, AllOnes, LHSShiftAmt);
Mask = DAG.getNode(ISD::AND, DL, VT, Mask,
DAG.getNode(ISD::OR, DL, VT, RHSMask,
DAG.getConstant(LHSBits, DL, VT)));
DAG.getNode(ISD::OR, DL, VT, RHSMask, LHSBits));
}
Rot = DAG.getNode(ISD::AND, DL, VT, Rot, Mask);
@ -5272,11 +5279,21 @@ SDValue DAGCombiner::visitRotate(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
EVT VT = N->getValueType(0);
unsigned Bitsize = VT.getScalarSizeInBits();
// fold (rot x, 0) -> x
if (isNullConstantOrNullSplatConstant(N1))
return N0;
// fold (rot x, c) -> (rot x, c % BitSize)
if (ConstantSDNode *Cst = isConstOrConstSplat(N1)) {
if (Cst->getAPIntValue().uge(Bitsize)) {
uint64_t RotAmt = Cst->getAPIntValue().urem(Bitsize);
return DAG.getNode(N->getOpcode(), dl, VT, N0,
DAG.getConstant(RotAmt, dl, N1.getValueType()));
}
}
// fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
if (N1.getOpcode() == ISD::TRUNCATE &&
N1.getOperand(0).getOpcode() == ISD::AND) {
@ -5286,22 +5303,24 @@ SDValue DAGCombiner::visitRotate(SDNode *N) {
unsigned NextOp = N0.getOpcode();
// fold (rot* (rot* x, c2), c1) -> (rot* x, c1 +- c2 % bitsize)
if (NextOp == ISD::ROTL || NextOp == ISD::ROTR)
if (SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1))
if (SDNode *C2 =
DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1))) {
bool SameSide = (N->getOpcode() == NextOp);
unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
if (SDValue CombinedShift =
DAG.FoldConstantArithmetic(CombineOp, dl, VT, C1, C2)) {
unsigned Bitsize = VT.getScalarSizeInBits();
SDValue BitsizeC = DAG.getConstant(Bitsize, dl, VT);
SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic(
ISD::SREM, dl, VT, CombinedShift.getNode(), BitsizeC.getNode());
return DAG.getNode(
N->getOpcode(), dl, VT, N0->getOperand(0), CombinedShiftNorm);
}
if (NextOp == ISD::ROTL || NextOp == ISD::ROTR) {
SDNode *C1 = DAG.isConstantIntBuildVectorOrConstantInt(N1);
SDNode *C2 = DAG.isConstantIntBuildVectorOrConstantInt(N0.getOperand(1));
if (C1 && C2 && C1->getValueType(0) == C2->getValueType(0)) {
EVT ShiftVT = C1->getValueType(0);
bool SameSide = (N->getOpcode() == NextOp);
unsigned CombineOp = SameSide ? ISD::ADD : ISD::SUB;
if (SDValue CombinedShift =
DAG.FoldConstantArithmetic(CombineOp, dl, ShiftVT, C1, C2)) {
SDValue BitsizeC = DAG.getConstant(Bitsize, dl, ShiftVT);
SDValue CombinedShiftNorm = DAG.FoldConstantArithmetic(
ISD::SREM, dl, ShiftVT, CombinedShift.getNode(),
BitsizeC.getNode());
return DAG.getNode(N->getOpcode(), dl, VT, N0->getOperand(0),
CombinedShiftNorm);
}
}
}
return SDValue();
}
@ -7152,8 +7171,14 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad);
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, ISD::SIGN_EXTEND);
CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
return CombineTo(N, ExtLoad); // Return N so it doesn't get rechecked!
// If the load value is used only by N, replace it via CombineTo N.
bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
CombineTo(N, ExtLoad);
if (NoReplaceTrunc)
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
else
CombineTo(LN0, Trunc, ExtLoad.getValue(1));
return SDValue(N, 0);
}
}
@ -7210,8 +7235,13 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDLoc(N0.getOperand(0)),
N0.getOperand(0).getValueType(), ExtLoad);
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, ISD::SIGN_EXTEND);
CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
return CombineTo(N, And); // Return N so it doesn't get rechecked!
bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
CombineTo(N, And);
if (NoReplaceTrunc)
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
else
CombineTo(LN0, Trunc, ExtLoad.getValue(1));
return SDValue(N,0); // Return N so it doesn't get rechecked!
}
}
}
@ -7451,8 +7481,14 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad);
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N), ISD::ZERO_EXTEND);
CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
return CombineTo(N, ExtLoad); // Return N so it doesn't get rechecked!
// If the load value is used only by N, replace it via CombineTo N.
bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
CombineTo(N, ExtLoad);
if (NoReplaceTrunc)
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
else
CombineTo(LN0, Trunc, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
@ -7503,8 +7539,13 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
SDLoc(N0.getOperand(0)),
N0.getOperand(0).getValueType(), ExtLoad);
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, DL, ISD::ZERO_EXTEND);
CombineTo(N0.getOperand(0).getNode(), Trunc, ExtLoad.getValue(1));
return CombineTo(N, And); // Return N so it doesn't get rechecked!
bool NoReplaceTrunc = SDValue(LN0, 0).hasOneUse();
CombineTo(N, And);
if (NoReplaceTrunc)
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
else
CombineTo(LN0, Trunc, ExtLoad.getValue(1));
return SDValue(N,0); // Return N so it doesn't get rechecked!
}
}
}
@ -7676,13 +7717,18 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
LN0->getChain(),
LN0->getBasePtr(), N0.getValueType(),
LN0->getMemOperand());
CombineTo(N, ExtLoad);
SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
N0.getValueType(), ExtLoad);
CombineTo(N0.getNode(), Trunc, ExtLoad.getValue(1));
ExtendSetCCUses(SetCCs, Trunc, ExtLoad, SDLoc(N),
ISD::ANY_EXTEND);
return SDValue(N, 0); // Return N so it doesn't get rechecked!
// If the load value is used only by N, replace it via CombineTo N.
bool NoReplaceTrunc = N0.hasOneUse();
CombineTo(N, ExtLoad);
if (NoReplaceTrunc)
DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), ExtLoad.getValue(1));
else
CombineTo(LN0, Trunc, ExtLoad.getValue(1));
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
}
@ -11373,12 +11419,8 @@ SDValue DAGCombiner::visitLOAD(SDNode *N) {
SDValue Token = DAG.getNode(ISD::TokenFactor, SDLoc(N),
MVT::Other, Chain, ReplLoad.getValue(1));
// Make sure the new and old chains are cleaned up.
AddToWorklist(Token.getNode());
// Replace uses with load result and token factor. Don't add users
// to work list.
return CombineTo(N, ReplLoad.getValue(0), Token, false);
// Replace uses with load result and token factor
return CombineTo(N, ReplLoad.getValue(0), Token);
}
}
@ -12744,7 +12786,12 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
TLI.storeOfVectorConstantIsCheap(MemVT, i + 1, FirstStoreAS)) &&
!NoVectors) {
// Find a legal type for the vector store.
EVT Ty = EVT::getVectorVT(Context, MemVT, i + 1);
unsigned Elts = i + 1;
if (MemVT.isVector()) {
// When merging vector stores, get the total number of elements.
Elts *= MemVT.getVectorNumElements();
}
EVT Ty = EVT::getVectorVT(Context, MemVT.getScalarType(), Elts);
if (TLI.isTypeLegal(Ty) &&
TLI.canMergeStoresTo(FirstStoreAS, Ty, DAG) &&
TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
@ -13003,7 +13050,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
SDValue NewStoreChain = getMergeStoreChains(StoreNodes, NumElem);
AddToWorklist(NewStoreChain.getNode());
MachineMemOperand::Flags MMOFlags = isDereferenceable ?
MachineMemOperand::Flags MMOFlags = isDereferenceable ?
MachineMemOperand::MODereferenceable:
MachineMemOperand::MONone;
@ -16703,6 +16750,20 @@ bool DAGCombiner::isAlias(LSBaseSDNode *Op0, LSBaseSDNode *Op1) const {
if (BasePtr0.equalBaseIndex(BasePtr1, DAG, PtrDiff))
return !((NumBytes0 <= PtrDiff) || (PtrDiff + NumBytes1 <= 0));
// If both BasePtr0 and BasePtr1 are FrameIndexes, we will not be
// able to calculate their relative offset if at least one arises
// from an alloca. However, these allocas cannot overlap and we
// can infer there is no alias.
if (auto *A = dyn_cast<FrameIndexSDNode>(BasePtr0.getBase()))
if (auto *B = dyn_cast<FrameIndexSDNode>(BasePtr1.getBase())) {
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
      // If the bases are the same frame index but we couldn't find a
      // constant offset (because the indices differ), be conservative.
if (A != B && (!MFI.isFixedObjectIndex(A->getIndex()) ||
!MFI.isFixedObjectIndex(B->getIndex())))
return false;
}
// FIXME: findBaseOffset and ConstantValue/GlobalValue/FrameIndex analysis
// modified to use BaseIndexOffset.

View File

@ -1827,10 +1827,11 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
TLI.isOperationLegalOrCustom(N->getOpcode() == ISD::ADD ?
ISD::UADDO : ISD::USUBO,
TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
TargetLoweringBase::BooleanContent BoolType = TLI.getBooleanContents(NVT);
if (hasOVF) {
EVT OvfVT = getSetCCResultType(NVT);
SDVTList VTList = DAG.getVTList(NVT, OvfVT);
TargetLoweringBase::BooleanContent BoolType = TLI.getBooleanContents(NVT);
int RevOpc;
if (N->getOpcode() == ISD::ADD) {
RevOpc = ISD::SUB;
@ -1863,6 +1864,13 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
Hi = DAG.getNode(ISD::ADD, dl, NVT, makeArrayRef(HiOps, 2));
SDValue Cmp1 = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo, LoOps[0],
ISD::SETULT);
if (BoolType == TargetLoweringBase::ZeroOrOneBooleanContent) {
SDValue Carry = DAG.getZExtOrTrunc(Cmp1, dl, NVT);
Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, Carry);
return;
}
SDValue Carry1 = DAG.getSelect(dl, NVT, Cmp1,
DAG.getConstant(1, dl, NVT),
DAG.getConstant(0, dl, NVT));
@ -1877,9 +1885,14 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
SDValue Cmp =
DAG.getSetCC(dl, getSetCCResultType(LoOps[0].getValueType()),
LoOps[0], LoOps[1], ISD::SETULT);
SDValue Borrow = DAG.getSelect(dl, NVT, Cmp,
DAG.getConstant(1, dl, NVT),
DAG.getConstant(0, dl, NVT));
SDValue Borrow;
if (BoolType == TargetLoweringBase::ZeroOrOneBooleanContent)
Borrow = DAG.getZExtOrTrunc(Cmp, dl, NVT);
else
Borrow = DAG.getSelect(dl, NVT, Cmp, DAG.getConstant(1, dl, NVT),
DAG.getConstant(0, dl, NVT));
Hi = DAG.getNode(ISD::SUB, dl, NVT, Hi, Borrow);
}
}

View File

@ -142,9 +142,9 @@ bool XRayInstrumentation::runOnMachineFunction(MachineFunction &MF) {
return false; // Invalid value for threshold.
// Count the number of MachineInstr`s in MachineFunction
int64_t MICount = 0;
for (const auto& MBB : MF)
MICount += MBB.size();
int64_t MICount = 0;
for (const auto& MBB : MF)
MICount += MBB.size();
// Check if we have a loop.
// FIXME: Maybe make this smarter, and see whether the loops are dependent

View File

@ -14,7 +14,6 @@
#include "llvm/DebugInfo/CodeView/TypeCollection.h"
#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/CodeView/TypeRecordMapping.h"
#include "llvm/DebugInfo/CodeView/TypeServerHandler.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/BinaryStreamReader.h"
@ -42,13 +41,6 @@ static Error visitKnownMember(CVMemberRecord &Record,
return Error::success();
}
static Expected<TypeServer2Record> deserializeTypeServerRecord(CVType &Record) {
TypeServer2Record R(TypeRecordKind::TypeServer2);
if (auto EC = TypeDeserializer::deserializeAs(Record, R))
return std::move(EC);
return R;
}
static Error visitMemberRecord(CVMemberRecord &Record,
TypeVisitorCallbacks &Callbacks) {
if (auto EC = Callbacks.visitMemberBegin(Record))
@ -84,8 +76,6 @@ class CVTypeVisitor {
public:
explicit CVTypeVisitor(TypeVisitorCallbacks &Callbacks);
void addTypeServerHandler(TypeServerHandler &Handler);
Error visitTypeRecord(CVType &Record, TypeIndex Index);
Error visitTypeRecord(CVType &Record);
@ -98,45 +88,15 @@ class CVTypeVisitor {
Error visitFieldListMemberStream(BinaryStreamReader &Stream);
private:
Expected<bool> handleTypeServer(CVType &Record);
Error finishVisitation(CVType &Record);
/// The interface to the class that gets notified of each visitation.
TypeVisitorCallbacks &Callbacks;
TinyPtrVector<TypeServerHandler *> Handlers;
};
CVTypeVisitor::CVTypeVisitor(TypeVisitorCallbacks &Callbacks)
: Callbacks(Callbacks) {}
void CVTypeVisitor::addTypeServerHandler(TypeServerHandler &Handler) {
Handlers.push_back(&Handler);
}
Expected<bool> CVTypeVisitor::handleTypeServer(CVType &Record) {
if (Record.Type == TypeLeafKind::LF_TYPESERVER2 && !Handlers.empty()) {
auto TS = deserializeTypeServerRecord(Record);
if (!TS)
return TS.takeError();
for (auto Handler : Handlers) {
auto ExpectedResult = Handler->handle(*TS, Callbacks);
// If there was an error, return the error.
if (!ExpectedResult)
return ExpectedResult.takeError();
// If the handler processed the record, return success.
if (*ExpectedResult)
return true;
// Otherwise keep searching for a handler, eventually falling out and
// using the default record handler.
}
}
return false;
}
Error CVTypeVisitor::finishVisitation(CVType &Record) {
switch (Record.Type) {
default:
@ -163,12 +123,6 @@ Error CVTypeVisitor::finishVisitation(CVType &Record) {
}
Error CVTypeVisitor::visitTypeRecord(CVType &Record, TypeIndex Index) {
auto ExpectedResult = handleTypeServer(Record);
if (!ExpectedResult)
return ExpectedResult.takeError();
if (*ExpectedResult)
return Error::success();
if (auto EC = Callbacks.visitTypeBegin(Record, Index))
return EC;
@ -176,12 +130,6 @@ Error CVTypeVisitor::visitTypeRecord(CVType &Record, TypeIndex Index) {
}
Error CVTypeVisitor::visitTypeRecord(CVType &Record) {
auto ExpectedResult = handleTypeServer(Record);
if (!ExpectedResult)
return ExpectedResult.takeError();
if (*ExpectedResult)
return Error::success();
if (auto EC = Callbacks.visitTypeBegin(Record))
return EC;
@ -271,52 +219,37 @@ struct VisitHelper {
Error llvm::codeview::visitTypeRecord(CVType &Record, TypeIndex Index,
TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source,
TypeServerHandler *TS) {
VisitorDataSource Source) {
VisitHelper V(Callbacks, Source);
if (TS)
V.Visitor.addTypeServerHandler(*TS);
return V.Visitor.visitTypeRecord(Record, Index);
}
Error llvm::codeview::visitTypeRecord(CVType &Record,
TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source,
TypeServerHandler *TS) {
VisitorDataSource Source) {
VisitHelper V(Callbacks, Source);
if (TS)
V.Visitor.addTypeServerHandler(*TS);
return V.Visitor.visitTypeRecord(Record);
}
Error llvm::codeview::visitTypeStream(const CVTypeArray &Types,
TypeVisitorCallbacks &Callbacks,
VisitorDataSource Source,
TypeServerHandler *TS) {
VisitorDataSource Source) {
VisitHelper V(Callbacks, Source);
if (TS)
V.Visitor.addTypeServerHandler(*TS);
return V.Visitor.visitTypeStream(Types);
}
Error llvm::codeview::visitTypeStream(CVTypeRange Types,
TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS) {
TypeVisitorCallbacks &Callbacks) {
VisitHelper V(Callbacks, VDS_BytesPresent);
if (TS)
V.Visitor.addTypeServerHandler(*TS);
return V.Visitor.visitTypeStream(Types);
}
Error llvm::codeview::visitTypeStream(TypeCollection &Types,
TypeVisitorCallbacks &Callbacks,
TypeServerHandler *TS) {
TypeVisitorCallbacks &Callbacks) {
// When the internal visitor calls Types.getType(Index) the interface is
// required to return a CVType with the bytes filled out. So we can assume
// that the bytes will be present when individual records are visited.
VisitHelper V(Callbacks, VDS_BytesPresent);
if (TS)
V.Visitor.addTypeServerHandler(*TS);
return V.Visitor.visitTypeStream(Types);
}

View File

@ -168,18 +168,19 @@ Error CodeViewRecordIO::mapStringZ(StringRef &Value) {
return Error::success();
}
Error CodeViewRecordIO::mapGuid(StringRef &Guid) {
Error CodeViewRecordIO::mapGuid(GUID &Guid) {
constexpr uint32_t GuidSize = 16;
if (maxFieldLength() < GuidSize)
return make_error<CodeViewError>(cv_error_code::insufficient_buffer);
if (isWriting()) {
assert(Guid.size() == 16 && "Invalid Guid Size!");
if (auto EC = Writer->writeFixedString(Guid))
if (auto EC = Writer->writeBytes(Guid.Guid))
return EC;
} else {
if (auto EC = Reader->readFixedString(Guid, 16))
ArrayRef<uint8_t> GuidBytes;
if (auto EC = Reader->readBytes(GuidBytes, GuidSize))
return EC;
memcpy(Guid.Guid, GuidBytes.data(), GuidSize);
}
return Error::success();
}

View File

@ -9,6 +9,7 @@
#include "llvm/DebugInfo/CodeView/Formatters.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/DebugInfo/CodeView/GUID.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@ -39,3 +40,9 @@ void GuidAdapter::format(raw_ostream &Stream, StringRef Style) {
}
Stream << "}";
}
raw_ostream &llvm::codeview::operator<<(raw_ostream &OS, const GUID &Guid) {
codeview::detail::GuidAdapter A(Guid.Guid);
A.format(OS, "");
return OS;
}

View File

@ -186,7 +186,7 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
BuildInfoSym &BuildInfo) {
W.printNumber("BuildId", BuildInfo.BuildId);
printTypeIndex("BuildId", BuildInfo.BuildId);
return Error::success();
}

View File

@ -354,7 +354,7 @@ Error TypeDumpVisitor::visitKnownRecord(CVType &CVR, FuncIdRecord &Func) {
}
Error TypeDumpVisitor::visitKnownRecord(CVType &CVR, TypeServer2Record &TS) {
W->printString("Guid", formatv("{0}", fmt_guid(TS.getGuid())).str());
W->printString("Guid", formatv("{0}", TS.getGuid()).str());
W->printNumber("Age", TS.getAge());
W->printString("Name", TS.getName());
return Error::success();

View File

@ -10,13 +10,11 @@
#include "llvm/DebugInfo/CodeView/TypeStreamMerger.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeIndexDiscovery.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeTableBuilder.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ScopedPrinter.h"
@ -57,56 +55,35 @@ namespace {
/// streams: an item (or IPI) stream and a type stream, as this is what is
/// actually stored in the final PDB. We choose which records go where by
/// looking at the record kind.
class TypeStreamMerger : public TypeVisitorCallbacks {
class TypeStreamMerger {
public:
explicit TypeStreamMerger(SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler)
: Handler(Handler), IndexMap(SourceToDest) {
explicit TypeStreamMerger(SmallVectorImpl<TypeIndex> &SourceToDest)
: IndexMap(SourceToDest) {
SourceToDest.clear();
}
static const TypeIndex Untranslated;
Error visitTypeBegin(CVType &Record) override;
Error visitTypeEnd(CVType &Record) override;
Error mergeTypesAndIds(TypeTableBuilder &DestIds, TypeTableBuilder &DestTypes,
const CVTypeArray &IdsAndTypes);
const CVTypeArray &IdsAndTypes);
Error mergeIdRecords(TypeTableBuilder &Dest,
ArrayRef<TypeIndex> TypeSourceToDest,
const CVTypeArray &Ids);
const CVTypeArray &Ids);
Error mergeTypeRecords(TypeTableBuilder &Dest, const CVTypeArray &Types);
private:
Error doit(const CVTypeArray &Types);
Error remapAllTypes(const CVTypeArray &Types);
Error remapType(const CVType &Type);
void addMapping(TypeIndex Idx);
bool remapTypeIndex(TypeIndex &Idx);
bool remapItemIndex(TypeIndex &Idx);
bool remapIndices(RemappedType &Record, ArrayRef<TiReference> Refs) {
auto OriginalData = Record.OriginalRecord.content();
bool Success = true;
for (auto &Ref : Refs) {
uint32_t Offset = Ref.Offset;
ArrayRef<uint8_t> Bytes =
OriginalData.slice(Ref.Offset, sizeof(TypeIndex));
ArrayRef<TypeIndex> TIs(reinterpret_cast<const TypeIndex *>(Bytes.data()),
Ref.Count);
for (auto TI : TIs) {
TypeIndex NewTI = TI;
bool ThisSuccess = (Ref.Kind == TiRefKind::IndexRef)
? remapItemIndex(NewTI)
: remapTypeIndex(NewTI);
if (ThisSuccess && NewTI != TI)
Record.Mappings.emplace_back(Offset, NewTI);
Offset += sizeof(TypeIndex);
Success &= ThisSuccess;
}
}
return Success;
}
bool remapIndices(RemappedType &Record, ArrayRef<TiReference> Refs);
bool remapIndex(TypeIndex &Idx, ArrayRef<TypeIndex> Map);
@ -128,21 +105,6 @@ class TypeStreamMerger : public TypeVisitorCallbacks {
return Error::success();
}
Error writeTypeRecord(const CVType &Record) {
TypeIndex DestIdx =
DestTypeStream->writeSerializedRecord(Record.RecordData);
addMapping(DestIdx);
return Error::success();
}
Error writeTypeRecord(const RemappedType &Record, bool RemapSuccess) {
return writeRecord(*DestTypeStream, Record, RemapSuccess);
}
Error writeIdRecord(const RemappedType &Record, bool RemapSuccess) {
return writeRecord(*DestIdStream, Record, RemapSuccess);
}
Optional<Error> LastError;
bool IsSecondPass = false;
@ -153,7 +115,6 @@ class TypeStreamMerger : public TypeVisitorCallbacks {
TypeTableBuilder *DestIdStream = nullptr;
TypeTableBuilder *DestTypeStream = nullptr;
TypeServerHandler *Handler = nullptr;
// If we're only mapping id records, this array contains the mapping for
// type records.
@ -168,12 +129,8 @@ class TypeStreamMerger : public TypeVisitorCallbacks {
const TypeIndex TypeStreamMerger::Untranslated(SimpleTypeKind::NotTranslated);
Error TypeStreamMerger::visitTypeBegin(CVType &Rec) {
RemappedType R(Rec);
SmallVector<TiReference, 32> Refs;
discoverTypeIndices(Rec.RecordData, Refs);
bool Success = remapIndices(R, Refs);
switch (Rec.kind()) {
static bool isIdRecord(TypeLeafKind K) {
switch (K) {
case TypeLeafKind::LF_FUNC_ID:
case TypeLeafKind::LF_MFUNC_ID:
case TypeLeafKind::LF_STRING_ID:
@ -181,19 +138,10 @@ Error TypeStreamMerger::visitTypeBegin(CVType &Rec) {
case TypeLeafKind::LF_BUILDINFO:
case TypeLeafKind::LF_UDT_SRC_LINE:
case TypeLeafKind::LF_UDT_MOD_SRC_LINE:
return writeIdRecord(R, Success);
return true;
default:
return writeTypeRecord(R, Success);
return false;
}
return Error::success();
}
Error TypeStreamMerger::visitTypeEnd(CVType &Rec) {
++CurIndex;
if (!IsSecondPass)
assert(IndexMap.size() == slotForIndex(CurIndex) &&
"visitKnownRecord should add one index map entry");
return Error::success();
}
void TypeStreamMerger::addMapping(TypeIndex Idx) {
@ -256,7 +204,7 @@ bool TypeStreamMerger::remapItemIndex(TypeIndex &Idx) {
}
Error TypeStreamMerger::mergeTypeRecords(TypeTableBuilder &Dest,
const CVTypeArray &Types) {
const CVTypeArray &Types) {
DestTypeStream = &Dest;
return doit(Types);
@ -264,7 +212,7 @@ Error TypeStreamMerger::mergeTypeRecords(TypeTableBuilder &Dest,
Error TypeStreamMerger::mergeIdRecords(TypeTableBuilder &Dest,
ArrayRef<TypeIndex> TypeSourceToDest,
const CVTypeArray &Ids) {
const CVTypeArray &Ids) {
DestIdStream = &Dest;
TypeLookup = TypeSourceToDest;
@ -273,25 +221,14 @@ Error TypeStreamMerger::mergeIdRecords(TypeTableBuilder &Dest,
Error TypeStreamMerger::mergeTypesAndIds(TypeTableBuilder &DestIds,
TypeTableBuilder &DestTypes,
const CVTypeArray &IdsAndTypes) {
const CVTypeArray &IdsAndTypes) {
DestIdStream = &DestIds;
DestTypeStream = &DestTypes;
return doit(IdsAndTypes);
}
Error TypeStreamMerger::doit(const CVTypeArray &Types) {
LastError = Error::success();
// We don't want to deserialize records. I guess this flag is poorly named,
// but it really means "Don't deserialize records before switching on the
// concrete type.
// FIXME: We can probably get even more speed here if we don't use the visitor
// pipeline here, but instead write the switch ourselves. I don't think it
// would buy us much since it's already pretty fast, but it's probably worth
// a few cycles.
if (auto EC =
codeview::visitTypeStream(Types, *this, VDS_BytesExternal, Handler))
if (auto EC = remapAllTypes(Types))
return EC;
// If we found bad indices but no other errors, try doing another pass and see
@ -301,50 +238,92 @@ Error TypeStreamMerger::doit(const CVTypeArray &Types) {
// topologically sorted. The standard library contains MASM-produced objects,
// so this is important to handle correctly, but we don't have to be too
// efficient. MASM type streams are usually very small.
while (!*LastError && NumBadIndices > 0) {
while (!LastError && NumBadIndices > 0) {
unsigned BadIndicesRemaining = NumBadIndices;
IsSecondPass = true;
NumBadIndices = 0;
CurIndex = TypeIndex(TypeIndex::FirstNonSimpleIndex);
if (auto EC =
codeview::visitTypeStream(Types, *this, VDS_BytesExternal, Handler))
if (auto EC = remapAllTypes(Types))
return EC;
assert(NumBadIndices <= BadIndicesRemaining &&
"second pass found more bad indices");
if (!*LastError && NumBadIndices == BadIndicesRemaining) {
if (!LastError && NumBadIndices == BadIndicesRemaining) {
return llvm::make_error<CodeViewError>(
cv_error_code::corrupt_record, "input type graph contains cycles");
}
}
Error Ret = std::move(*LastError);
LastError.reset();
return Ret;
if (LastError)
return std::move(*LastError);
return Error::success();
}
Error TypeStreamMerger::remapAllTypes(const CVTypeArray &Types) {
for (const CVType &Type : Types)
if (auto EC = remapType(Type))
return EC;
return Error::success();
}
Error TypeStreamMerger::remapType(const CVType &Type) {
RemappedType R(Type);
SmallVector<TiReference, 32> Refs;
discoverTypeIndices(Type.RecordData, Refs);
bool MappedAllIndices = remapIndices(R, Refs);
TypeTableBuilder &Dest =
isIdRecord(Type.kind()) ? *DestIdStream : *DestTypeStream;
if (auto EC = writeRecord(Dest, R, MappedAllIndices))
return EC;
++CurIndex;
assert((IsSecondPass || IndexMap.size() == slotForIndex(CurIndex)) &&
"visitKnownRecord should add one index map entry");
return Error::success();
}
bool TypeStreamMerger::remapIndices(RemappedType &Record,
ArrayRef<TiReference> Refs) {
ArrayRef<uint8_t> OriginalData = Record.OriginalRecord.content();
bool Success = true;
for (auto &Ref : Refs) {
uint32_t Offset = Ref.Offset;
ArrayRef<uint8_t> Bytes = OriginalData.slice(Ref.Offset, sizeof(TypeIndex));
ArrayRef<TypeIndex> TIs(reinterpret_cast<const TypeIndex *>(Bytes.data()),
Ref.Count);
for (auto TI : TIs) {
TypeIndex NewTI = TI;
bool ThisSuccess = (Ref.Kind == TiRefKind::IndexRef)
? remapItemIndex(NewTI)
: remapTypeIndex(NewTI);
if (ThisSuccess && NewTI != TI)
Record.Mappings.emplace_back(Offset, NewTI);
Offset += sizeof(TypeIndex);
Success &= ThisSuccess;
}
}
return Success;
}
Error llvm::codeview::mergeTypeRecords(TypeTableBuilder &Dest,
SmallVectorImpl<TypeIndex> &SourceToDest,
TypeServerHandler *Handler,
const CVTypeArray &Types) {
TypeStreamMerger M(SourceToDest, Handler);
const CVTypeArray &Types) {
TypeStreamMerger M(SourceToDest);
return M.mergeTypeRecords(Dest, Types);
}
Error llvm::codeview::mergeIdRecords(TypeTableBuilder &Dest,
ArrayRef<TypeIndex> TypeSourceToDest,
SmallVectorImpl<TypeIndex> &SourceToDest,
const CVTypeArray &Ids) {
TypeStreamMerger M(SourceToDest, nullptr);
const CVTypeArray &Ids) {
TypeStreamMerger M(SourceToDest);
return M.mergeIdRecords(Dest, TypeSourceToDest, Ids);
}
Error llvm::codeview::mergeTypeAndIdRecords(
TypeTableBuilder &DestIds, TypeTableBuilder &DestTypes,
SmallVectorImpl<TypeIndex> &SourceToDest, TypeServerHandler *Handler,
const CVTypeArray &IdsAndTypes) {
TypeStreamMerger M(SourceToDest, Handler);
SmallVectorImpl<TypeIndex> &SourceToDest, const CVTypeArray &IdsAndTypes) {
TypeStreamMerger M(SourceToDest);
return M.mergeTypesAndIds(DestIds, DestTypes, IdsAndTypes);
}

View File

@ -24,22 +24,166 @@ using namespace llvm;
using namespace dwarf;
using namespace object;
void DWARFVerifier::verifyDebugInfoAttribute(const DWARFDie &Die,
DWARFAttribute &AttrValue) {
bool DWARFVerifier::verifyUnitHeader(const DWARFDataExtractor DebugInfoData,
uint32_t *Offset, unsigned UnitIndex,
uint8_t &UnitType, bool &isUnitDWARF64) {
uint32_t AbbrOffset, Length;
uint8_t AddrSize = 0;
uint16_t Version;
bool Success = true;
bool ValidLength = false;
bool ValidVersion = false;
bool ValidAddrSize = false;
bool ValidType = true;
bool ValidAbbrevOffset = true;
uint32_t OffsetStart = *Offset;
Length = DebugInfoData.getU32(Offset);
if (Length == UINT32_MAX) {
isUnitDWARF64 = true;
OS << format(
"Unit[%d] is in 64-bit DWARF format; cannot verify from this point.\n",
UnitIndex);
return false;
}
Version = DebugInfoData.getU16(Offset);
if (Version >= 5) {
UnitType = DebugInfoData.getU8(Offset);
AddrSize = DebugInfoData.getU8(Offset);
AbbrOffset = DebugInfoData.getU32(Offset);
ValidType = DWARFUnit::isValidUnitType(UnitType);
} else {
UnitType = 0;
AbbrOffset = DebugInfoData.getU32(Offset);
AddrSize = DebugInfoData.getU8(Offset);
}
if (!DCtx.getDebugAbbrev()->getAbbreviationDeclarationSet(AbbrOffset))
ValidAbbrevOffset = false;
ValidLength = DebugInfoData.isValidOffset(OffsetStart + Length + 3);
ValidVersion = DWARFContext::isSupportedVersion(Version);
ValidAddrSize = AddrSize == 4 || AddrSize == 8;
if (!ValidLength || !ValidVersion || !ValidAddrSize || !ValidAbbrevOffset ||
!ValidType) {
Success = false;
OS << format("Units[%d] - start offset: 0x%08x \n", UnitIndex, OffsetStart);
if (!ValidLength)
OS << "\tError: The length for this unit is too "
"large for the .debug_info provided.\n";
if (!ValidVersion)
OS << "\tError: The 16 bit unit header version is not valid.\n";
if (!ValidType)
OS << "\tError: The unit type encoding is not valid.\n";
if (!ValidAbbrevOffset)
OS << "\tError: The offset into the .debug_abbrev section is "
"not valid.\n";
if (!ValidAddrSize)
OS << "\tError: The address size is unsupported.\n";
}
*Offset = OffsetStart + Length + 4;
return Success;
}
bool DWARFVerifier::verifyUnitContents(DWARFUnit Unit) {
uint32_t NumUnitErrors = 0;
unsigned NumDies = Unit.getNumDIEs();
for (unsigned I = 0; I < NumDies; ++I) {
auto Die = Unit.getDIEAtIndex(I);
if (Die.getTag() == DW_TAG_null)
continue;
for (auto AttrValue : Die.attributes()) {
NumUnitErrors += verifyDebugInfoAttribute(Die, AttrValue);
NumUnitErrors += verifyDebugInfoForm(Die, AttrValue);
}
}
return NumUnitErrors == 0;
}
bool DWARFVerifier::handleDebugInfo() {
OS << "Verifying .debug_info Unit Header Chain...\n";
DWARFDataExtractor DebugInfoData(DCtx.getInfoSection(), DCtx.isLittleEndian(),
0);
uint32_t NumDebugInfoErrors = 0;
uint32_t OffsetStart = 0, Offset = 0, UnitIdx = 0;
uint8_t UnitType = 0;
bool isUnitDWARF64 = false;
bool isHeaderChainValid = true;
bool hasDIE = DebugInfoData.isValidOffset(Offset);
while (hasDIE) {
OffsetStart = Offset;
if (!verifyUnitHeader(DebugInfoData, &Offset, UnitIdx, UnitType,
isUnitDWARF64)) {
isHeaderChainValid = false;
if (isUnitDWARF64)
break;
} else {
std::unique_ptr<DWARFUnit> Unit;
switch (UnitType) {
case dwarf::DW_UT_type:
case dwarf::DW_UT_split_type: {
DWARFUnitSection<DWARFTypeUnit> TUSection{};
Unit.reset(new DWARFTypeUnit(
DCtx, DCtx.getInfoSection(), DCtx.getDebugAbbrev(),
&DCtx.getRangeSection(), DCtx.getStringSection(),
DCtx.getStringOffsetSection(), &DCtx.getAppleObjCSection(),
DCtx.getLineSection(), DCtx.isLittleEndian(), false, TUSection,
nullptr));
break;
}
case dwarf::DW_UT_skeleton:
case dwarf::DW_UT_split_compile:
case dwarf::DW_UT_compile:
case dwarf::DW_UT_partial:
// UnitType = 0 means that we are
// verifying a compile unit in DWARF v4.
case 0: {
DWARFUnitSection<DWARFCompileUnit> CUSection{};
Unit.reset(new DWARFCompileUnit(
DCtx, DCtx.getInfoSection(), DCtx.getDebugAbbrev(),
&DCtx.getRangeSection(), DCtx.getStringSection(),
DCtx.getStringOffsetSection(), &DCtx.getAppleObjCSection(),
DCtx.getLineSection(), DCtx.isLittleEndian(), false, CUSection,
nullptr));
break;
}
default: { llvm_unreachable("Invalid UnitType."); }
}
Unit->extract(DebugInfoData, &OffsetStart);
if (!verifyUnitContents(*Unit))
++NumDebugInfoErrors;
}
hasDIE = DebugInfoData.isValidOffset(Offset);
++UnitIdx;
}
if (UnitIdx == 0 && !hasDIE) {
OS << "Warning: .debug_info is empty.\n";
isHeaderChainValid = true;
}
NumDebugInfoErrors += verifyDebugInfoReferences();
return (isHeaderChainValid && NumDebugInfoErrors == 0);
}
unsigned DWARFVerifier::verifyDebugInfoAttribute(const DWARFDie &Die,
DWARFAttribute &AttrValue) {
unsigned NumErrors = 0;
const auto Attr = AttrValue.Attr;
switch (Attr) {
case DW_AT_ranges:
// Make sure the offset in the DW_AT_ranges attribute is valid.
if (auto SectionOffset = AttrValue.Value.getAsSectionOffset()) {
if (*SectionOffset >= DCtx.getRangeSection().Data.size()) {
++NumDebugInfoErrors;
++NumErrors;
OS << "error: DW_AT_ranges offset is beyond .debug_ranges "
"bounds:\n";
Die.dump(OS, 0);
OS << "\n";
}
} else {
++NumDebugInfoErrors;
++NumErrors;
OS << "error: DIE has invalid DW_AT_ranges encoding:\n";
Die.dump(OS, 0);
OS << "\n";
@ -49,7 +193,7 @@ void DWARFVerifier::verifyDebugInfoAttribute(const DWARFDie &Die,
// Make sure the offset in the DW_AT_stmt_list attribute is valid.
if (auto SectionOffset = AttrValue.Value.getAsSectionOffset()) {
if (*SectionOffset >= DCtx.getLineSection().Data.size()) {
++NumDebugInfoErrors;
++NumErrors;
OS << "error: DW_AT_stmt_list offset is beyond .debug_line "
"bounds: "
<< format("0x%08" PRIx32, *SectionOffset) << "\n";
@ -57,7 +201,7 @@ void DWARFVerifier::verifyDebugInfoAttribute(const DWARFDie &Die,
OS << "\n";
}
} else {
++NumDebugInfoErrors;
++NumErrors;
OS << "error: DIE has invalid DW_AT_stmt_list encoding:\n";
Die.dump(OS, 0);
OS << "\n";
@ -67,10 +211,12 @@ void DWARFVerifier::verifyDebugInfoAttribute(const DWARFDie &Die,
default:
break;
}
return NumErrors;
}
void DWARFVerifier::verifyDebugInfoForm(const DWARFDie &Die,
DWARFAttribute &AttrValue) {
unsigned DWARFVerifier::verifyDebugInfoForm(const DWARFDie &Die,
DWARFAttribute &AttrValue) {
unsigned NumErrors = 0;
const auto Form = AttrValue.Value.getForm();
switch (Form) {
case DW_FORM_ref1:
@ -86,7 +232,7 @@ void DWARFVerifier::verifyDebugInfoForm(const DWARFDie &Die,
auto CUSize = DieCU->getNextUnitOffset() - DieCU->getOffset();
auto CUOffset = AttrValue.Value.getRawUValue();
if (CUOffset >= CUSize) {
++NumDebugInfoErrors;
++NumErrors;
OS << "error: " << FormEncodingString(Form) << " CU offset "
<< format("0x%08" PRIx32, CUOffset)
<< " is invalid (must be less than CU size of "
@ -108,7 +254,7 @@ void DWARFVerifier::verifyDebugInfoForm(const DWARFDie &Die,
assert(RefVal);
if (RefVal) {
if (*RefVal >= DCtx.getInfoSection().Data.size()) {
++NumDebugInfoErrors;
++NumErrors;
OS << "error: DW_FORM_ref_addr offset beyond .debug_info "
"bounds:\n";
Die.dump(OS, 0);
@ -125,7 +271,7 @@ void DWARFVerifier::verifyDebugInfoForm(const DWARFDie &Die,
auto SecOffset = AttrValue.Value.getAsSectionOffset();
assert(SecOffset); // DW_FORM_strp is a section offset.
if (SecOffset && *SecOffset >= DCtx.getStringSection().size()) {
++NumDebugInfoErrors;
++NumErrors;
OS << "error: DW_FORM_strp offset beyond .debug_str bounds:\n";
Die.dump(OS, 0);
OS << "\n";
@ -135,17 +281,19 @@ void DWARFVerifier::verifyDebugInfoForm(const DWARFDie &Die,
default:
break;
}
return NumErrors;
}
void DWARFVerifier::verifyDebugInfoReferences() {
unsigned DWARFVerifier::verifyDebugInfoReferences() {
// Take all references and make sure they point to an actual DIE by
// getting the DIE by offset and emitting an error
OS << "Verifying .debug_info references...\n";
unsigned NumErrors = 0;
for (auto Pair : ReferenceToDIEOffsets) {
auto Die = DCtx.getDIEForOffset(Pair.first);
if (Die)
continue;
++NumDebugInfoErrors;
++NumErrors;
OS << "error: invalid DIE reference " << format("0x%08" PRIx64, Pair.first)
<< ". Offset is in between DIEs:\n";
for (auto Offset : Pair.second) {
@ -155,26 +303,7 @@ void DWARFVerifier::verifyDebugInfoReferences() {
}
OS << "\n";
}
}
bool DWARFVerifier::handleDebugInfo() {
NumDebugInfoErrors = 0;
OS << "Verifying .debug_info...\n";
for (const auto &CU : DCtx.compile_units()) {
unsigned NumDies = CU->getNumDIEs();
for (unsigned I = 0; I < NumDies; ++I) {
auto Die = CU->getDIEAtIndex(I);
const auto Tag = Die.getTag();
if (Tag == DW_TAG_null)
continue;
for (auto AttrValue : Die.attributes()) {
verifyDebugInfoAttribute(Die, AttrValue);
verifyDebugInfoForm(Die, AttrValue);
}
}
}
verifyDebugInfoReferences();
return NumDebugInfoErrors == 0;
return NumErrors;
}
void DWARFVerifier::verifyDebugLineStmtOffsets() {

View File

@ -52,7 +52,6 @@ add_pdb_impl_folder(Native
Native/PDBFileBuilder.cpp
Native/PDBStringTable.cpp
Native/PDBStringTableBuilder.cpp
Native/PDBTypeServerHandler.cpp
Native/PublicsStream.cpp
Native/PublicsStreamBuilder.cpp
Native/RawError.cpp

View File

@ -125,16 +125,16 @@ PrivateGetDIAValue(IDiaSymbol *Symbol,
return Result8;
}
PDB_UniqueId
codeview::GUID
PrivateGetDIAValue(IDiaSymbol *Symbol,
HRESULT (__stdcall IDiaSymbol::*Method)(GUID *)) {
GUID Result;
if (S_OK != (Symbol->*Method)(&Result))
return PDB_UniqueId();
return codeview::GUID();
static_assert(sizeof(PDB_UniqueId) == sizeof(GUID),
"PDB_UniqueId is the wrong size!");
PDB_UniqueId IdResult;
static_assert(sizeof(codeview::GUID) == sizeof(GUID),
"GUID is the wrong size!");
codeview::GUID IdResult;
::memcpy(&IdResult, &Result, sizeof(GUID));
return IdResult;
}
@ -746,7 +746,7 @@ PDB_SymType DIARawSymbol::getSymTag() const {
&IDiaSymbol::get_symTag);
}
PDB_UniqueId DIARawSymbol::getGuid() const {
codeview::GUID DIARawSymbol::getGuid() const {
return PrivateGetDIAValue(Symbol, &IDiaSymbol::get_guid);
}

Some files were not shown because too many files have changed in this diff Show More