Merge llvm, clang, lld and lldb release_40 branch r292009. Also update build glue.
Dimitry Andric 2017-01-14 22:12:13 +00:00
commit f1a29dd344
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang400-import/; revision=312197
373 changed files with 8982 additions and 3668 deletions

View File

@ -4,7 +4,7 @@ LLVM Release License
University of Illinois/NCSA
Open Source License
Copyright (c) 2003-2016 University of Illinois at Urbana-Champaign.
Copyright (c) 2003-2017 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:

View File

@ -94,7 +94,7 @@ template <typename TagT, typename... MemberTs> class PointerSumType {
return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
}
operator bool() const { return Value & HelperT::PointerMask; }
explicit operator bool() const { return Value & HelperT::PointerMask; }
bool operator==(const PointerSumType &R) const { return Value == R.Value; }
bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
bool operator<(const PointerSumType &R) const { return Value < R.Value; }

View File

@ -33,6 +33,32 @@ namespace llvm {
/// Another abstraction that this doesn't provide is implementing increment in
/// terms of addition of one. These aren't equivalent for all iterator
/// categories, and respecting that adds a lot of complexity for little gain.
///
/// Classes wishing to use `iterator_facade_base` should implement the following
/// methods:
///
/// Forward Iterators:
/// (All of the following methods)
/// - DerivedT &operator=(const DerivedT &R);
/// - bool operator==(const DerivedT &R) const;
/// - const T &operator*() const;
/// - T &operator*();
/// - DerivedT &operator++();
///
/// Bidirectional Iterators:
/// (All methods of forward iterators, plus the following)
/// - DerivedT &operator--();
///
/// Random-access Iterators:
/// (All methods of bidirectional iterators excluding the following)
/// - DerivedT &operator++();
/// - DerivedT &operator--();
/// (plus the following)
/// - bool operator<(const DerivedT &RHS) const;
/// - DifferenceTypeT operator-(const DerivedT &R) const;
/// - DerivedT &operator+=(DifferenceTypeT N);
/// - DerivedT &operator-=(DifferenceTypeT N);
///
template <typename DerivedT, typename IteratorCategoryT, typename T,
typename DifferenceTypeT = std::ptrdiff_t, typename PointerT = T *,
typename ReferenceT = T &>
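As a rough illustration of the contract spelled out in the comment above (not part of the diff), here is a minimal forward iterator built on iterator_facade_base; the IntArrayIter name and the plain int payload are invented for the example:

    #include <iterator>
    #include "llvm/ADT/iterator.h"

    // Supplies only the forward-iterator subset listed above; the base class
    // derives operator!=, post-increment and operator-> from these.
    class IntArrayIter
        : public llvm::iterator_facade_base<IntArrayIter,
                                            std::forward_iterator_tag, int> {
      int *Ptr = nullptr;

    public:
      IntArrayIter() = default;
      explicit IntArrayIter(int *P) : Ptr(P) {}

      bool operator==(const IntArrayIter &R) const { return Ptr == R.Ptr; }
      const int &operator*() const { return *Ptr; }
      int &operator*() { return *Ptr; }
      IntArrayIter &operator++() { ++Ptr; return *this; }
    };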

View File

@ -46,6 +46,30 @@ class AssumptionCache {
/// intrinsic.
SmallVector<WeakVH, 4> AssumeHandles;
class AffectedValueCallbackVH final : public CallbackVH {
AssumptionCache *AC;
void deleted() override;
void allUsesReplacedWith(Value *) override;
public:
using DMI = DenseMapInfo<Value *>;
AffectedValueCallbackVH(Value *V, AssumptionCache *AC = nullptr)
: CallbackVH(V), AC(AC) {}
};
friend AffectedValueCallbackVH;
/// \brief A map of values about which an assumption might be providing
/// information to the relevant set of assumptions.
using AffectedValuesMap =
DenseMap<AffectedValueCallbackVH, SmallVector<WeakVH, 1>,
AffectedValueCallbackVH::DMI>;
AffectedValuesMap AffectedValues;
/// Get the vector of assumptions which affect a value from the cache.
SmallVector<WeakVH, 1> &getAffectedValues(Value *V);
/// \brief Flag tracking whether we have scanned the function yet.
///
/// We want to be as lazy about this as possible, and so we scan the function
@ -66,11 +90,16 @@ class AssumptionCache {
/// not already be in the cache.
void registerAssumption(CallInst *CI);
/// \brief Update the cache of values being affected by this assumption (i.e.
/// the values about which this assumption provides information).
void updateAffectedValues(CallInst *CI);
/// \brief Clear the cache of @llvm.assume intrinsics for a function.
///
/// It will be re-scanned the next time it is requested.
void clear() {
AssumeHandles.clear();
AffectedValues.clear();
Scanned = false;
}
@ -87,6 +116,18 @@ class AssumptionCache {
scanFunction();
return AssumeHandles;
}
/// \brief Access the list of assumptions which affect this value.
MutableArrayRef<WeakVH> assumptionsFor(const Value *V) {
if (!Scanned)
scanFunction();
auto AVI = AffectedValues.find_as(const_cast<Value *>(V));
if (AVI == AffectedValues.end())
return MutableArrayRef<WeakVH>();
return AVI->second;
}
};
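A hedged sketch of how the new assumptionsFor() accessor is meant to be used (not part of the diff; inspectAssumptions, AC and V are invented names):

    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/raw_ostream.h"

    void inspectAssumptions(llvm::AssumptionCache &AC, llvm::Value *V) {
      // assumptionsFor returns only the @llvm.assume calls the cache considers
      // relevant to V, instead of the whole assumptions() list.
      for (llvm::WeakVH &VH : AC.assumptionsFor(V)) {
        if (!VH)
          continue; // the assumption may have been deleted
        llvm::cast<llvm::CallInst>(VH)->print(llvm::errs());
        llvm::errs() << "\n";
      }
    }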
/// \brief A function analysis which provides an \c AssumptionCache.

View File

@ -15,8 +15,8 @@
#ifndef LLVM_ANALYSIS_IVUSERS_H
#define LLVM_ANALYSIS_IVUSERS_H
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/IR/ValueHandle.h"
@ -193,17 +193,10 @@ class IVUsersAnalysis : public AnalysisInfoMixin<IVUsersAnalysis> {
public:
typedef IVUsers Result;
IVUsers run(Loop &L, LoopAnalysisManager &AM);
IVUsers run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR);
};
/// Printer pass for the \c IVUsers for a loop.
class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
raw_ostream &OS;
public:
explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
};
}
#endif

View File

@ -148,7 +148,7 @@ class LazyCallGraph {
///
/// This happens when an edge has been deleted. We leave the edge objects
/// around but clear them.
operator bool() const;
explicit operator bool() const;
/// Returns the \c Kind of the edge.
Kind getKind() const;

View File

@ -20,7 +20,7 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/ValueHandle.h"
@ -753,18 +753,8 @@ class LoopAccessAnalysis
public:
typedef LoopAccessInfo Result;
Result run(Loop &, LoopAnalysisManager &);
static StringRef name() { return "LoopAccessAnalysis"; }
};
/// \brief Printer pass for the \c LoopAccessInfo results.
class LoopAccessInfoPrinterPass
: public PassInfoMixin<LoopAccessInfoPrinterPass> {
raw_ostream &OS;
public:
explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
Result run(Loop &L, LoopAnalysisManager &AM, LoopStandardAnalysisResults &AR);
};
inline Instruction *MemoryDepChecker::Dependence::getSource(

View File

@ -0,0 +1,155 @@
//===- LoopAnalysisManager.h - Loop analysis management ---------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides classes for managing per-loop analyses. These are
/// typically used as part of a loop pass pipeline over the loop nests of
/// a function.
///
/// Loop analyses are allowed to make some simplifying assumptions:
/// 1) Loops are, where possible, in simplified form.
/// 2) Loops are *always* in LCSSA form.
/// 3) A collection of analysis results are available:
/// - LoopInfo
/// - DominatorTree
/// - ScalarEvolution
/// - AAManager
///
/// The primary mechanism to provide these invariants is the loop pass manager,
/// but they can also be manually provided in order to reason about a loop from
/// outside of a dedicated pass manager.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
#define LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
/// The adaptor from a function pass to a loop pass computes these analyses and
/// makes them available to the loop passes "for free". Each loop pass is
/// expected to update these analyses if necessary to ensure they're
/// valid after it runs.
struct LoopStandardAnalysisResults {
AAResults &AA;
AssumptionCache &AC;
DominatorTree &DT;
LoopInfo &LI;
ScalarEvolution &SE;
TargetLibraryInfo &TLI;
TargetTransformInfo &TTI;
};
/// Extern template declaration for the analysis set for this IR unit.
extern template class AllAnalysesOn<Loop>;
extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
/// \brief The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for more detail.
/// This typedef serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
typedef AnalysisManager<Loop, LoopStandardAnalysisResults &>
LoopAnalysisManager;
/// A proxy from a \c LoopAnalysisManager to a \c Function.
typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
LoopAnalysisManagerFunctionProxy;
/// A specialized result for the \c LoopAnalysisManagerFunctionProxy which
/// retains a \c LoopInfo reference.
///
/// This allows it to collect loop objects for which analysis results may be
/// cached in the \c LoopAnalysisManager.
template <> class LoopAnalysisManagerFunctionProxy::Result {
public:
explicit Result(LoopAnalysisManager &InnerAM, LoopInfo &LI)
: InnerAM(&InnerAM), LI(&LI) {}
Result(Result &&Arg) : InnerAM(std::move(Arg.InnerAM)), LI(Arg.LI) {
// We have to null out the analysis manager in the moved-from state
// because we are taking ownership of the responsibility to clear the
// analysis state.
Arg.InnerAM = nullptr;
}
Result &operator=(Result &&RHS) {
InnerAM = RHS.InnerAM;
LI = RHS.LI;
// We have to null out the analysis manager in the moved-from state
// because we are taking ownership of the responsibility to clear the
// analysis state.
RHS.InnerAM = nullptr;
return *this;
}
~Result() {
// InnerAM is cleared in a moved-from state where there is nothing to do.
if (!InnerAM)
return;
// Clear out the analysis manager if we're being destroyed -- it means we
// didn't even see an invalidate call when we got invalidated.
InnerAM->clear();
}
/// Accessor for the analysis manager.
LoopAnalysisManager &getManager() { return *InnerAM; }
/// Handler for invalidation of the proxy for a particular function.
///
/// If the proxy, \c LoopInfo, and associated analyses are preserved, this
/// will merely forward the invalidation event to any cached loop analysis
/// results for loops within this function.
///
/// If the necessary loop infrastructure is not preserved, this will forcibly
/// clear all of the cached analysis results that are keyed on the \c
/// LoopInfo for this function.
bool invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv);
private:
LoopAnalysisManager *InnerAM;
LoopInfo *LI;
};
/// Provide a specialized run method for the \c LoopAnalysisManagerFunctionProxy
/// so it can pass the \c LoopInfo to the result.
template <>
LoopAnalysisManagerFunctionProxy::Result
LoopAnalysisManagerFunctionProxy::run(Function &F, FunctionAnalysisManager &AM);
// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
// template.
extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
LoopStandardAnalysisResults &>;
/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
LoopStandardAnalysisResults &>
FunctionAnalysisManagerLoopProxy;
/// Returns the minimum set of Analyses that all loop passes must preserve.
PreservedAnalyses getLoopPassPreservedAnalyses();
}
#endif // LLVM_ANALYSIS_LOOPANALYSISMANAGER_H
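To make the new interface concrete, a minimal sketch of a loop analysis written against this header (CountsBackedges and its Result are hypothetical and not part of the patch):

    #include "llvm/Analysis/LoopAnalysisManager.h"
    #include "llvm/Analysis/LoopInfo.h"
    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/PassManager.h"

    namespace {
    struct CountsBackedges : llvm::AnalysisInfoMixin<CountsBackedges> {
      struct Result { bool HasComputableTripCount; };

      Result run(llvm::Loop &L, llvm::LoopAnalysisManager &AM,
                 llvm::LoopStandardAnalysisResults &AR) {
        // The function-to-loop adaptor guarantees AR.SE, AR.DT, AR.LI, etc.
        // are valid and up to date when a loop analysis runs.
        const llvm::SCEV *Count = AR.SE.getBackedgeTakenCount(&L);
        return {!llvm::isa<llvm::SCEVCouldNotCompute>(Count)};
      }

      static llvm::AnalysisKey Key;
    };
    llvm::AnalysisKey CountsBackedges::Key;
    } // end anonymous namespace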

View File

@ -853,17 +853,8 @@ class LoopInfoWrapperPass : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
/// \brief Pass for printing a loop's contents as LLVM's text IR assembly.
class PrintLoopPass : public PassInfoMixin<PrintLoopPass> {
raw_ostream &OS;
std::string Banner;
public:
PrintLoopPass();
PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");
PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &);
};
/// Function to print a loop's contents as LLVM's text IR assembly.
void printLoop(Loop &L, raw_ostream &OS, const std::string &Banner = "");
} // End llvm namespace

View File

@ -1,149 +0,0 @@
//===- LoopPassManager.h - Loop pass management -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides classes for managing passes over loops in LLVM IR.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOPPASSMANAGER_H
#define LLVM_ANALYSIS_LOOPPASSMANAGER_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
extern template class PassManager<Loop>;
/// \brief The loop pass manager.
///
/// See the documentation for the PassManager template for details. It runs a
/// sequence of loop passes over each loop that the manager is run over. This
/// typedef serves as a convenient way to refer to this construct.
typedef PassManager<Loop> LoopPassManager;
extern template class AnalysisManager<Loop>;
/// \brief The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for more detail.
/// This typedef serves as a convenient way to refer to this
/// construct in the adaptors and proxies used to integrate this into the larger
/// pass manager infrastructure.
typedef AnalysisManager<Loop> LoopAnalysisManager;
/// A proxy from a \c LoopAnalysisManager to a \c Function.
typedef InnerAnalysisManagerProxy<LoopAnalysisManager, Function>
LoopAnalysisManagerFunctionProxy;
/// Specialization of the invalidate method for the \c
/// LoopAnalysisManagerFunctionProxy's result.
template <>
bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv);
// Ensure the \c LoopAnalysisManagerFunctionProxy is provided as an extern
// template.
extern template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
extern template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>;
/// A proxy from a \c FunctionAnalysisManager to a \c Loop.
typedef OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>
FunctionAnalysisManagerLoopProxy;
/// Returns the minimum set of Analyses that all loop passes must preserve.
PreservedAnalyses getLoopPassPreservedAnalyses();
/// \brief Adaptor that maps from a function to its loops.
///
/// Designed to allow composition of a LoopPass(Manager) and a
/// FunctionPassManager. Note that if this pass is constructed with a \c
/// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy
/// analysis prior to running the loop passes over the function to enable a \c
/// LoopAnalysisManager to be used within this run safely.
template <typename LoopPassT>
class FunctionToLoopPassAdaptor
: public PassInfoMixin<FunctionToLoopPassAdaptor<LoopPassT>> {
public:
explicit FunctionToLoopPassAdaptor(LoopPassT Pass)
: Pass(std::move(Pass)) {}
/// \brief Runs the loop passes across every loop in the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
// Setup the loop analysis manager from its proxy.
LoopAnalysisManager &LAM =
AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
// Get the loop structure for this function
LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
// Also precompute all of the function analyses used by loop passes.
// FIXME: These should be handed into the loop passes when the loop pass
// management layer is reworked to follow the design of CGSCC.
(void)AM.getResult<AAManager>(F);
(void)AM.getResult<DominatorTreeAnalysis>(F);
(void)AM.getResult<ScalarEvolutionAnalysis>(F);
(void)AM.getResult<TargetLibraryAnalysis>(F);
PreservedAnalyses PA = PreservedAnalyses::all();
// We want to visit the loops in reverse post-order. We'll build the stack
// of loops to visit in Loops by first walking the loops in pre-order.
SmallVector<Loop *, 2> Loops;
SmallVector<Loop *, 2> WorkList(LI.begin(), LI.end());
while (!WorkList.empty()) {
Loop *L = WorkList.pop_back_val();
WorkList.insert(WorkList.end(), L->begin(), L->end());
Loops.push_back(L);
}
// Now pop each element off of the stack to visit the loops in reverse
// post-order.
for (auto *L : reverse(Loops)) {
PreservedAnalyses PassPA = Pass.run(*L, LAM);
// FIXME: We should verify the set of analyses relevant to Loop passes
// are preserved.
// We know that the loop pass couldn't have invalidated any other loop's
// analyses (that's the contract of a loop pass), so directly handle the
// loop analysis manager's invalidation here.
LAM.invalidate(*L, PassPA);
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
}
// By definition we preserve the proxy. We also preserve all analyses on
// Loops. This precludes *any* invalidation of loop analyses by the proxy,
// but that's OK because we've taken care to invalidate analyses in the
// loop analysis manager incrementally above.
PA.preserveSet<AllAnalysesOn<Loop>>();
PA.preserve<LoopAnalysisManagerFunctionProxy>();
return PA;
}
private:
LoopPassT Pass;
};
/// \brief A function to deduce a loop pass type and wrap it in the templated
/// adaptor.
template <typename LoopPassT>
FunctionToLoopPassAdaptor<LoopPassT>
createFunctionToLoopPassAdaptor(LoopPassT Pass) {
return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass));
}
}
#endif // LLVM_ANALYSIS_LOOPPASSMANAGER_H

View File

@ -302,6 +302,10 @@ class MemoryDependenceResults {
NonLocalPointerInfo() : Size(MemoryLocation::UnknownSize) {}
};
/// Cache storing a single nonlocal def for the instruction.
/// It is set when a nonlocal def is found by a query that otherwise returns
/// only local dependencies.
DenseMap<Instruction *, NonLocalDepResult> NonLocalDefsCache;
/// This map stores the cached results of doing a pointer lookup at the
/// bottom of a block.
///
@ -441,9 +445,9 @@ class MemoryDependenceResults {
/// This analysis looks for other loads and stores with invariant.group
/// metadata and the same pointer operand. Returns Unknown if it does not
/// find anything, and Def if it can be assumed that 2 instructions load or
/// store the same value.
/// FIXME: This analysis works only on single block because of restrictions
/// at the call site.
/// store the same value, and NonLocal, which indicates that a non-local Def was
/// found, which can be retrieved by calling getNonLocalPointerDependency
/// with the same queried instruction.
MemDepResult getInvariantGroupPointerDependency(LoadInst *LI, BasicBlock *BB);
/// Looks at a memory location for a load (specified by MemLocBase, Offs, and

View File

@ -55,6 +55,11 @@ struct MemIntrinsicInfo {
// Same Id is set by the target for corresponding load/store intrinsics.
unsigned short MatchingId;
int NumMemRefs;
/// This is the pointer that the intrinsic is loading from or storing to.
/// If this is non-null, then analysis/optimization passes can assume that
/// this intrinsic is functionally equivalent to a load/store from this
/// pointer.
Value *PtrVal;
};
@ -518,11 +523,15 @@ class TargetTransformInfo {
unsigned getMaxInterleaveFactor(unsigned VF) const;
/// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
/// \p Args is an optional argument which holds the instruction operand
/// values so the TTI can analyze those values, searching for special
/// cases/optimizations based on those values.
int getArithmeticInstrCost(
unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
OperandValueKind Opd2Info = OK_AnyValue,
OperandValueProperties Opd1PropInfo = OP_None,
OperandValueProperties Opd2PropInfo = OP_None) const;
OperandValueProperties Opd2PropInfo = OP_None,
ArrayRef<const Value *> Args = ArrayRef<const Value *>()) const;
/// \return The cost of a shuffle instruction of kind Kind and of type Tp.
/// The index and subtype parameters are used by the subvector insertion and
@ -763,7 +772,8 @@ class TargetTransformInfo::Concept {
getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info,
OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) = 0;
OperandValueProperties Opd2PropInfo,
ArrayRef<const Value *> Args) = 0;
virtual int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) = 0;
virtual int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
@ -984,9 +994,10 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info,
OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) override {
OperandValueProperties Opd2PropInfo,
ArrayRef<const Value *> Args) override {
return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
Opd1PropInfo, Opd2PropInfo);
Opd1PropInfo, Opd2PropInfo, Args);
}
int getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
Type *SubTp) override {
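A caller can now forward the operand values of the instruction being costed through the new parameter; a simplified sketch of that calling pattern (instCostWithOperands is an invented wrapper, while OK_AnyValue/OP_None come from the interface above):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/Instruction.h"

    int instCostWithOperands(const llvm::TargetTransformInfo &TTI,
                             llvm::Instruction &I) {
      // Hand the actual operand values to the target so it can recognize
      // special cases such as a constant power-of-two divisor.
      llvm::SmallVector<const llvm::Value *, 4> Operands(I.value_op_begin(),
                                                         I.value_op_end());
      return TTI.getArithmeticInstrCost(
          I.getOpcode(), I.getType(), llvm::TargetTransformInfo::OK_AnyValue,
          llvm::TargetTransformInfo::OK_AnyValue,
          llvm::TargetTransformInfo::OP_None,
          llvm::TargetTransformInfo::OP_None, Operands);
    }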

View File

@ -306,7 +306,8 @@ class TargetTransformInfoImplBase {
TTI::OperandValueKind Opd1Info,
TTI::OperandValueKind Opd2Info,
TTI::OperandValueProperties Opd1PropInfo,
TTI::OperandValueProperties Opd2PropInfo) {
TTI::OperandValueProperties Opd2PropInfo,
ArrayRef<const Value *> Args) {
return 1;
}
@ -427,6 +428,63 @@ class TargetTransformInfoImplBase {
return VF;
}
protected:
// Obtain the minimum required size to hold the value (without the sign bit).
// For a vector, this returns the minimum required size for one element.
unsigned minRequiredElementSize(const Value* Val, bool &isSigned) {
if (isa<ConstantDataVector>(Val) || isa<ConstantVector>(Val)) {
const auto* VectorValue = cast<Constant>(Val);
// For a vector we need to pick the maximum of the per-element
// minimum required sizes.
auto *VT = cast<VectorType>(Val->getType());
// Assume unsigned elements
isSigned = false;
// The max required size is the total vector width divided by the
// number of elements in the vector.
unsigned MaxRequiredSize = VT->getBitWidth() / VT->getNumElements();
unsigned MinRequiredSize = 0;
for(unsigned i = 0, e = VT->getNumElements(); i < e; ++i) {
if (auto* IntElement =
dyn_cast<ConstantInt>(VectorValue->getAggregateElement(i))) {
bool signedElement = IntElement->getValue().isNegative();
// Get the element min required size.
unsigned ElementMinRequiredSize =
IntElement->getValue().getMinSignedBits() - 1;
// If any element is signed, the whole vector is treated as signed.
isSigned |= signedElement;
// Save the max required bit size between all the elements.
MinRequiredSize = std::max(MinRequiredSize, ElementMinRequiredSize);
}
else {
// not an int constant element
return MaxRequiredSize;
}
}
return MinRequiredSize;
}
if (const auto* CI = dyn_cast<ConstantInt>(Val)) {
isSigned = CI->getValue().isNegative();
return CI->getValue().getMinSignedBits() - 1;
}
if (const auto* Cast = dyn_cast<SExtInst>(Val)) {
isSigned = true;
return Cast->getSrcTy()->getScalarSizeInBits() - 1;
}
if (const auto* Cast = dyn_cast<ZExtInst>(Val)) {
isSigned = false;
return Cast->getSrcTy()->getScalarSizeInBits();
}
isSigned = false;
return Val->getType()->getScalarSizeInBits();
}
bool isStridedAccess(const SCEV *Ptr) {
return Ptr && isa<SCEVAddRecExpr>(Ptr);
}

View File

@ -169,8 +169,12 @@ template <typename T> class ArrayRef;
/// Return true if we can prove that the specified FP value is either a NaN or
/// never less than 0.0.
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI,
unsigned Depth = 0);
/// If \p IncludeNeg0 is false, -0.0 is considered less than 0.0.
bool CannotBeOrderedLessThanZero(const Value *V, const TargetLibraryInfo *TLI);
/// \returns true if we can prove that the specified FP value has a 0 sign
/// bit.
bool SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI);
/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is true for all i8

View File

@ -308,7 +308,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None,
ArrayRef<const Value *> Args = ArrayRef<const Value *>()) {
// Check if any of the operands are vector operands.
const TargetLoweringBase *TLI = getTLI();
int ISD = TLI->InstructionOpcodeToISD(Opcode);

View File

@ -52,13 +52,20 @@ class DIEAbbrevData {
/// Dwarf form code.
dwarf::Form Form;
/// Dwarf attribute value for DW_FORM_implicit_const
int64_t Value;
public:
DIEAbbrevData(dwarf::Attribute A, dwarf::Form F) : Attribute(A), Form(F) {}
DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
: Attribute(A), Form(F), Value(0) {}
DIEAbbrevData(dwarf::Attribute A, int64_t V)
: Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}
/// Accessors.
/// @{
dwarf::Attribute getAttribute() const { return Attribute; }
dwarf::Form getForm() const { return Form; }
int64_t getValue() const { return Value; }
/// @}
/// Used to gather unique data for the abbreviation folding set.
@ -102,6 +109,11 @@ class DIEAbbrev : public FoldingSetNode {
Data.push_back(DIEAbbrevData(Attribute, Form));
}
/// Adds attribute with DW_FORM_implicit_const value
void AddImplicitConstAttribute(dwarf::Attribute Attribute, int64_t Value) {
Data.push_back(DIEAbbrevData(Attribute, Value));
}
/// Used to gather unique data for the abbreviation folding set.
void Profile(FoldingSetNodeID &ID) const;
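A hedged sketch of how the new DW_FORM_implicit_const support might be used (addFixedDeclLine, the Abbrev parameter and the value 42 are invented for illustration):

    #include "llvm/CodeGen/DIE.h"
    #include "llvm/Support/Dwarf.h"

    void addFixedDeclLine(llvm::DIEAbbrev &Abbrev) {
      // The value is stored in the abbreviation itself, so DIEs using this
      // abbreviation carry no bytes for the attribute.
      Abbrev.AddImplicitConstAttribute(llvm::dwarf::DW_AT_decl_line, 42);
    }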

View File

@ -76,6 +76,7 @@ class MachineBlockFrequencyInfo;
class MachineRegisterInfo;
class TargetPassConfig;
class TargetRegisterInfo;
class raw_ostream;
/// This pass implements the reg bank selector pass used in the GlobalISel
/// pipeline. At the end of this pass, all register operands have been assigned
@ -450,6 +451,18 @@ class RegBankSelect : public MachineFunctionPass {
bool operator>(const MappingCost &Cost) const {
return *this != Cost && Cost < *this;
}
/// Print this on dbgs() stream.
void dump() const;
/// Print this on \p OS.
void print(raw_ostream &OS) const;
/// Overload the stream operator for easy debug printing.
friend raw_ostream &operator<<(raw_ostream &OS, const MappingCost &Cost) {
Cost.print(OS);
return OS;
}
};
/// Interface to the target lowering info related
@ -626,6 +639,7 @@ class RegBankSelect : public MachineFunctionPass {
/// \endcode
bool runOnMachineFunction(MachineFunction &MF) override;
};
} // End namespace llvm.
#endif

View File

@ -41,11 +41,8 @@ class RegisterBank {
friend RegisterBankInfo;
public:
/// The default constructor will leave the object in
/// an invalid state. I.e. isValid() == false.
/// The fields must be updated to fix that and only
/// RegisterBankInfo instances are allowed to do that
RegisterBank();
RegisterBank(unsigned ID, const char *Name, unsigned Size,
const uint32_t *ContainedRegClasses);
/// Get the identifier of this register bank.
unsigned getID() const { return ID; }

View File

@ -384,10 +384,6 @@ class RegisterBankInfo {
/// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
/// RegisterBank instances.
///
/// \note For the verify method to succeed all the \p NumRegBanks
/// must be initialized by createRegisterBank and updated with
/// addRegBankCoverage RegisterBank.
RegisterBankInfo(RegisterBank **RegBanks, unsigned NumRegBanks);
/// This constructor is meaningless.
@ -400,31 +396,6 @@ class RegisterBankInfo {
llvm_unreachable("This constructor should not be executed");
}
/// Create a new register bank with the given parameter and add it
/// to RegBanks.
/// \pre \p ID must not already be used.
/// \pre \p ID < NumRegBanks.
void createRegisterBank(unsigned ID, const char *Name);
/// Add \p RCId to the set of register classes that the register bank
/// identified by \p ID covers.
/// This method transitively adds all the sub classes and the subreg-classes
/// of \p RCId to the set of covered register classes.
/// It also adjusts the size of the register bank to reflect the maximal
/// size of a value that can be held in that register bank.
///
/// \note This method does *not* add the super classes of \p RCId.
/// The rationale is if \p ID covers the registers of \p RCId, that
/// does not necessarily mean that \p ID covers the set of registers
/// of RCId's superclasses.
/// For consistency, this method does *not* add the superreg classes either.
/// The expected use is to add the coverage top-down with respect to the
/// register hierarchy.
///
/// \todo TableGen should just generate the BitSet vector for us.
void addRegBankCoverage(unsigned ID, unsigned RCId,
const TargetRegisterInfo &TRI);
/// Get the register bank identified by \p ID.
RegisterBank &getRegBank(unsigned ID) {
assert(ID < getNumRegBanks() && "Accessing an unknown register bank");

View File

@ -503,19 +503,6 @@ namespace ISD {
/// address spaces.
ADDRSPACECAST,
/// CONVERT_RNDSAT - This operator is used to support various conversions
/// between various types (float, signed, unsigned and vectors of those
/// types) with rounding and saturation. NOTE: Avoid using this operator as
/// most targets don't support it and the operator might be removed in the
/// future. It takes the following arguments:
/// 0) value
/// 1) dest type (type to convert to)
/// 2) src type (type to convert from)
/// 3) rounding imm
/// 4) saturation imm
/// 5) ISD::CvtCode indicating the type of conversion to do
CONVERT_RNDSAT,
/// FP16_TO_FP, FP_TO_FP16 - These operators are used to perform promotions
/// and truncation for half-precision (16 bit) floating numbers. These nodes
/// form a semi-softened interface for dealing with f16 (as an i16), which
@ -927,21 +914,6 @@ namespace ISD {
/// SETCC_INVALID if it is not possible to represent the resultant comparison.
CondCode getSetCCAndOperation(CondCode Op1, CondCode Op2, bool isInteger);
//===--------------------------------------------------------------------===//
/// This enum defines the various converts CONVERT_RNDSAT supports.
enum CvtCode {
CVT_FF, /// Float from Float
CVT_FS, /// Float from Signed
CVT_FU, /// Float from Unsigned
CVT_SF, /// Signed from Float
CVT_UF, /// Unsigned from Float
CVT_SS, /// Signed from Signed
CVT_SU, /// Signed from Unsigned
CVT_US, /// Unsigned from Signed
CVT_UU, /// Unsigned from Unsigned
CVT_INVALID /// Marker - Invalid opcode
};
} // end llvm::ISD namespace
} // end llvm namespace

View File

@ -626,12 +626,6 @@ class SelectionDAG {
SDValue getCondCode(ISD::CondCode Cond);
/// Returns the ConvertRndSat node. Note: Avoid using this node because it may
/// disappear in the future and most targets don't support it.
SDValue getConvertRndSat(EVT VT, const SDLoc &dl, SDValue Val, SDValue DTy,
SDValue STy, SDValue Rnd, SDValue Sat,
ISD::CvtCode Code);
/// Return an ISD::VECTOR_SHUFFLE node. The number of elements in VT,
/// which must be a vector type, must match the number of mask elements
/// NumElts. An integer mask element equal to -1 is treated as undefined.

View File

@ -1860,26 +1860,6 @@ class CondCodeSDNode : public SDNode {
}
};
/// NOTE: avoid using this node as this may disappear in the
/// future and most targets don't support it.
class CvtRndSatSDNode : public SDNode {
ISD::CvtCode CvtCode;
friend class SelectionDAG;
explicit CvtRndSatSDNode(EVT VT, unsigned Order, const DebugLoc &dl,
ISD::CvtCode Code)
: SDNode(ISD::CONVERT_RNDSAT, Order, dl, getSDVTList(VT)), CvtCode(Code) {
}
public:
ISD::CvtCode getCvtCode() const { return CvtCode; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::CONVERT_RNDSAT;
}
};
/// This class is used to represent EVT's, which are used
/// to parameterize some operations.
class VTSDNode : public SDNode {
@ -2041,7 +2021,7 @@ class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
friend class SelectionDAG;
MaskedStoreSDNode(unsigned Order, const DebugLoc &dl, SDVTList VTs,
bool isTrunc, bool isCompressing, EVT MemVT,
MachineMemOperand *MMO)
: MaskedLoadStoreSDNode(ISD::MSTORE, Order, dl, VTs, MemVT, MMO) {
StoreSDNodeBits.IsTruncating = isTrunc;
@ -2054,8 +2034,8 @@ class MaskedStoreSDNode : public MaskedLoadStoreSDNode {
bool isTruncatingStore() const { return StoreSDNodeBits.IsTruncating; }
/// Returns true if the op does a compression to the vector before storing.
/// The node contiguously stores the active elements (integers or floats)
/// in src (those with their respective bit set in writemask k) to unaligned
/// memory at base_addr.
bool isCompressingStore() const { return StoreSDNodeBits.IsCompressing; }

View File

@ -0,0 +1,56 @@
//===-- CVTypeDumper.h - CodeView type info dumper --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_CVTYPEDUMPER_H
#define LLVM_DEBUGINFO_CODEVIEW_CVTYPEDUMPER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
#include "llvm/Support/ScopedPrinter.h"
namespace llvm {
namespace codeview {
/// Dumper for CodeView type streams found in COFF object files and PDB files.
class CVTypeDumper {
public:
explicit CVTypeDumper(TypeDatabase &TypeDB) : TypeDB(TypeDB) {}
/// Dumps one type record. Returns false if there was a type parsing error,
/// and true otherwise. This should be called in order, since the dumper
/// maintains state about previous records which are necessary for cross
/// type references.
Error dump(const CVType &Record, TypeVisitorCallbacks &Dumper);
/// Dumps the type records in Types. Returns false if there was a type stream
/// parse error, and true otherwise.
Error dump(const CVTypeArray &Types, TypeVisitorCallbacks &Dumper);
/// Dumps the type records in Data. Returns false if there was a type stream
/// parse error, and true otherwise. Use this method instead of the
/// CVTypeArray overload when type records are laid out contiguously in
/// memory.
Error dump(ArrayRef<uint8_t> Data, TypeVisitorCallbacks &Dumper);
static void printTypeIndex(ScopedPrinter &Printer, StringRef FieldName,
TypeIndex TI, TypeDatabase &DB);
private:
TypeDatabase &TypeDB;
};
} // end namespace codeview
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_CVTYPEDUMPER_H
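A sketch of the intended division of labour between the pieces added in this commit: the TypeDatabase accumulates record names, CVTypeDumper drives the visitation, and a TypeDumpVisitor (declared further below) does the printing. dumpTypeStream is an invented wrapper; Types and W are assumed to come from the caller:

    #include "llvm/DebugInfo/CodeView/CVTypeDumper.h"
    #include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/ScopedPrinter.h"

    llvm::Error dumpTypeStream(const llvm::codeview::CVTypeArray &Types,
                               llvm::ScopedPrinter &W) {
      llvm::codeview::TypeDatabase TypeDB;
      llvm::codeview::CVTypeDumper Dumper(TypeDB);
      llvm::codeview::TypeDumpVisitor Printer(TypeDB, &W,
                                              /*PrintRecordBytes=*/false);
      return Dumper.dump(Types, Printer);
    }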

View File

@ -20,15 +20,15 @@ namespace llvm {
class ScopedPrinter;
namespace codeview {
class CVTypeDumper;
class TypeDatabase;
/// Dumper for CodeView symbol streams found in COFF object files and PDB files.
class CVSymbolDumper {
public:
CVSymbolDumper(ScopedPrinter &W, CVTypeDumper &CVTD,
CVSymbolDumper(ScopedPrinter &W, TypeDatabase &TypeDB,
std::unique_ptr<SymbolDumpDelegate> ObjDelegate,
bool PrintRecordBytes)
: W(W), CVTD(CVTD), ObjDelegate(std::move(ObjDelegate)),
: W(W), TypeDB(TypeDB), ObjDelegate(std::move(ObjDelegate)),
PrintRecordBytes(PrintRecordBytes) {}
/// Dumps one type record. Returns false if there was a type parsing error,
@ -43,7 +43,7 @@ class CVSymbolDumper {
private:
ScopedPrinter &W;
CVTypeDumper &CVTD;
TypeDatabase &TypeDB;
std::unique_ptr<SymbolDumpDelegate> ObjDelegate;
bool PrintRecordBytes;

View File

@ -0,0 +1,55 @@
//===- TypeDatabase.h - A collection of CodeView type records ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASE_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASE_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/StringSaver.h"
namespace llvm {
namespace codeview {
class TypeDatabase {
public:
TypeDatabase() : TypeNameStorage(Allocator) {}
/// Gets the type index for the next type record.
TypeIndex getNextTypeIndex() const;
/// Records the name of a type, and reserves its type index.
void recordType(StringRef Name, CVType Data);
/// Saves the name in a StringSet and creates a stable StringRef.
StringRef saveTypeName(StringRef TypeName);
StringRef getTypeName(TypeIndex Index) const;
bool containsTypeIndex(TypeIndex Index) const;
uint32_t size() const;
private:
BumpPtrAllocator Allocator;
/// All user defined type records in .debug$T live in here. Type indices
/// greater than 0x1000 are user defined. Subtract 0x1000 from the index to
/// index into this vector.
SmallVector<StringRef, 10> CVUDTNames;
SmallVector<CVType, 10> TypeRecords;
StringSaver TypeNameStorage;
};
}
}
#endif

View File

@ -0,0 +1,53 @@
//===-- TypeDatabaseVisitor.h -----------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASEVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASEVISITOR_H
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
namespace llvm {
namespace codeview {
/// Visitor that populates a TypeDatabase from a stream of CodeView type records.
class TypeDatabaseVisitor : public TypeVisitorCallbacks {
public:
explicit TypeDatabaseVisitor(TypeDatabase &TypeDB) : TypeDB(TypeDB) {}
/// Paired begin/end actions for all types. Receives all record data,
/// including the fixed-length record prefix.
Error visitTypeBegin(CVType &Record) override;
Error visitTypeEnd(CVType &Record) override;
Error visitMemberBegin(CVMemberRecord &Record) override;
Error visitMemberEnd(CVMemberRecord &Record) override;
#define TYPE_RECORD(EnumName, EnumVal, Name) \
Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
#define MEMBER_RECORD(EnumName, EnumVal, Name) \
Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "TypeRecords.def"
private:
bool IsInFieldList = false;
/// Name of the current type. Only valid before visitTypeEnd.
StringRef Name;
TypeDatabase &TypeDB;
};
} // end namespace codeview
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDATABASEVISITOR_H

View File

@ -0,0 +1,67 @@
//===-- TypeDumpVisitor.h - CodeView type info dumper -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPVISITOR_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
namespace llvm {
class ScopedPrinter;
namespace codeview {
/// Dumper for CodeView type streams found in COFF object files and PDB files.
class TypeDumpVisitor : public TypeVisitorCallbacks {
public:
TypeDumpVisitor(TypeDatabase &TypeDB, ScopedPrinter *W, bool PrintRecordBytes)
: W(W), PrintRecordBytes(PrintRecordBytes), TypeDB(TypeDB) {}
void printTypeIndex(StringRef FieldName, TypeIndex TI) const;
/// Action to take on unknown types. By default, they are ignored.
Error visitUnknownType(CVType &Record) override;
Error visitUnknownMember(CVMemberRecord &Record) override;
/// Paired begin/end actions for all types. Receives all record data,
/// including the fixed-length record prefix.
Error visitTypeBegin(CVType &Record) override;
Error visitTypeEnd(CVType &Record) override;
Error visitMemberBegin(CVMemberRecord &Record) override;
Error visitMemberEnd(CVMemberRecord &Record) override;
#define TYPE_RECORD(EnumName, EnumVal, Name) \
Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
#define MEMBER_RECORD(EnumName, EnumVal, Name) \
Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "TypeRecords.def"
private:
void printMemberAttributes(MemberAttributes Attrs);
void printMemberAttributes(MemberAccess Access, MethodKind Kind,
MethodOptions Options);
ScopedPrinter *W;
bool PrintRecordBytes = false;
TypeDatabase &TypeDB;
};
} // end namespace codeview
} // end namespace llvm
#endif

View File

@ -1,108 +0,0 @@
//===-- TypeDumper.h - CodeView type info dumper ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPER_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
namespace llvm {
class ScopedPrinter;
namespace codeview {
/// Dumper for CodeView type streams found in COFF object files and PDB files.
class CVTypeDumper : public TypeVisitorCallbacks {
public:
CVTypeDumper(ScopedPrinter *W, bool PrintRecordBytes)
: W(W), PrintRecordBytes(PrintRecordBytes) {}
StringRef getTypeName(TypeIndex TI);
void printTypeIndex(StringRef FieldName, TypeIndex TI);
/// Dumps one type record. Returns false if there was a type parsing error,
/// and true otherwise. This should be called in order, since the dumper
/// maintains state about previous records which are necessary for cross
/// type references.
Error dump(const CVRecord<TypeLeafKind> &Record);
/// Dumps the type records in Types. Returns false if there was a type stream
/// parse error, and true otherwise.
Error dump(const CVTypeArray &Types);
/// Dumps the type records in Data. Returns false if there was a type stream
/// parse error, and true otherwise. Use this method instead of the
/// CVTypeArray overload when type records are laid out contiguously in
/// memory.
Error dump(ArrayRef<uint8_t> Data);
/// Gets the type index for the next type record.
unsigned getNextTypeIndex() const {
return 0x1000 + CVUDTNames.size();
}
/// Records the name of a type, and reserves its type index.
void recordType(StringRef Name) { CVUDTNames.push_back(Name); }
/// Saves the name in a StringSet and creates a stable StringRef.
StringRef saveName(StringRef TypeName) {
return TypeNames.insert(TypeName).first->getKey();
}
void setPrinter(ScopedPrinter *P);
ScopedPrinter *getPrinter() { return W; }
/// Action to take on unknown types. By default, they are ignored.
Error visitUnknownType(CVType &Record) override;
Error visitUnknownMember(CVMemberRecord &Record) override;
/// Paired begin/end actions for all types. Receives all record data,
/// including the fixed-length record prefix.
Error visitTypeBegin(CVType &Record) override;
Error visitTypeEnd(CVType &Record) override;
Error visitMemberBegin(CVMemberRecord &Record) override;
Error visitMemberEnd(CVMemberRecord &Record) override;
#define TYPE_RECORD(EnumName, EnumVal, Name) \
Error visitKnownRecord(CVType &CVR, Name##Record &Record) override;
#define MEMBER_RECORD(EnumName, EnumVal, Name) \
Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override;
#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
#include "TypeRecords.def"
private:
void printMemberAttributes(MemberAttributes Attrs);
void printMemberAttributes(MemberAccess Access, MethodKind Kind,
MethodOptions Options);
ScopedPrinter *W;
bool IsInFieldList = false;
bool PrintRecordBytes = false;
/// Name of the current type. Only valid before visitTypeEnd.
StringRef Name;
/// All user defined type records in .debug$T live in here. Type indices
/// greater than 0x1000 are user defined. Subtract 0x1000 from the index to
/// index into this vector.
SmallVector<StringRef, 10> CVUDTNames;
StringSet<> TypeNames;
};
} // end namespace codeview
} // end namespace llvm
#endif // LLVM_DEBUGINFO_CODEVIEW_TYPEDUMPER_H

View File

@ -23,21 +23,32 @@ class raw_ostream;
class DWARFAbbreviationDeclaration {
public:
struct AttributeSpec {
AttributeSpec(dwarf::Attribute A, dwarf::Form F, Optional<uint8_t> S)
: Attr(A), Form(F), ByteSize(S) {}
AttributeSpec(dwarf::Attribute A, dwarf::Form F, Optional<int64_t> V)
: Attr(A), Form(F), ByteSizeOrValue(V) {}
dwarf::Attribute Attr;
dwarf::Form Form;
/// If ByteSize has a value, then it contains the fixed size in bytes for
/// the Form in this object. If ByteSize doesn't have a value, then the
/// byte size of Form either varies according to the DWARFUnit that it is
/// contained in or the value size varies and must be decoded from the
/// debug information in order to determine its size.
Optional<uint8_t> ByteSize;
/// The following field is used for ByteSize for non-implicit_const
/// attributes and as value for implicit_const ones, indicated by
/// Form == DW_FORM_implicit_const.
/// The following cases are distinguished:
/// * Form != DW_FORM_implicit_const and ByteSizeOrValue has a value:
/// ByteSizeOrValue contains the fixed size in bytes
/// for the Form in this object.
/// * Form != DW_FORM_implicit_const and ByteSizeOrValue is None:
/// byte size of Form either varies according to the DWARFUnit
/// that it is contained in or the value size varies and must be
/// decoded from the debug information in order to determine its size.
/// * Form == DW_FORM_implicit_const:
/// ByteSizeOrValue contains the value for the implicit_const attribute.
Optional<int64_t> ByteSizeOrValue;
bool isImplicitConst() const {
return Form == dwarf::DW_FORM_implicit_const;
}
/// Get the fixed byte size of this Form if possible. This function might
/// use the DWARFUnit to calculate the size of the Form, like for
/// DW_AT_address and DW_AT_ref_addr, so this isn't just an accessor for
/// the ByteSize member.
Optional<uint8_t> getByteSize(const DWARFUnit &U) const;
Optional<int64_t> getByteSize(const DWARFUnit &U) const;
};
typedef SmallVector<AttributeSpec, 8> AttributeSpecVector;

View File

@ -140,21 +140,6 @@ class DWARFDie {
const char *getAttributeValueAsString(dwarf::Attribute Attr,
const char *FailValue) const;
/// Extract the specified attribute from this DIE as an address.
///
/// Extract an attribute value from this DIE only. This call doesn't look
/// for the attribute value in any DW_AT_specification or
/// DW_AT_abstract_origin referenced DIEs.
///
/// \param Attr the attribute to extract.
/// \param FailValue the value to return if this DIE doesn't have this
/// attribute.
/// \returns the address value of the attribute or FailValue if the
/// attribute doesn't exist or if the attribute's form isn't a form that
/// describes an address.
uint64_t getAttributeValueAsAddress(dwarf::Attribute Attr,
uint64_t FailValue) const;
/// Extract the specified attribute from this DIE as an address.
///
/// Extract an attribute value from this DIE only. This call doesn't look
@ -165,21 +150,6 @@ class DWARFDie {
/// \returns an optional value for the attribute.
Optional<uint64_t> getAttributeValueAsAddress(dwarf::Attribute Attr) const;
/// Extract the specified attribute from this DIE as a signed integer.
///
/// Extract an attribute value from this DIE only. This call doesn't look
/// for the attribute value in any DW_AT_specification or
/// DW_AT_abstract_origin referenced DIEs.
///
/// \param Attr the attribute to extract.
/// \param FailValue the value to return if this DIE doesn't have this
/// attribute.
/// \returns the signed integer constant value of the attribute or FailValue
/// if the attribute doesn't exist or if the attribute's form isn't a form
/// that describes a signed integer.
int64_t getAttributeValueAsSignedConstant(dwarf::Attribute Attr,
int64_t FailValue) const;
/// Extract the specified attribute from this DIE as a signed integer.
///
/// Extract an attribute value from this DIE only. This call doesn't look
@ -191,21 +161,6 @@ class DWARFDie {
Optional<int64_t>
getAttributeValueAsSignedConstant(dwarf::Attribute Attr) const;
/// Extract the specified attribute from this DIE as an unsigned integer.
///
/// Extract an attribute value from this DIE only. This call doesn't look
/// for the attribute value in any DW_AT_specification or
/// DW_AT_abstract_origin referenced DIEs.
///
/// \param Attr the attribute to extract.
/// \param FailValue the value to return if this DIE doesn't have this
/// attribute.
/// \returns the unsigned integer constant value of the attribute or FailValue
/// if the attribute doesn't exist or if the attribute's form isn't a form
/// that describes an unsigned integer.
uint64_t getAttributeValueAsUnsignedConstant(dwarf::Attribute Attr,
uint64_t FailValue) const;
/// Extract the specified attribute from this DIE as an unsigned integer.
///
/// Extract an attribute value from this DIE only. This call doesn't look
@ -217,21 +172,6 @@ class DWARFDie {
Optional<uint64_t>
getAttributeValueAsUnsignedConstant(dwarf::Attribute Attr) const;
/// Extract the specified attribute from this DIE as absolute DIE Offset.
///
/// Extract an attribute value from this DIE only. This call doesn't look
/// for the attribute value in any DW_AT_specification or
/// DW_AT_abstract_origin referenced DIEs.
///
/// \param Attr the attribute to extract.
/// \param FailValue the value to return if this DIE doesn't have this
/// attribute.
/// \returns the unsigned integer constant value of the attribute or FailValue
/// if the attribute doesn't exist or if the attribute's form isn't a form
/// that describes a reference.
uint64_t getAttributeValueAsReference(dwarf::Attribute Attr,
uint64_t FailValue) const;
/// Extract the specified attribute from this DIE as absolute DIE Offset.
///
/// Extract an attribute value from this DIE only. This call doesn't look
@ -242,20 +182,6 @@ class DWARFDie {
/// \returns an optional value for the attribute.
Optional<uint64_t> getAttributeValueAsReference(dwarf::Attribute Attr) const;
/// Extract the specified attribute from this DIE as absolute section offset.
///
/// Extract an attribute value from this DIE only. This call doesn't look
/// for the attribute value in any DW_AT_specification or
/// DW_AT_abstract_origin referenced DIEs.
///
/// \param Attr the attribute to extract.
/// \param FailValue the value to return if this DIE doesn't have this
/// attribute.
/// \returns the unsigned integer constant value of the attribute or FailValue
/// if the attribute doesn't exist or if the attribute's form isn't a form
/// that describes a section offset.
uint64_t getAttributeValueAsSectionOffset(dwarf::Attribute Attr,
uint64_t FailValue) const;
/// Extract the specified attribute from this DIE as absolute section offset.
///
/// Extract an attribute value from this DIE only. This call doesn't look

View File

@ -57,6 +57,9 @@ class DWARFFormValue {
DWARFFormValue(dwarf::Form F = dwarf::Form(0)) : Form(F), U(nullptr) {}
dwarf::Form getForm() const { return Form; }
void setForm(dwarf::Form F) { Form = F; }
void setUValue(uint64_t V) { Value.uval = V; }
void setSValue(int64_t V) { Value.sval = V; }
void setPValue(const char *V) { Value.cstr = V; }
bool isFormClass(FormClass FC) const;
const DWARFUnit *getUnit() const { return U; }
void dump(raw_ostream &OS) const;

View File

@ -153,30 +153,24 @@ class VarStreamArrayIterator
return ThisValue;
}
IterType &operator+=(std::ptrdiff_t N) {
while (N > 0) {
// We are done with the current record, discard it so that we are
// positioned at the next record.
IterRef = IterRef.drop_front(ThisLen);
if (IterRef.getLength() == 0) {
// There is nothing after the current record, we must make this an end
// iterator.
IterType &operator++() {
// We are done with the current record, discard it so that we are
// positioned at the next record.
IterRef = IterRef.drop_front(ThisLen);
if (IterRef.getLength() == 0) {
// There is nothing after the current record, we must make this an end
// iterator.
moveToEnd();
} else {
// There is some data after the current record.
auto EC = Extract(IterRef, ThisLen, ThisValue);
if (EC) {
consumeError(std::move(EC));
markError();
} else if (ThisLen == 0) {
// An empty record? Make this an end iterator.
moveToEnd();
return *this;
} else {
// There is some data after the current record.
auto EC = Extract(IterRef, ThisLen, ThisValue);
if (EC) {
consumeError(std::move(EC));
markError();
return *this;
} else if (ThisLen == 0) {
// An empty record? Make this an end iterator.
moveToEnd();
return *this;
}
}
--N;
}
return *this;
}

View File

@ -17,7 +17,9 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DebugInfo.h"
@ -51,6 +53,10 @@ namespace llvm {
SmallVector<Metadata *, 4> AllSubprograms;
SmallVector<Metadata *, 4> AllGVs;
SmallVector<TrackingMDNodeRef, 4> AllImportedModules;
/// Map from a macro parent (either a DIMacroFile or nullptr) to a list of
/// Metadata, all of type DIMacroNode.
/// DIMacroNodes with a nullptr parent are direct children of the DICompileUnit.
MapVector<MDNode *, SetVector<Metadata *>> AllMacrosPerParent;
/// Track nodes that may be unresolved.
SmallVector<TrackingMDNodeRef, 4> UnresolvedNodes;
@ -116,6 +122,24 @@ namespace llvm {
DIFile::ChecksumKind CSKind = DIFile::CSK_None,
StringRef Checksum = StringRef());
/// Create debugging information entry for a macro.
/// \param Parent Macro parent (could be nullptr).
/// \param Line Source line number where the macro is defined.
/// \param MacroType DW_MACINFO_define or DW_MACINFO_undef.
/// \param Name Macro name.
/// \param Value Macro value.
DIMacro *createMacro(DIMacroFile *Parent, unsigned Line, unsigned MacroType,
StringRef Name, StringRef Value = StringRef());
/// Create a temporary debugging information entry for a macro file.
/// The list of macro node direct children will be calculated by DIBuilder,
/// using the \p Parent relationship.
/// \param Parent Macro file parent (could be nullptr).
/// \param Line Source line number where the macro file is included.
/// \param File File descriptor containing the name of the macro file.
DIMacroFile *createTempMacroFile(DIMacroFile *Parent, unsigned Line,
DIFile *File);
/// Create a single enumerator value.
DIEnumerator *createEnumerator(StringRef Name, int64_t Val);
@ -447,6 +471,9 @@ namespace llvm {
/// Get a DINodeArray, create one if required.
DINodeArray getOrCreateArray(ArrayRef<Metadata *> Elements);
/// Get a DIMacroNodeArray, create one if required.
DIMacroNodeArray getOrCreateMacroArray(ArrayRef<Metadata *> Elements);
/// Get a DITypeRefArray, create one if required.
DITypeRefArray getOrCreateTypeArray(ArrayRef<Metadata *> Elements);
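A hedged sketch of the new macro debug-info entry points (addMacroDebugInfo, DIB, HeaderFile, the line numbers and the WIDTH/64 macro are all invented):

    #include "llvm/IR/DIBuilder.h"
    #include "llvm/Support/Dwarf.h"

    void addMacroDebugInfo(llvm::DIBuilder &DIB, llvm::DIFile *HeaderFile) {
      // Temporary macro-file node; DIBuilder computes its children list when
      // it is finalized, using the Parent links recorded here.
      llvm::DIMacroFile *MF =
          DIB.createTempMacroFile(/*Parent=*/nullptr, /*Line=*/3, HeaderFile);
      // Equivalent of "#define WIDTH 64" at line 10 of that file.
      DIB.createMacro(MF, /*Line=*/10, llvm::dwarf::DW_MACINFO_define,
                      "WIDTH", "64");
    }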

View File

@ -1295,16 +1295,12 @@ class DILocation : public MDNode {
/// Check \c this can be discriminated from \c RHS in a linetable entry.
/// Scope and inlined-at chains are not recorded in the linetable, so they
/// cannot be used to distinguish basic blocks.
///
/// The current implementation is weaker than it should be, since it just
/// checks filename and line.
///
/// FIXME: Add a check for getDiscriminator().
/// FIXME: Add a check for getColumn().
/// FIXME: Change the getFilename() check to getFile() (or add one for
/// getDirectory()).
bool canDiscriminate(const DILocation &RHS) const {
return getFilename() != RHS.getFilename() || getLine() != RHS.getLine();
return getLine() != RHS.getLine() ||
getColumn() != RHS.getColumn() ||
getDiscriminator() != RHS.getDiscriminator() ||
getFilename() != RHS.getFilename() ||
getDirectory() != RHS.getDirectory();
}
/// Get the DWARF discriminator.
@ -1327,10 +1323,13 @@ class DILocation : public MDNode {
/// represented in a single line entry. In this case, no location
/// should be set.
///
/// Currently this function is simply a stub, and no location will be
/// used for all cases.
static DILocation *getMergedLocation(const DILocation *LocA,
const DILocation *LocB) {
/// Currently the function does not create a new location. If the locations
/// are the same, or cannot be discriminated, the first location is returned.
/// Otherwise an empty location will be used.
static const DILocation *getMergedLocation(const DILocation *LocA,
const DILocation *LocB) {
if (LocA && LocB && (LocA == LocB || !LocA->canDiscriminate(*LocB)))
return LocA;
return nullptr;
}
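
The merge rule above is deliberately conservative: a location survives only when the two inputs cannot be told apart in the line table. A short sketch of how a transform that folds two instructions into one might apply it; the helper name is illustrative and error handling is omitted.

#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Sketch: choose the debug location for an instruction that replaces A and B.
void setMergedDebugLoc(Instruction *New, const Instruction *A,
                       const Instruction *B) {
  const DILocation *LA = A->getDebugLoc();
  const DILocation *LB = B->getDebugLoc();
  if (const DILocation *Merged = DILocation::getMergedLocation(LA, LB))
    New->setDebugLoc(Merged);
  // When the locations can be discriminated, getMergedLocation returns
  // nullptr and no location is set here, matching the policy documented above.
}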

View File

@ -37,11 +37,11 @@ class GlobalObject : public GlobalValue {
setGlobalValueSubClassData(0);
}
std::string Section; // Section to emit this into, empty means default
Comdat *ObjComdat;
enum {
LastAlignmentBit = 4,
HasMetadataHashEntryBit,
HasSectionHashEntryBit,
GlobalObjectBits,
};
@ -66,8 +66,26 @@ class GlobalObject : public GlobalValue {
unsigned getGlobalObjectSubClassData() const;
void setGlobalObjectSubClassData(unsigned Val);
bool hasSection() const { return !getSection().empty(); }
StringRef getSection() const { return Section; }
/// Check if this global has a custom object file section.
///
/// This is more efficient than calling getSection() and checking for an empty
/// string.
bool hasSection() const {
return getGlobalValueSubClassData() & (1 << HasSectionHashEntryBit);
}
/// Get the custom section of this global if it has one.
///
/// If this global does not have a custom section, this will be empty and the
/// default object file section (.text, .data, etc) will be used.
StringRef getSection() const {
return hasSection() ? getSectionImpl() : StringRef();
}
/// Change the section for this global.
///
/// Setting the section to the empty string tells LLVM to choose an
/// appropriate default object file section.
void setSection(StringRef S);
bool hasComdat() const { return getComdat() != nullptr; }
@ -134,14 +152,20 @@ class GlobalObject : public GlobalValue {
void clearMetadata();
private:
void setGlobalObjectFlag(unsigned Bit, bool Val) {
unsigned Mask = 1 << Bit;
setGlobalValueSubClassData((~Mask & getGlobalValueSubClassData()) |
(Val ? Mask : 0u));
}
bool hasMetadataHashEntry() const {
return getGlobalValueSubClassData() & (1 << HasMetadataHashEntryBit);
}
void setHasMetadataHashEntry(bool HasEntry) {
unsigned Mask = 1 << HasMetadataHashEntryBit;
setGlobalValueSubClassData((~Mask & getGlobalValueSubClassData()) |
(HasEntry ? Mask : 0u));
setGlobalObjectFlag(HasMetadataHashEntryBit, HasEntry);
}
StringRef getSectionImpl() const;
};
} // end namespace llvm
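
The section change above caches a "has a custom section" fact in a spare subclass-data bit so the common query never touches the string. A self-contained sketch of that bit-flag pattern with invented names; this is not the LLVM GlobalObject itself.

#include <cassert>
#include <cstdint>
#include <string>

class ObjectWithSection {
  enum : unsigned {
    LastAlignmentBit = 4,    // bits 0..4 could hold log2(alignment)
    HasMetadataHashEntryBit, // bit 5
    HasSectionHashEntryBit,  // bit 6
  };

  uint32_t SubClassData = 0;
  std::string Section; // only consulted when the bit says it is non-empty

  void setFlag(unsigned Bit, bool Val) {
    uint32_t Mask = 1u << Bit;
    SubClassData = (SubClassData & ~Mask) | (Val ? Mask : 0u);
  }

public:
  // Cheap: a single bit test instead of materializing and testing the string.
  bool hasSection() const {
    return SubClassData & (1u << HasSectionHashEntryBit);
  }

  std::string getSection() const {
    return hasSection() ? Section : std::string();
  }

  // An empty string means "use the default section" and clears the bit.
  void setSection(const std::string &S) {
    Section = S;
    setFlag(HasSectionHashEntryBit, !S.empty());
  }
};

int main() {
  ObjectWithSection GO;
  assert(!GO.hasSection());
  GO.setSection(".mydata");
  assert(GO.hasSection() && GO.getSection() == ".mydata");
  GO.setSection("");
  assert(!GO.hasSection());
}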

View File

@ -578,8 +578,8 @@ def int_invariant_end : Intrinsic<[],
llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<2>]>;
def int_invariant_group_barrier : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty],
def int_invariant_group_barrier : Intrinsic<[llvm_ptr_ty],
[llvm_ptr_ty],
[IntrNoMem]>;
//===------------------------ Stackmap Intrinsics -------------------------===//
@ -683,29 +683,6 @@ def int_convert_to_fp16 : Intrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
}
// These convert intrinsics are to support various conversions between
// various types with rounding and saturation. NOTE: avoid using these
// intrinsics as they might be removed sometime in the future and
// most targets don't support them.
def int_convertff : Intrinsic<[llvm_anyfloat_ty],
[llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertfsi : Intrinsic<[llvm_anyfloat_ty],
[llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertfui : Intrinsic<[llvm_anyfloat_ty],
[llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertsif : Intrinsic<[llvm_anyint_ty],
[llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertuif : Intrinsic<[llvm_anyint_ty],
[llvm_anyfloat_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertss : Intrinsic<[llvm_anyint_ty],
[llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertsu : Intrinsic<[llvm_anyint_ty],
[llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertus : Intrinsic<[llvm_anyint_ty],
[llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
def int_convertuu : Intrinsic<[llvm_anyint_ty],
[llvm_anyint_ty, llvm_i32_ty, llvm_i32_ty]>;
// Clear cache intrinsic, default to ignore (ie. emit nothing)
// maps to void __clear_cache() on supporting platforms
def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],

View File

@ -37,12 +37,6 @@ def int_aarch64_udiv : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
def int_aarch64_hint : Intrinsic<[], [llvm_i32_ty]>;
//===----------------------------------------------------------------------===//
// RBIT
def int_aarch64_rbit : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
[IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Data Barrier Instructions

View File

@ -155,11 +155,6 @@ def int_arm_crc32cw : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
def int_arm_hint : Intrinsic<[], [llvm_i32_ty]>;
def int_arm_dbg : Intrinsic<[], [llvm_i32_ty]>;
//===----------------------------------------------------------------------===//
// RBIT
def int_arm_rbit : Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// UND (reserved undefined sequence)

View File

@ -317,10 +317,10 @@ struct TypeTestResolution {
/// All-Ones Bit Vectors")
} TheKind = Unsat;
/// Range of the size expressed as a bit width. For example, if the size is in
/// range [0,256), this number will be 8. This helps generate the most compact
/// Range of size-1 expressed as a bit width. For example, if the size is in
/// range [1,256], this number will be 8. This helps generate the most compact
/// instruction sequences.
unsigned SizeBitWidth = 0;
unsigned SizeM1BitWidth = 0;
};
struct TypeIdSummary {

View File

@ -29,7 +29,7 @@ template <> struct ScalarEnumerationTraits<TypeTestResolution::Kind> {
template <> struct MappingTraits<TypeTestResolution> {
static void mapping(IO &io, TypeTestResolution &res) {
io.mapOptional("Kind", res.TheKind);
io.mapOptional("SizeBitWidth", res.SizeBitWidth);
io.mapOptional("SizeM1BitWidth", res.SizeM1BitWidth);
}
};
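
The rename reflects a small encoding trick: storing size minus one keeps a range whose upper bound is a power of two inside its natural bit width. A standalone check of the arithmetic in the comment above:

#include <cassert>
#include <cstdint>

// Bits needed to represent Value (0 needs 0 bits, 255 needs 8, 256 needs 9).
unsigned bitWidth(uint64_t Value) {
  unsigned Bits = 0;
  for (; Value; Value >>= 1)
    ++Bits;
  return Bits;
}

int main() {
  // Sizes in [1, 256]: storing Size would need 9 bits for 256, but storing
  // Size - 1 keeps the whole range inside [0, 255], so SizeM1BitWidth == 8.
  assert(bitWidth(256) == 9);
  assert(bitWidth(255) == 8);
}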

View File

@ -0,0 +1,64 @@
//===-- Decompressor.h ------------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_OBJECT_DECOMPRESSOR_H
#define LLVM_OBJECT_DECOMPRESSOR_H
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Object/ObjectFile.h"
namespace llvm {
namespace object {
/// @brief Decompressor helps to handle decompression of compressed sections.
class Decompressor {
public:
/// @brief Create decompressor object.
/// @param Name Section name.
/// @param Data Section content.
/// @param IsLE Flag determines if Data is in little endian form.
/// @param Is64Bit Flag determines if object is 64 bit.
static Expected<Decompressor> create(StringRef Name, StringRef Data,
bool IsLE, bool Is64Bit);
/// @brief Resize the buffer and uncompress section data into it.
/// @param Out Destination buffer.
Error decompress(SmallString<32> &Out);
/// @brief Uncompress section data to raw buffer provided.
/// @param Buffer Destination buffer.
Error decompress(MutableArrayRef<char> Buffer);
/// @brief Return memory buffer size required for decompression.
uint64_t getDecompressedSize() { return DecompressedSize; }
/// @brief Return true if section is compressed, including gnu-styled case.
static bool isCompressed(const object::SectionRef &Section);
/// @brief Return true if section is an ELF compressed one.
static bool isCompressedELFSection(uint64_t Flags, StringRef Name);
/// @brief Return true if section name matches gnu style compressed one.
static bool isGnuStyle(StringRef Name);
private:
Decompressor(StringRef Data);
Error consumeCompressedGnuHeader();
Error consumeCompressedZLibHeader(bool Is64Bit, bool IsLittleEndian);
StringRef SectionData;
uint64_t DecompressedSize;
};
} // end namespace object
} // end namespace llvm
#endif // LLVM_OBJECT_DECOMPRESSOR_H
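
A hedged sketch of using the new helper from code that already has a section's name and raw contents in hand; the calling context is assumed, and the isGnuStyle/isCompressedELFSection predicates above let callers decide whether decompression is needed at all.

#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Object/Decompressor.h"
#include "llvm/Support/Error.h"

using namespace llvm;
using namespace llvm::object;

// Sketch: inflate one compressed debug section into an owned buffer.
Expected<SmallString<32>> decompressSection(StringRef Name, StringRef Contents,
                                            bool IsLE, bool Is64Bit) {
  Expected<Decompressor> D =
      Decompressor::create(Name, Contents, IsLE, Is64Bit);
  if (!D)
    return D.takeError();
  SmallString<32> Out;
  if (Error E = D->decompress(Out))
    return std::move(E);
  return Out;
}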

View File

@ -85,6 +85,41 @@ struct Unit {
std::vector<Entry> Entries;
};
struct File {
StringRef Name;
uint64_t DirIdx;
uint64_t ModTime;
uint64_t Length;
};
struct LineTableOpcode {
dwarf::LineNumberOps Opcode;
uint64_t ExtLen;
dwarf::LineNumberExtendedOps SubOpcode;
uint64_t Data;
int64_t SData;
File FileEntry;
std::vector<llvm::yaml::Hex8> UnknownOpcodeData;
std::vector<llvm::yaml::Hex64> StandardOpcodeData;
};
struct LineTable {
uint32_t TotalLength;
uint64_t TotalLength64;
uint16_t Version;
uint64_t PrologueLength;
uint8_t MinInstLength;
uint8_t MaxOpsPerInst;
uint8_t DefaultIsStmt;
uint8_t LineBase;
uint8_t LineRange;
uint8_t OpcodeBase;
std::vector<uint8_t> StandardOpcodeLengths;
std::vector<StringRef> IncludeDirs;
std::vector<File> Files;
std::vector<LineTableOpcode> Opcodes;
};
struct Data {
bool IsLittleEndian;
std::vector<Abbrev> AbbrevDecls;
@ -98,6 +133,8 @@ struct Data {
std::vector<Unit> CompileUnits;
std::vector<LineTable> DebugLines;
bool isEmpty() const;
};
@ -105,6 +142,7 @@ struct Data {
} // namespace llvm
LLVM_YAML_IS_SEQUENCE_VECTOR(uint8_t)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex64)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::StringRef)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex8)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::AttributeAbbrev)
@ -115,6 +153,9 @@ LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::PubEntry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Unit)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::FormValue)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::Entry)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::File)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::LineTable)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::DWARFYAML::LineTableOpcode)
namespace llvm {
namespace yaml {
@ -159,6 +200,18 @@ template <> struct MappingTraits<DWARFYAML::FormValue> {
static void mapping(IO &IO, DWARFYAML::FormValue &FormValue);
};
template <> struct MappingTraits<DWARFYAML::File> {
static void mapping(IO &IO, DWARFYAML::File &File);
};
template <> struct MappingTraits<DWARFYAML::LineTableOpcode> {
static void mapping(IO &IO, DWARFYAML::LineTableOpcode &LineTableOpcode);
};
template <> struct MappingTraits<DWARFYAML::LineTable> {
static void mapping(IO &IO, DWARFYAML::LineTable &LineTable);
};
#define HANDLE_DW_TAG(unused, name) \
io.enumCase(value, "DW_TAG_" #name, dwarf::DW_TAG_##name);
@ -169,6 +222,26 @@ template <> struct ScalarEnumerationTraits<dwarf::Tag> {
}
};
#define HANDLE_DW_LNS(unused, name) \
io.enumCase(value, "DW_LNS_" #name, dwarf::DW_LNS_##name);
template <> struct ScalarEnumerationTraits<dwarf::LineNumberOps> {
static void enumeration(IO &io, dwarf::LineNumberOps &value) {
#include "llvm/Support/Dwarf.def"
io.enumFallback<Hex8>(value);
}
};
#define HANDLE_DW_LNE(unused, name) \
io.enumCase(value, "DW_LNE_" #name, dwarf::DW_LNE_##name);
template <> struct ScalarEnumerationTraits<dwarf::LineNumberExtendedOps> {
static void enumeration(IO &io, dwarf::LineNumberExtendedOps &value) {
#include "llvm/Support/Dwarf.def"
io.enumFallback<Hex16>(value);
}
};
#define HANDLE_DW_AT(unused, name) \
io.enumCase(value, "DW_AT_" #name, dwarf::DW_AT_##name);

View File

@ -139,7 +139,6 @@ struct UniversalBinary {
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::LoadCommand)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::Section)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::yaml::Hex64)
LLVM_YAML_IS_SEQUENCE_VECTOR(int64_t)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::RebaseOpcode)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::MachOYAML::BindOpcode)

View File

@ -18,8 +18,8 @@
#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <vector>
namespace llvm {

View File

@ -230,6 +230,15 @@ class InstrProfSymtab;
/// bytes. This method decodes the string and populates the \c Symtab.
Error readPGOFuncNameStrings(StringRef NameStrings, InstrProfSymtab &Symtab);
/// Check if INSTR_PROF_RAW_VERSION_VAR is defined. This global is only being
/// set in IR PGO compilation.
bool isIRPGOFlagSet(const Module *M);
/// Check if we can safely rename this Comdat function. Instances of the same
/// comdat function may have different control flows thus can not share the
/// same counter variable.
bool canRenameComdatFunc(const Function &F, bool CheckAddressTaken = false);
enum InstrProfValueKind : uint32_t {
#define VALUE_PROF_KIND(Enumerator, Value) Enumerator = Value,
#include "llvm/ProfileData/InstrProfData.inc"

View File

@ -201,7 +201,7 @@ class SubCommand {
void reset();
operator bool() const;
explicit operator bool() const;
StringRef getName() const { return Name; }
StringRef getDescription() const { return Description; }

View File

@ -207,7 +207,7 @@ enum DiscriminantList {
};
/// Line Number Standard Opcode Encodings.
enum LineNumberOps {
enum LineNumberOps : uint8_t {
#define HANDLE_DW_LNS(ID, NAME) DW_LNS_##NAME = ID,
#include "llvm/Support/Dwarf.def"
};

View File

@ -78,11 +78,12 @@ class FileOutputBuffer {
FileOutputBuffer &operator=(const FileOutputBuffer &) = delete;
FileOutputBuffer(std::unique_ptr<llvm::sys::fs::mapped_file_region> R,
StringRef Path, StringRef TempPath);
StringRef Path, StringRef TempPath, bool IsRegular);
std::unique_ptr<llvm::sys::fs::mapped_file_region> Region;
SmallString<128> FinalPath;
SmallString<128> TempPath;
bool IsRegular;
};
} // end namespace llvm

View File

@ -571,9 +571,15 @@ template <class NodeT> class DominatorTreeBase : public DominatorBase<NodeT> {
// API to update (Post)DominatorTree information based on modifications to
// the CFG...
/// addNewBlock - Add a new node to the dominator tree information. This
/// creates a new node as a child of DomBB dominator node,linking it into
/// the children list of the immediate dominator.
/// Add a new node to the dominator tree information.
///
/// This creates a new node as a child of DomBB dominator node, linking it
/// into the children list of the immediate dominator.
///
/// \param BB New node in CFG.
/// \param DomBB CFG node that is dominator for BB.
/// \returns New dominator tree node that represents new CFG node.
///
DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
assert(getNode(BB) == nullptr && "Block already in dominator tree!");
DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
@ -583,6 +589,31 @@ template <class NodeT> class DominatorTreeBase : public DominatorBase<NodeT> {
llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, IDomNode))).get();
}
/// Add a new node to the forward dominator tree and make it a new root.
///
/// \param BB New node in CFG.
/// \returns New dominator tree node that represents new CFG node.
///
DomTreeNodeBase<NodeT> *setNewRoot(NodeT *BB) {
assert(getNode(BB) == nullptr && "Block already in dominator tree!");
assert(!this->isPostDominator() &&
"Cannot change root of post-dominator tree");
DFSInfoValid = false;
auto &Roots = DominatorBase<NodeT>::Roots;
DomTreeNodeBase<NodeT> *NewNode = (DomTreeNodes[BB] =
llvm::make_unique<DomTreeNodeBase<NodeT>>(BB, nullptr)).get();
if (Roots.empty()) {
addRoot(BB);
} else {
assert(Roots.size() == 1);
NodeT *OldRoot = Roots.front();
DomTreeNodes[OldRoot] =
NewNode->addChild(std::move(DomTreeNodes[OldRoot]));
Roots[0] = BB;
}
return RootNode = NewNode;
}
/// changeImmediateDominator - This method is used to update the dominator
/// tree information when a node's immediate dominator changes.
///
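
A hedged sketch of keeping a forward dominator tree current while restructuring a function with the two calls documented above; the transformation itself is invented, and a real pass would also update loop info and other analyses.

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Sketch: give the function a fresh entry block that jumps to the old entry,
// then make that block the new root of the dominator tree.
void prependEntryBlock(Function &F, DominatorTree &DT) {
  BasicBlock *OldEntry = &F.getEntryBlock();
  BasicBlock *NewEntry =
      BasicBlock::Create(F.getContext(), "new.entry", &F, OldEntry);
  IRBuilder<> B(NewEntry);
  B.CreateBr(OldEntry);
  DT.setNewRoot(NewEntry); // the old root is re-parented under the new one
}

// Sketch: a block created deeper in the CFG is simply registered as a child
// of its immediate dominator.
void registerNewBlock(BasicBlock *NewBB, BasicBlock *DomBB,
                      DominatorTree &DT) {
  DT.addNewBlock(NewBB, DomBB);
}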

View File

@ -23,66 +23,80 @@
#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>
#include <utility>
#include <vector>
namespace llvm {
class BranchProbability;
class CallInst;
class CCState;
class CCValAssign;
class FastISel;
class FunctionLoweringInfo;
class ImmutableCallSite;
class IntrinsicInst;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class Mangler;
class MCContext;
class MCExpr;
class MCSymbol;
template<typename T> class SmallVectorImpl;
class DataLayout;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetLoweringObjectFile;
class Value;
namespace Sched {
enum Preference {
None, // No preference
Source, // Follow source order.
RegPressure, // Scheduling for lowest register pressure.
Hybrid, // Scheduling for both latency and register pressure.
ILP, // Scheduling for ILP in low register pressure mode.
VLIW // Scheduling for VLIW targets.
};
}
class BranchProbability;
class CCState;
class CCValAssign;
class FastISel;
class FunctionLoweringInfo;
class IntrinsicInst;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
class MachineLoop;
class MachineRegisterInfo;
class MCContext;
class MCExpr;
class TargetRegisterClass;
class TargetLibraryInfo;
class TargetRegisterInfo;
class Value;
namespace Sched {
enum Preference {
None, // No preference
Source, // Follow source order.
RegPressure, // Scheduling for lowest register pressure.
Hybrid, // Scheduling for both latency and register pressure.
ILP, // Scheduling for ILP in low register pressure mode.
VLIW // Scheduling for VLIW targets.
};
} // end namespace Sched
/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
TargetLoweringBase(const TargetLoweringBase&) = delete;
void operator=(const TargetLoweringBase&) = delete;
public:
/// This enum indicates whether operations are valid for a target, and if not,
/// what action should be used to make them valid.
@ -166,7 +180,9 @@ class TargetLoweringBase {
/// NOTE: The TargetMachine owns TLOF.
explicit TargetLoweringBase(const TargetMachine &TM);
virtual ~TargetLoweringBase() {}
TargetLoweringBase(const TargetLoweringBase&) = delete;
void operator=(const TargetLoweringBase&) = delete;
virtual ~TargetLoweringBase() = default;
protected:
/// \brief Initialize all of the actions to default values.
@ -599,19 +615,18 @@ class TargetLoweringBase {
MVT &RegisterVT) const;
struct IntrinsicInfo {
unsigned opc; // target opcode
EVT memVT; // memory VT
const Value* ptrVal; // value representing memory location
int offset; // offset off of ptrVal
unsigned size; // the size of the memory location
// (taken from memVT if zero)
unsigned align; // alignment
bool vol; // is volatile?
bool readMem; // reads memory?
bool writeMem; // writes memory?
unsigned opc = 0; // target opcode
EVT memVT; // memory VT
const Value* ptrVal = nullptr; // value representing memory location
int offset = 0; // offset off of ptrVal
unsigned size = 0; // the size of the memory location
// (taken from memVT if zero)
unsigned align = 1; // alignment
bool vol = false; // is volatile?
bool readMem = false; // reads memory?
bool writeMem = false; // writes memory?
IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
vol(false), readMem(false), writeMem(false) {}
IntrinsicInfo() = default;
};
/// Given an intrinsic, checks if on the target the intrinsic will need to map
@ -823,7 +838,6 @@ class TargetLoweringBase {
getCondCodeAction(CC, VT) == Custom;
}
/// If the action for this operation is to promote, this method returns the
/// ValueType to promote to.
MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
@ -1643,11 +1657,11 @@ class TargetLoweringBase {
/// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
/// no scale.
struct AddrMode {
GlobalValue *BaseGV;
int64_t BaseOffs;
bool HasBaseReg;
int64_t Scale;
AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
GlobalValue *BaseGV = nullptr;
int64_t BaseOffs = 0;
bool HasBaseReg = false;
int64_t Scale = 0;
AddrMode() = default;
};
/// Return true if the addressing mode represented by AM is legal for this
@ -2093,8 +2107,6 @@ class TargetLoweringBase {
private:
LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
private:
/// Targets can specify ISD nodes that they would like PerformDAGCombine
/// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
/// array.
@ -2192,7 +2204,6 @@ class TargetLoweringBase {
/// \see enableExtLdPromotion.
bool EnableExtLdPromotion;
protected:
/// Return true if the value types that can be represented by the specified
/// register class are all legal.
bool isLegalRC(const TargetRegisterClass *RC) const;
@ -2209,12 +2220,12 @@ class TargetLoweringBase {
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
TargetLowering(const TargetLowering&) = delete;
void operator=(const TargetLowering&) = delete;
public:
struct DAGCombinerInfo;
TargetLowering(const TargetLowering&) = delete;
void operator=(const TargetLowering&) = delete;
/// NOTE: The TargetMachine owns TLOF.
explicit TargetLowering(const TargetMachine &TM);
@ -2376,6 +2387,7 @@ class TargetLowering : public TargetLoweringBase {
void *DC; // The DAG Combiner object.
CombineLevel Level;
bool CalledByLegalizer;
public:
SelectionDAG &DAG;
@ -2542,7 +2554,7 @@ class TargetLowering : public TargetLoweringBase {
ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
isReturned(false), isSwiftSelf(false), isSwiftError(false),
Alignment(0) { }
Alignment(0) {}
void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
};
@ -2681,7 +2693,6 @@ class TargetLowering : public TargetLoweringBase {
ArgListTy &getArgs() {
return Args;
}
};
/// This function lowers an abstract call to a function into an actual call.
@ -3118,6 +3129,13 @@ class TargetLowering : public TargetLoweringBase {
EVT DataVT, SelectionDAG &DAG,
bool IsCompressedMemory) const;
/// Get a pointer to vector element \p Idx located in memory for a vector of
/// type \p VecVT starting at a base address of \p VecPtr. If \p Idx is out of
/// bounds the returned pointer is unspecified, but will be within the vector
/// bounds.
SDValue getVectorElementPointer(SelectionDAG &DAG, SDValue VecPtr, EVT VecVT,
SDValue Idx) const;
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
@ -3169,6 +3187,6 @@ void GetReturnInfo(Type *ReturnType, AttributeSet attr,
SmallVectorImpl<ISD::OutputArg> &Outs,
const TargetLowering &TLI, const DataLayout &DL);
} // end llvm namespace
} // end namespace llvm
#endif
#endif // LLVM_TARGET_TARGETLOWERING_H
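
Several structs in this header (IntrinsicInfo, AddrMode) switch from hand-written constructor init lists to C++11 in-class default member initializers plus a defaulted constructor. A tiny self-contained illustration of the pattern with an invented struct:

#include <cstdint>

// Before: defaults live only in the constructor, far from the fields.
struct AddrModeOld {
  int64_t BaseOffs;
  bool HasBaseReg;
  int64_t Scale;
  AddrModeOld() : BaseOffs(0), HasBaseReg(false), Scale(0) {}
};

// After: each field documents its own default, and adding a member cannot
// silently leave it uninitialized.
struct AddrModeNew {
  int64_t BaseOffs = 0;
  bool HasBaseReg = false;
  int64_t Scale = 0;
  AddrModeNew() = default;
};

int main() {
  AddrModeNew AM;
  return AM.HasBaseReg ? 1 : 0; // 0: the defaults were applied
}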

View File

@ -20,36 +20,26 @@
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <string>
namespace llvm {
class InstrItineraryData;
class GlobalValue;
class Mangler;
class MachineFunctionInitializer;
class MachineModuleInfo;
class Mangler;
class MCAsmInfo;
class MCContext;
class MCInstrInfo;
class MCRegisterInfo;
class MCSubtargetInfo;
class MCSymbol;
class Target;
class TargetLibraryInfo;
class TargetFrameLowering;
class TargetIRAnalysis;
class TargetIntrinsicInfo;
class TargetLowering;
class TargetPassConfig;
class TargetRegisterInfo;
class TargetSubtargetInfo;
class TargetTransformInfo;
class formatted_raw_ostream;
class raw_ostream;
class raw_pwrite_stream;
class Target;
class TargetIntrinsicInfo;
class TargetIRAnalysis;
class TargetLoweringObjectFile;
class TargetPassConfig;
class TargetSubtargetInfo;
// The old pass manager infrastructure is hidden in a legacy namespace now.
namespace legacy {
@ -64,8 +54,6 @@ using legacy::PassManagerBase;
/// interface.
///
class TargetMachine {
TargetMachine(const TargetMachine &) = delete;
void operator=(const TargetMachine &) = delete;
protected: // Can only create subclasses.
TargetMachine(const Target &T, StringRef DataLayoutString,
const Triple &TargetTriple, StringRef CPU, StringRef FS,
@ -103,8 +91,11 @@ class TargetMachine {
unsigned O0WantsFastISel : 1;
public:
const TargetOptions DefaultOptions;
mutable TargetOptions Options;
TargetMachine(const TargetMachine &) = delete;
void operator=(const TargetMachine &) = delete;
virtual ~TargetMachine();
const Target &getTarget() const { return TheTarget; }
@ -310,6 +301,6 @@ class LLVMTargetMachine : public TargetMachine {
bool DisableVerify = true) override;
};
} // End llvm namespace
} // end namespace llvm
#endif
#endif // LLVM_TARGET_TARGETMACHINE_H

View File

@ -575,9 +575,6 @@ def intrinsic_w_chain : SDNode<"ISD::INTRINSIC_W_CHAIN",
def intrinsic_wo_chain : SDNode<"ISD::INTRINSIC_WO_CHAIN",
SDTypeProfile<1, -1, [SDTCisPtrTy<1>]>, []>;
// Do not use cvt directly. Use cvt forms below
def cvt : SDNode<"ISD::CONVERT_RNDSAT", SDTConvertOp>;
def SDT_assertext : SDTypeProfile<1, 1,
[SDTCisInt<0>, SDTCisInt<1>, SDTCisSameAs<1, 0>]>;
def assertsext : SDNode<"ISD::AssertSext", SDT_assertext>;
@ -1084,54 +1081,6 @@ def atomic_load_64 :
return cast<AtomicSDNode>(N)->getMemoryVT() == MVT::i64;
}]>;
//===----------------------------------------------------------------------===//
// Selection DAG CONVERT_RNDSAT patterns
def cvtff : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FF;
}]>;
def cvtss : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SS;
}]>;
def cvtsu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SU;
}]>;
def cvtus : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_US;
}]>;
def cvtuu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UU;
}]>;
def cvtsf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_SF;
}]>;
def cvtuf : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_UF;
}]>;
def cvtfs : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FS;
}]>;
def cvtfu : PatFrag<(ops node:$val, node:$dty, node:$sty, node:$rd, node:$sat),
(cvt node:$val, node:$dty, node:$sty, node:$rd, node:$sat), [{
return cast<CvtRndSatSDNode>(N)->getCvtCode() == ISD::CVT_FU;
}]>;
//===----------------------------------------------------------------------===//
// Selection DAG Pattern Support.
//

View File

@ -14,23 +14,26 @@
#ifndef LLVM_TARGET_TARGETSUBTARGETINFO_H
#define LLVM_TARGET_TARGETSUBTARGETINFO_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/PBQPRAConstraint.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/ScheduleDAGMutation.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/CodeGen.h"
#include <memory>
#include <vector>
namespace llvm {
class CallLowering;
class DataLayout;
class InstructionSelector;
class LegalizerInfo;
class MachineFunction;
class MachineInstr;
class RegisterBankInfo;
class SDep;
class SelectionDAGTargetInfo;
class SUnit;
class TargetFrameLowering;
class TargetInstrInfo;
@ -38,9 +41,7 @@ class TargetLowering;
class TargetRegisterClass;
class TargetRegisterInfo;
class TargetSchedModel;
class SelectionDAGTargetInfo;
struct MachineSchedPolicy;
template <typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
///
@ -49,10 +50,6 @@ template <typename T> class SmallVectorImpl;
/// be exposed through a TargetSubtargetInfo-derived class.
///
class TargetSubtargetInfo : public MCSubtargetInfo {
TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
void operator=(const TargetSubtargetInfo &) = delete;
TargetSubtargetInfo() = delete;
protected: // Can only create subclasses...
TargetSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS,
ArrayRef<SubtargetFeatureKV> PF,
@ -69,6 +66,9 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
typedef enum { ANTIDEP_NONE, ANTIDEP_CRITICAL, ANTIDEP_ALL } AntiDepBreakMode;
typedef SmallVectorImpl<const TargetRegisterClass *> RegClassVector;
TargetSubtargetInfo() = delete;
TargetSubtargetInfo(const TargetSubtargetInfo &) = delete;
void operator=(const TargetSubtargetInfo &) = delete;
virtual ~TargetSubtargetInfo();
virtual bool isXRaySupported() const { return false; }
@ -229,6 +229,6 @@ class TargetSubtargetInfo : public MCSubtargetInfo {
virtual bool enableSubRegLiveness() const { return false; }
};
} // End llvm namespace
} // end namespace llvm
#endif
#endif // LLVM_TARGET_TARGETSUBTARGETINFO_H

View File

@ -0,0 +1,30 @@
//===- IVUsersPrinter.h - Induction Variable Users Printing -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
#define LLVM_TRANSFORMS_SCALAR_IVUSERSPRINTER_H
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
/// Printer pass for the \c IVUsers for a loop.
class IVUsersPrinterPass : public PassInfoMixin<IVUsersPrinterPass> {
raw_ostream &OS;
public:
explicit IVUsersPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
}
#endif

View File

@ -16,14 +16,15 @@
#define LLVM_TRANSFORMS_SCALAR_INDVARSIMPLIFY_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
class IndVarSimplifyPass : public PassInfoMixin<IndVarSimplifyPass> {
public:
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
}
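
Every loop pass touched in this series gains the same four-argument run signature shown above. A minimal sketch of a do-nothing loop pass written against it; the pass name is invented.

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"

using namespace llvm;

// Invented example pass showing the post-change run() signature shared by
// IndVarSimplify, LICM, LoopRotate and the rest.
class ExampleLoopPass : public PassInfoMixin<ExampleLoopPass> {
public:
  PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
                        LoopStandardAnalysisResults &AR, LPMUpdater &U) {
    // AR hands the pass the standard analyses (AA, AC, DT, LI, SE, TLI, TTI)
    // without individual getResult calls; U is only needed when the pass
    // changes the loop nest, e.g. via U.markLoopAsDeleted(L).
    (void)L;
    (void)AM;
    (void)AR;
    (void)U;
    return PreservedAnalyses::all();
  }
};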

View File

@ -34,15 +34,16 @@
#define LLVM_TRANSFORMS_SCALAR_LICM_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
/// Performs Loop Invariant Code Motion Pass.
class LICMPass : public PassInfoMixin<LICMPass> {
public:
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

View File

@ -0,0 +1,31 @@
//===- llvm/Analysis/LoopAccessAnalysisPrinter.h ----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
#define LLVM_TRANSFORMS_SCALAR_LOOPACCESSANALYSISPRINTER_H
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
/// \brief Printer pass for the \c LoopAccessInfo results.
class LoopAccessInfoPrinterPass
: public PassInfoMixin<LoopAccessInfoPrinterPass> {
raw_ostream &OS;
public:
explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // End llvm namespace
#endif

View File

@ -15,16 +15,17 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPDELETION_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
class LoopDeletionPass : public PassInfoMixin<LoopDeletionPass> {
public:
LoopDeletionPass() {}
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
bool runImpl(Loop *L, DominatorTree &DT, ScalarEvolution &SE,
LoopInfo &loopInfo);

View File

@ -17,15 +17,16 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPIDIOMRECOGNIZE_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
/// Performs Loop Idiom Recognize Pass.
class LoopIdiomRecognizePass : public PassInfoMixin<LoopIdiomRecognizePass> {
public:
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

View File

@ -15,15 +15,16 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPINSTSIMPLIFY_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
/// Performs Loop Inst Simplify Pass.
class LoopInstSimplifyPass : public PassInfoMixin<LoopInstSimplifyPass> {
public:
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

View File

@ -0,0 +1,363 @@
//===- LoopPassManager.h - Loop pass management -----------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This header provides classes for managing a pipeline of passes over loops
/// in LLVM IR.
///
/// The primary loop pass pipeline is managed in a very particular way to
/// provide a set of core guarantees:
/// 1) Loops are, where possible, in simplified form.
/// 2) Loops are *always* in LCSSA form.
/// 3) A collection of Loop-specific analysis results are available:
/// - LoopInfo
/// - DominatorTree
/// - ScalarEvolution
/// - AAManager
/// 4) All loop passes preserve #1 (where possible), #2, and #3.
/// 5) Loop passes run over each loop in the loop nest from the innermost to
/// the outermost. Specifically, all inner loops are processed before
/// passes run over outer loops. When running the pipeline across an inner
/// loop creates new inner loops, those are added and processed in this
/// order as well.
///
/// This process is designed to facilitate transformations which simplify,
/// reduce, and remove loops. For passes which are more oriented towards
/// optimizing loops, especially optimizing loop *nests* instead of single
/// loops in isolation, this framework is less interesting.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
#define LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/PriorityWorklist.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"
namespace llvm {
// Forward declarations of an update tracking API used in the pass manager.
class LPMUpdater;
// Explicit specialization and instantiation declarations for the pass manager.
// See the comments on the definition of the specialization for details on how
// it differs from the primary template.
template <>
PreservedAnalyses
PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
LPMUpdater &>::run(Loop &InitialL, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AnalysisResults,
LPMUpdater &U);
extern template class PassManager<Loop, LoopAnalysisManager,
LoopStandardAnalysisResults &, LPMUpdater &>;
/// \brief The Loop pass manager.
///
/// See the documentation for the PassManager template for details. It runs
/// a sequence of Loop passes over each Loop that the manager is run over. This
/// typedef serves as a convenient way to refer to this construct.
typedef PassManager<Loop, LoopAnalysisManager, LoopStandardAnalysisResults &,
LPMUpdater &>
LoopPassManager;
/// A partial specialization of the require analysis template pass to forward
/// the extra parameters from a transformation's run method to the
/// AnalysisManager's getResult.
template <typename AnalysisT>
struct RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
LoopStandardAnalysisResults &, LPMUpdater &>
: PassInfoMixin<
RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
LoopStandardAnalysisResults &, LPMUpdater &>> {
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &) {
(void)AM.template getResult<AnalysisT>(L, AR);
return PreservedAnalyses::all();
}
};
/// An alias template to easily name a require analysis loop pass.
template <typename AnalysisT>
using RequireAnalysisLoopPass =
RequireAnalysisPass<AnalysisT, Loop, LoopAnalysisManager,
LoopStandardAnalysisResults &, LPMUpdater &>;
namespace internal {
/// Helper to implement appending of loops onto a worklist.
///
/// We want to process loops in postorder, but the worklist is a LIFO data
/// structure, so we append to it in *reverse* postorder.
///
/// For trees, a preorder traversal is a viable reverse postorder, so we
/// actually append using a preorder walk algorithm.
template <typename RangeT>
inline void appendLoopsToWorklist(RangeT &&Loops,
SmallPriorityWorklist<Loop *, 4> &Worklist) {
// We use an internal worklist to build up the preorder traversal without
// recursion.
SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
// We walk the initial sequence of loops in reverse because we generally want
// to visit defs before uses and the worklist is LIFO.
for (Loop *RootL : reverse(Loops)) {
assert(PreOrderLoops.empty() && "Must start with an empty preorder walk.");
assert(PreOrderWorklist.empty() &&
"Must start with an empty preorder walk worklist.");
PreOrderWorklist.push_back(RootL);
do {
Loop *L = PreOrderWorklist.pop_back_val();
PreOrderWorklist.append(L->begin(), L->end());
PreOrderLoops.push_back(L);
} while (!PreOrderWorklist.empty());
Worklist.insert(std::move(PreOrderLoops));
PreOrderLoops.clear();
}
}
}
template <typename LoopPassT> class FunctionToLoopPassAdaptor;
/// This class provides an interface for updating the loop pass manager based
/// on mutations to the loop nest.
///
/// A reference to an instance of this class is passed as an argument to each
/// Loop pass, and Loop passes should use it to update LPM infrastructure if
/// they modify the loop nest structure.
class LPMUpdater {
public:
/// This can be queried by loop passes which run other loop passes (like pass
/// managers) to know whether the loop needs to be skipped due to updates to
/// the loop nest.
///
/// If this returns true, the loop object may have been deleted, so passes
/// should take care not to touch the object.
bool skipCurrentLoop() const { return SkipCurrentLoop; }
/// Loop passes should use this method to indicate they have deleted a loop
/// from the nest.
///
/// Note that this loop must either be the current loop or a subloop of the
/// current loop. This routine must be called prior to removing the loop from
/// the loop nest.
///
/// If this is called for the current loop, in addition to clearing any
/// state, this routine will mark that the current loop should be skipped by
/// the rest of the pass management infrastructure.
void markLoopAsDeleted(Loop &L) {
LAM.clear(L);
assert(CurrentL->contains(&L) && "Cannot delete a loop outside of the "
"subloop tree currently being processed.");
if (&L == CurrentL)
SkipCurrentLoop = true;
}
/// Loop passes should use this method to indicate they have added new child
/// loops of the current loop.
///
/// \p NewChildLoops must contain only the immediate children. Any nested
/// loops within them will be visited in postorder as usual for the loop pass
/// manager.
void addChildLoops(ArrayRef<Loop *> NewChildLoops) {
// Insert ourselves back into the worklist first, as this loop should be
// revisited after all the children have been processed.
Worklist.insert(CurrentL);
#ifndef NDEBUG
for (Loop *NewL : NewChildLoops)
assert(NewL->getParentLoop() == CurrentL && "All of the new loops must "
"be immediate children of "
"the current loop!");
#endif
internal::appendLoopsToWorklist(NewChildLoops, Worklist);
// Also skip further processing of the current loop--it will be revisited
// after all of its newly added children are accounted for.
SkipCurrentLoop = true;
}
/// Loop passes should use this method to indicate they have added new
/// sibling loops to the current loop.
///
/// \p NewSibLoops must only contain the immediate sibling loops. Any nested
/// loops within them will be visited in postorder as usual for the loop pass
/// manager.
void addSiblingLoops(ArrayRef<Loop *> NewSibLoops) {
#ifndef NDEBUG
for (Loop *NewL : NewSibLoops)
assert(NewL->getParentLoop() == ParentL &&
"All of the new loops must be siblings of the current loop!");
#endif
internal::appendLoopsToWorklist(NewSibLoops, Worklist);
// No need to skip the current loop or revisit it, as sibling loops
// shouldn't impact anything.
}
private:
template <typename LoopPassT> friend class llvm::FunctionToLoopPassAdaptor;
/// The \c FunctionToLoopPassAdaptor's worklist of loops to process.
SmallPriorityWorklist<Loop *, 4> &Worklist;
/// The analysis manager for use in the current loop nest.
LoopAnalysisManager &LAM;
Loop *CurrentL;
bool SkipCurrentLoop;
#ifndef NDEBUG
// In debug builds we also track the parent loop to implement asserts even in
// the face of loop deletion.
Loop *ParentL;
#endif
LPMUpdater(SmallPriorityWorklist<Loop *, 4> &Worklist,
LoopAnalysisManager &LAM)
: Worklist(Worklist), LAM(LAM) {}
};
/// \brief Adaptor that maps from a function to its loops.
///
/// Designed to allow composition of a LoopPass(Manager) and a
/// FunctionPassManager. Note that if this pass is constructed with a \c
/// FunctionAnalysisManager it will run the \c LoopAnalysisManagerFunctionProxy
/// analysis prior to running the loop passes over the function to enable a \c
/// LoopAnalysisManager to be used within this run safely.
template <typename LoopPassT>
class FunctionToLoopPassAdaptor
: public PassInfoMixin<FunctionToLoopPassAdaptor<LoopPassT>> {
public:
explicit FunctionToLoopPassAdaptor(LoopPassT Pass) : Pass(std::move(Pass)) {}
/// \brief Runs the loop passes across every loop in the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
// Setup the loop analysis manager from its proxy.
LoopAnalysisManager &LAM =
AM.getResult<LoopAnalysisManagerFunctionProxy>(F).getManager();
// Get the loop structure for this function
LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
// If there are no loops, there is nothing to do here.
if (LI.empty())
return PreservedAnalyses::all();
// Get the analysis results needed by loop passes.
LoopStandardAnalysisResults LAR = {AM.getResult<AAManager>(F),
AM.getResult<AssumptionAnalysis>(F),
AM.getResult<DominatorTreeAnalysis>(F),
AM.getResult<LoopAnalysis>(F),
AM.getResult<ScalarEvolutionAnalysis>(F),
AM.getResult<TargetLibraryAnalysis>(F),
AM.getResult<TargetIRAnalysis>(F)};
PreservedAnalyses PA = PreservedAnalyses::all();
// A postorder worklist of loops to process.
SmallPriorityWorklist<Loop *, 4> Worklist;
// Register the worklist and loop analysis manager so that loop passes can
// update them when they mutate the loop nest structure.
LPMUpdater Updater(Worklist, LAM);
// Add the loop nests in the reverse order of LoopInfo. For some reason,
// they are stored in RPO w.r.t. the control flow graph in LoopInfo. For
// the purpose of unrolling, loop deletion, and LICM, we largely want to
// work forward across the CFG so that we visit defs before uses and can
// propagate simplifications from one loop nest into the next.
// FIXME: Consider changing the order in LoopInfo.
internal::appendLoopsToWorklist(reverse(LI), Worklist);
do {
Loop *L = Worklist.pop_back_val();
// Reset the update structure for this loop.
Updater.CurrentL = L;
Updater.SkipCurrentLoop = false;
#ifndef NDEBUG
Updater.ParentL = L->getParentLoop();
#endif
PreservedAnalyses PassPA = Pass.run(*L, LAM, LAR, Updater);
// FIXME: We should verify the set of analyses relevant to Loop passes
// are preserved.
// If the loop hasn't been deleted, we need to handle invalidation here.
if (!Updater.skipCurrentLoop())
// We know that the loop pass couldn't have invalidated any other
// loop's analyses (that's the contract of a loop pass), so directly
// handle the loop analysis manager's invalidation here.
LAM.invalidate(*L, PassPA);
// Then intersect the preserved set so that invalidation of module
// analyses will eventually occur when the module pass completes.
PA.intersect(std::move(PassPA));
} while (!Worklist.empty());
// By definition we preserve the proxy. We also preserve all analyses on
// Loops. This precludes *any* invalidation of loop analyses by the proxy,
// but that's OK because we've taken care to invalidate analyses in the
// loop analysis manager incrementally above.
PA.preserveSet<AllAnalysesOn<Loop>>();
PA.preserve<LoopAnalysisManagerFunctionProxy>();
// We also preserve the set of standard analyses.
PA.preserve<AssumptionAnalysis>();
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<LoopAnalysis>();
PA.preserve<ScalarEvolutionAnalysis>();
// FIXME: What we really want to do here is preserve an AA category, but
// that concept doesn't exist yet.
PA.preserve<AAManager>();
PA.preserve<BasicAA>();
PA.preserve<GlobalsAA>();
PA.preserve<SCEVAA>();
return PA;
}
private:
LoopPassT Pass;
};
/// \brief A function to deduce a loop pass type and wrap it in the templated
/// adaptor.
template <typename LoopPassT>
FunctionToLoopPassAdaptor<LoopPassT>
createFunctionToLoopPassAdaptor(LoopPassT Pass) {
return FunctionToLoopPassAdaptor<LoopPassT>(std::move(Pass));
}
/// \brief Pass for printing a loop's contents as textual IR.
class PrintLoopPass : public PassInfoMixin<PrintLoopPass> {
raw_ostream &OS;
std::string Banner;
public:
PrintLoopPass();
PrintLoopPass(raw_ostream &OS, const std::string &Banner = "");
PreservedAnalyses run(Loop &L, LoopAnalysisManager &,
LoopStandardAnalysisResults &, LPMUpdater &);
};
}
#endif // LLVM_TRANSFORMS_SCALAR_LOOPPASSMANAGER_H
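
A hedged sketch of wiring loop passes into a function pipeline with the adaptor declared above; the pass selection is arbitrary and the surrounding pass-manager setup is assumed to exist elsewhere.

#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/IndVarSimplify.h"
#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"

using namespace llvm;

// Sketch: compose loop passes in a LoopPassManager, then let the adaptor run
// them over every loop of each function, innermost loops first.
FunctionPassManager buildLoopPipeline() {
  LoopPassManager LPM;
  LPM.addPass(LICMPass());
  LPM.addPass(IndVarSimplifyPass());

  // The adaptor sets up the LoopAnalysisManager proxy, the standard analysis
  // results and the LPMUpdater worklist described in the header above.
  FunctionPassManager FPM;
  FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM)));
  return FPM;
}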

View File

@ -15,8 +15,8 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPROTATION_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
@ -24,7 +24,8 @@ namespace llvm {
class LoopRotatePass : public PassInfoMixin<LoopRotatePass> {
public:
LoopRotatePass(bool EnableHeaderDuplication = true);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
private:
const bool EnableHeaderDuplication;

View File

@ -18,15 +18,16 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPSIMPLIFYCFG_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
/// Performs basic CFG simplifications to assist other loop passes.
class LoopSimplifyCFGPass : public PassInfoMixin<LoopSimplifyCFGPass> {
public:
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

View File

@ -23,15 +23,16 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPSTRENGTHREDUCE_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
/// Performs Loop Strength Reduce Pass.
class LoopStrengthReducePass : public PassInfoMixin<LoopStrengthReducePass> {
public:
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

View File

@ -11,8 +11,8 @@
#define LLVM_TRANSFORMS_SCALAR_LOOPUNROLLPASS_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
namespace llvm {
@ -23,7 +23,8 @@ struct LoopUnrollPass : public PassInfoMixin<LoopUnrollPass> {
Optional<bool> ProvidedRuntime;
Optional<bool> ProvidedUpperBound;
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM);
PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR, LPMUpdater &U);
};
} // end namespace llvm

View File

@ -29,6 +29,7 @@ class DataLayout;
class DominatorTree;
class Loop;
class LoopInfo;
class OptimizationRemarkEmitter;
class Pass;
class PredicatedScalarEvolution;
class PredIteratorCache;
@ -404,11 +405,11 @@ bool formLCSSARecursively(Loop &L, DominatorTree &DT, LoopInfo *LI,
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration. Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree,
/// DataLayout, TargetLibraryInfo, Loop, AliasSet information for all
/// instructions of the loop and loop safety information as arguments.
/// It returns changed status.
/// instructions of the loop and loop safety information as
/// arguments. Diagnostics are emitted via \p ORE. It returns changed status.
bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
TargetLibraryInfo *, Loop *, AliasSetTracker *,
LoopSafetyInfo *);
LoopSafetyInfo *, OptimizationRemarkEmitter *ORE);
/// \brief Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
@ -416,10 +417,11 @@ bool sinkRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
/// before uses, allowing us to hoist a loop body in one pass without iteration.
/// Takes DomTreeNode, AliasAnalysis, LoopInfo, DominatorTree, DataLayout,
/// TargetLibraryInfo, Loop, AliasSet information for all instructions of the
/// loop and loop safety information as arguments. It returns changed status.
/// loop and loop safety information as arguments. Diagnostics are emitted via \p
/// ORE. It returns changed status.
bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
TargetLibraryInfo *, Loop *, AliasSetTracker *,
LoopSafetyInfo *);
LoopSafetyInfo *, OptimizationRemarkEmitter *ORE);
/// \brief Try to promote memory values to scalars by sinking stores out of
/// the loop and moving loads to before the loop. We do this by looping over
@ -427,12 +429,14 @@ bool hoistRegion(DomTreeNode *, AliasAnalysis *, LoopInfo *, DominatorTree *,
/// loop invariant. It takes AliasSet, Loop exit blocks vector, loop exit blocks
/// insertion point vector, PredIteratorCache, LoopInfo, DominatorTree, Loop,
/// AliasSet information for all instructions of the loop and loop safety
/// information as arguments. It returns changed status.
/// information as arguments. Diagnostics are emitted via \p ORE. It returns
/// changed status.
bool promoteLoopAccessesToScalars(AliasSet &, SmallVectorImpl<BasicBlock *> &,
SmallVectorImpl<Instruction *> &,
PredIteratorCache &, LoopInfo *,
DominatorTree *, const TargetLibraryInfo *,
Loop *, AliasSetTracker *, LoopSafetyInfo *);
Loop *, AliasSetTracker *, LoopSafetyInfo *,
OptimizationRemarkEmitter *);
/// \brief Computes safety information for a loop
/// checks loop body & header for the possibility of may throw
@ -478,11 +482,12 @@ void getLoopAnalysisUsage(AnalysisUsage &AU);
/// preheader to loop body (no speculation).
/// If SafetyInfo is not null, we are checking for hoisting/sinking
/// instructions from loop body to preheader/exit. Check if the instruction
/// can execute specultatively.
///
/// can execute speculatively.
/// If \p ORE is set use it to emit optimization remarks.
bool canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
Loop *CurLoop, AliasSetTracker *CurAST,
LoopSafetyInfo *SafetyInfo);
LoopSafetyInfo *SafetyInfo,
OptimizationRemarkEmitter *ORE = nullptr);
}
#endif

View File

@ -33,6 +33,12 @@ class Pass;
class OptimizationRemarkEmitter;
class ScalarEvolution;
typedef SmallDenseMap<const Loop *, Loop *, 4> NewLoopsMap;
const Loop* addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
BasicBlock *ClonedBB, LoopInfo *LI,
NewLoopsMap &NewLoops);
bool UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
bool AllowRuntime, bool AllowExpensiveTripCount,
bool PreserveCondBr, bool PreserveOnlyFirst,

View File

@ -57,12 +57,12 @@
#include "llvm/Analysis/DemandedBits.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include <functional>
namespace llvm {

View File

@ -0,0 +1,71 @@
//===- Trace.h - XRay Trace Abstraction -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Defines the XRay Trace class representing records in an XRay trace file.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_TRACE_H
#define LLVM_XRAY_TRACE_H
#include <cstdint>
#include <vector>
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/XRay/XRayRecord.h"
namespace llvm {
namespace xray {
/// A Trace object represents the records that have been loaded from XRay
/// log files generated by instrumented binaries. We encapsulate the logic of
/// reading the traces in factory functions that populate the Trace object
/// appropriately.
///
/// Trace objects provide an accessor to an XRayFileHeader which says more about
/// details of the file from which the XRay trace was loaded.
///
/// Usage:
///
/// if (auto TraceOrErr = loadTraceFile("xray-log.something.xray")) {
/// auto& T = *TraceOrErr;
/// // T.getFileHeader() will provide information from the trace header.
/// for (const XRayRecord &R : T) {
/// // ... do something with R here.
/// }
/// } else {
/// // Handle the error here.
/// }
///
class Trace {
XRayFileHeader FileHeader;
std::vector<XRayRecord> Records;
typedef std::vector<XRayRecord>::const_iterator citerator;
friend Expected<Trace> loadTraceFile(StringRef, bool);
public:
/// Provides access to the loaded XRay trace file header.
const XRayFileHeader &getFileHeader() const { return FileHeader; }
citerator begin() const { return Records.begin(); }
citerator end() const { return Records.end(); }
size_t size() const { return Records.size(); }
};
/// This function will attempt to load XRay trace records from the provided
/// |Filename|.
Expected<Trace> loadTraceFile(StringRef Filename, bool Sort = false);
} // namespace xray
} // namespace llvm
#endif // LLVM_XRAY_TRACE_H
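
A minimal end-to-end sketch of consuming this interface (the file name, error banner, and helper name are illustrative only):

#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/XRay/Trace.h"
using namespace llvm;
using namespace llvm::xray;

// Load a trace, print its version and record count, or report the error.
static void summarizeTrace(StringRef Filename) {
  if (auto TraceOrErr = loadTraceFile(Filename, /*Sort=*/true)) {
    const Trace &T = *TraceOrErr;
    outs() << "version " << T.getFileHeader().Version << ", " << T.size()
           << " records\n";
  } else {
    logAllUnhandledErrors(TraceOrErr.takeError(), errs(), "cannot load trace: ");
  }
}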

View File

@ -0,0 +1,76 @@
//===- XRayRecord.h - XRay Trace Record -----------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file replicates the record definition for XRay log entries. This should
// follow the evolution of the log record versions supported in the compiler-rt
// xray project.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_XRAY_RECORD_H
#define LLVM_XRAY_XRAY_RECORD_H
#include <cstdint>
namespace llvm {
namespace xray {
/// XRay traces all have a header providing some top-matter information useful
/// to help tools determine how to interpret the information available in the
/// trace.
struct XRayFileHeader {
/// Version of the XRay implementation that produced this file.
uint16_t Version = 0;
/// A numeric identifier for the type of file this is. Best used in
/// combination with Version.
uint16_t Type = 0;
/// Whether the CPU that produced the timestamp counters (TSC) moves at a
/// constant rate.
bool ConstantTSC;
/// Whether the CPU that produced the timestamp counters (TSC) does not stop.
bool NonstopTSC;
/// The number of cycles per second for the CPU that produced the timestamp
/// counter (TSC) values. Useful for estimating the amount of time that
/// elapsed between two TSCs on some platforms.
uint64_t CycleFrequency = 0;
};
/// Determines the supported types of records that could be seen in XRay traces.
/// This may or may not correspond to actual record types in the raw trace (as
/// the loader implementation may synthesize this information in the process of
/// loading).
enum class RecordTypes { ENTER, EXIT };
struct XRayRecord {
/// The type of record.
uint16_t RecordType;
/// The CPU where the thread is running. We assume number of CPUs <= 256.
uint8_t CPU;
/// Identifies the type of record.
RecordTypes Type;
/// The function ID for the record.
int32_t FuncId;
/// The full 8 bytes of the TSC at the time the log record was taken.
uint64_t TSC;
/// The thread ID for the currently running thread.
uint32_t TId;
};
} // namespace xray
} // namespace llvm
#endif // LLVM_XRAY_XRAY_RECORD_H
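
To make the fields concrete, here is a hedged sketch that pairs ENTER/EXIT records per function and converts the TSC delta to seconds via CycleFrequency. The pairing is deliberately naive (no per-thread stacks, no nesting) and the helper name is made up.

#include <cstdint>
#include <unordered_map>
#include <vector>
#include "llvm/XRay/XRayRecord.h"
using namespace llvm::xray;

// Rough per-call latencies in seconds for a flat, single-threaded trace.
static std::vector<double> callLatencies(const XRayFileHeader &H,
                                         const std::vector<XRayRecord> &Recs) {
  std::unordered_map<int32_t, uint64_t> LastEntry; // FuncId -> entry TSC
  std::vector<double> Seconds;
  for (const XRayRecord &R : Recs) {
    if (R.Type == RecordTypes::ENTER) {
      LastEntry[R.FuncId] = R.TSC;
    } else if (R.Type == RecordTypes::EXIT) {
      auto It = LastEntry.find(R.FuncId);
      if (It != LastEntry.end() && H.CycleFrequency != 0)
        Seconds.push_back(double(R.TSC - It->second) /
                          double(H.CycleFrequency));
    }
  }
  return Seconds;
}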

View File

@ -0,0 +1,99 @@
//===- YAMLXRayRecord.h - XRay Record YAML Support Definitions ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Types and traits specialisations for YAML I/O of XRay log entries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_XRAY_YAML_XRAY_RECORD_H
#define LLVM_XRAY_YAML_XRAY_RECORD_H
#include <type_traits>
#include "llvm/Support/YAMLTraits.h"
#include "llvm/XRay/XRayRecord.h"
namespace llvm {
namespace xray {
struct YAMLXRayFileHeader {
uint16_t Version;
uint16_t Type;
bool ConstantTSC;
bool NonstopTSC;
uint64_t CycleFrequency;
};
struct YAMLXRayRecord {
uint16_t RecordType;
uint8_t CPU;
RecordTypes Type;
int32_t FuncId;
std::string Function;
uint64_t TSC;
uint32_t TId;
};
struct YAMLXRayTrace {
YAMLXRayFileHeader Header;
std::vector<YAMLXRayRecord> Records;
};
} // namespace xray
namespace yaml {
// YAML Traits
// -----------
template <> struct ScalarEnumerationTraits<xray::RecordTypes> {
static void enumeration(IO &IO, xray::RecordTypes &Type) {
IO.enumCase(Type, "function-enter", xray::RecordTypes::ENTER);
IO.enumCase(Type, "function-exit", xray::RecordTypes::EXIT);
}
};
template <> struct MappingTraits<xray::YAMLXRayFileHeader> {
static void mapping(IO &IO, xray::YAMLXRayFileHeader &Header) {
IO.mapRequired("version", Header.Version);
IO.mapRequired("type", Header.Type);
IO.mapRequired("constant-tsc", Header.ConstantTSC);
IO.mapRequired("nonstop-tsc", Header.NonstopTSC);
IO.mapRequired("cycle-frequency", Header.CycleFrequency);
}
};
template <> struct MappingTraits<xray::YAMLXRayRecord> {
static void mapping(IO &IO, xray::YAMLXRayRecord &Record) {
// FIXME: Make this type actually be descriptive
IO.mapRequired("type", Record.RecordType);
IO.mapRequired("func-id", Record.FuncId);
IO.mapOptional("function", Record.Function);
IO.mapRequired("cpu", Record.CPU);
IO.mapRequired("thread", Record.TId);
IO.mapRequired("kind", Record.Type);
IO.mapRequired("tsc", Record.TSC);
}
static constexpr bool flow = true;
};
template <> struct MappingTraits<xray::YAMLXRayTrace> {
static void mapping(IO &IO, xray::YAMLXRayTrace &Trace) {
// A trace file contains two parts, the header and the list of all the
// trace records.
IO.mapRequired("header", Trace.Header);
IO.mapRequired("records", Trace.Records);
}
};
} // namespace yaml
} // namespace llvm
LLVM_YAML_IS_SEQUENCE_VECTOR(xray::YAMLXRayRecord)
#endif // LLVM_XRAY_YAML_XRAY_RECORD_H
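
A short sketch of how these traits are typically exercised, serializing a YAMLXRayTrace with llvm::yaml::Output (the include path of the header above is assumed):

#include <string>
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/XRay/YAMLXRayRecord.h" // assumed install path of this header
using namespace llvm;

// Serialize a trace to a YAML string using the MappingTraits defined above.
static std::string traceToYAML(xray::YAMLXRayTrace &T) {
  std::string Buffer;
  raw_string_ostream OS(Buffer);
  yaml::Output Out(OS);
  Out << T; // writes "header:" plus a flow-mapped "records:" sequence
  return OS.str();
}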

View File

@ -24,6 +24,109 @@
using namespace llvm;
using namespace llvm::PatternMatch;
SmallVector<WeakVH, 1> &AssumptionCache::getAffectedValues(Value *V) {
// Try using find_as first to avoid creating extra value handles just for the
// purpose of doing the lookup.
auto AVI = AffectedValues.find_as(V);
if (AVI != AffectedValues.end())
return AVI->second;
auto AVIP = AffectedValues.insert({
AffectedValueCallbackVH(V, this), SmallVector<WeakVH, 1>()});
return AVIP.first->second;
}
void AssumptionCache::updateAffectedValues(CallInst *CI) {
// Note: This code must be kept in-sync with the code in
// computeKnownBitsFromAssume in ValueTracking.
SmallVector<Value *, 16> Affected;
auto AddAffected = [&Affected](Value *V) {
if (isa<Argument>(V)) {
Affected.push_back(V);
} else if (auto *I = dyn_cast<Instruction>(V)) {
Affected.push_back(I);
if (I->getOpcode() == Instruction::BitCast ||
I->getOpcode() == Instruction::PtrToInt) {
auto *Op = I->getOperand(0);
if (isa<Instruction>(Op) || isa<Argument>(Op))
Affected.push_back(Op);
}
}
};
Value *Cond = CI->getArgOperand(0), *A, *B;
AddAffected(Cond);
CmpInst::Predicate Pred;
if (match(Cond, m_ICmp(Pred, m_Value(A), m_Value(B)))) {
AddAffected(A);
AddAffected(B);
if (Pred == ICmpInst::ICMP_EQ) {
// For equality comparisons, we handle the case of bit inversion.
auto AddAffectedFromEq = [&AddAffected](Value *V) {
Value *A;
if (match(V, m_Not(m_Value(A)))) {
AddAffected(A);
V = A;
}
Value *B;
ConstantInt *C;
// (A & B) or (A | B) or (A ^ B).
if (match(V,
m_CombineOr(m_And(m_Value(A), m_Value(B)),
m_CombineOr(m_Or(m_Value(A), m_Value(B)),
m_Xor(m_Value(A), m_Value(B)))))) {
AddAffected(A);
AddAffected(B);
// (A << C) or (A >>_s C) or (A >>_u C) where C is some constant.
} else if (match(V,
m_CombineOr(m_Shl(m_Value(A), m_ConstantInt(C)),
m_CombineOr(m_LShr(m_Value(A), m_ConstantInt(C)),
m_AShr(m_Value(A),
m_ConstantInt(C)))))) {
AddAffected(A);
}
};
AddAffectedFromEq(A);
AddAffectedFromEq(B);
}
}
for (auto &AV : Affected) {
auto &AVV = getAffectedValues(AV);
if (std::find(AVV.begin(), AVV.end(), CI) == AVV.end())
AVV.push_back(CI);
}
}
void AssumptionCache::AffectedValueCallbackVH::deleted() {
auto AVI = AC->AffectedValues.find(getValPtr());
if (AVI != AC->AffectedValues.end())
AC->AffectedValues.erase(AVI);
// 'this' now dangles!
}
void AssumptionCache::AffectedValueCallbackVH::allUsesReplacedWith(Value *NV) {
if (!isa<Instruction>(NV) && !isa<Argument>(NV))
return;
// Any assumptions that affected this value now affect the new value.
auto &NAVV = AC->getAffectedValues(NV);
auto AVI = AC->AffectedValues.find(getValPtr());
if (AVI == AC->AffectedValues.end())
return;
for (auto &A : AVI->second)
if (std::find(NAVV.begin(), NAVV.end(), A) == NAVV.end())
NAVV.push_back(A);
}
void AssumptionCache::scanFunction() {
assert(!Scanned && "Tried to scan the function twice!");
assert(AssumeHandles.empty() && "Already have assumes when scanning!");
@ -37,6 +140,10 @@ void AssumptionCache::scanFunction() {
// Mark the scan as complete.
Scanned = true;
// Update affected values.
for (auto &A : AssumeHandles)
updateAffectedValues(cast<CallInst>(A));
}
void AssumptionCache::registerAssumption(CallInst *CI) {
@ -72,6 +179,8 @@ void AssumptionCache::registerAssumption(CallInst *CI) {
"Cache contains multiple copies of a call!");
}
#endif
updateAffectedValues(CI);
}
AnalysisKey AssumptionAnalysis::Key;
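
The point of the new affected-values map is that clients can ask only for the assumptions relevant to a particular value instead of scanning every llvm.assume in the function, which is exactly what the LazyValueInfo and ValueTracking hunks further down switch to. A hedged sketch of such a query (the helper name is made up):

#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/IR/Value.h"
using namespace llvm;

// Count the llvm.assume calls the cache considers relevant to V.
static unsigned countAssumptionsFor(AssumptionCache &AC, Value *V) {
  unsigned N = 0;
  for (auto &AssumeVH : AC.assumptionsFor(V)) {
    if (!AssumeVH)
      continue; // the assume was deleted; its value handle is now null
    ++N;
  }
  return N;
}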

View File

@ -438,8 +438,11 @@ unsigned CostModelAnalysis::getInstructionCost(const Instruction *I) const {
getOperandInfo(I->getOperand(0));
TargetTransformInfo::OperandValueKind Op2VK =
getOperandInfo(I->getOperand(1));
SmallVector<const Value*, 2> Operands(I->operand_values());
return TTI->getArithmeticInstrCost(I->getOpcode(), I->getType(), Op1VK,
Op2VK);
Op2VK, TargetTransformInfo::OP_None,
TargetTransformInfo::OP_None,
Operands);
}
case Instruction::Select: {
const SelectInst *SI = cast<SelectInst>(I);

View File

@ -16,8 +16,8 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
@ -36,20 +36,9 @@ using namespace llvm;
AnalysisKey IVUsersAnalysis::Key;
IVUsers IVUsersAnalysis::run(Loop &L, LoopAnalysisManager &AM) {
const auto &FAM =
AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
Function *F = L.getHeader()->getParent();
return IVUsers(&L, FAM.getCachedResult<AssumptionAnalysis>(*F),
FAM.getCachedResult<LoopAnalysis>(*F),
FAM.getCachedResult<DominatorTreeAnalysis>(*F),
FAM.getCachedResult<ScalarEvolutionAnalysis>(*F));
}
PreservedAnalyses IVUsersPrinterPass::run(Loop &L, LoopAnalysisManager &AM) {
AM.getResult<IVUsersAnalysis>(L).print(OS);
return PreservedAnalyses::all();
IVUsers IVUsersAnalysis::run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR) {
return IVUsers(&L, &AR.AC, &AR.LI, &AR.DT, &AR.SE);
}
char IVUsersWrapperPass::ID = 0;

View File

@ -636,30 +636,27 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) {
else if (Caller->optForSize())
Threshold = MinIfValid(Threshold, Params.OptSizeThreshold);
bool HotCallsite = false;
uint64_t TotalWeight;
if (PSI && CS.getInstruction()->extractProfTotalWeight(TotalWeight) &&
PSI->isHotCount(TotalWeight)) {
HotCallsite = true;
// Adjust the threshold based on the inlinehint attribute and profile-based
// hotness information if the caller does not have the MinSize attribute.
if (!Caller->optForMinSize()) {
if (Callee.hasFnAttribute(Attribute::InlineHint))
Threshold = MaxIfValid(Threshold, Params.HintThreshold);
if (PSI) {
uint64_t TotalWeight;
if (CS.getInstruction()->extractProfTotalWeight(TotalWeight) &&
PSI->isHotCount(TotalWeight)) {
Threshold = MaxIfValid(Threshold, Params.HotCallSiteThreshold);
} else if (PSI->isFunctionEntryHot(&Callee)) {
// If callsite hotness can not be determined, we may still know
// that the callee is hot and treat it as a weaker hint for threshold
// increase.
Threshold = MaxIfValid(Threshold, Params.HintThreshold);
} else if (PSI->isFunctionEntryCold(&Callee)) {
Threshold = MinIfValid(Threshold, Params.ColdThreshold);
}
}
}
// Listen to the inlinehint attribute or profile based hotness information
// when it would increase the threshold and the caller does not need to
// minimize its size.
bool InlineHint = Callee.hasFnAttribute(Attribute::InlineHint) ||
(PSI && PSI->isFunctionEntryHot(&Callee));
if (InlineHint && !Caller->optForMinSize())
Threshold = MaxIfValid(Threshold, Params.HintThreshold);
if (HotCallsite && !Caller->optForMinSize())
Threshold = MaxIfValid(Threshold, Params.HotCallSiteThreshold);
bool ColdCallee = PSI && PSI->isFunctionEntryCold(&Callee);
// For cold callees, use the ColdThreshold knob if it is available and reduces
// the threshold.
if (ColdCallee)
Threshold = MinIfValid(Threshold, Params.ColdThreshold);
// Finally, take the target-specific inlining threshold multiplier into
// account.
Threshold *= TTI.getInliningThresholdMultiplier();
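
The net effect of this restructuring is easier to see outside the diff. The sketch below restates the new adjustment order (inlinehint first, then profile-based callsite hotness, falling back to callee entry hotness or coldness, all skipped for MinSize callers); the function and its flattened parameters are purely illustrative.

#include <algorithm>
#include "llvm/ADT/Optional.h"
using llvm::Optional;

// Illustrative restatement of the new threshold adjustment logic.
static int adjustThreshold(int Threshold, bool CallerMinSize, bool InlineHint,
                           bool HotCallSite, bool CalleeEntryHot,
                           bool CalleeEntryCold, Optional<int> HintThreshold,
                           Optional<int> HotCallSiteThreshold,
                           Optional<int> ColdThreshold) {
  auto MaxIfValid = [](int A, Optional<int> B) { return B ? std::max(A, *B) : A; };
  auto MinIfValid = [](int A, Optional<int> B) { return B ? std::min(A, *B) : A; };
  if (CallerMinSize)
    return Threshold; // no hotness-based adjustment for MinSize callers
  if (InlineHint)
    Threshold = MaxIfValid(Threshold, HintThreshold);
  if (HotCallSite)
    Threshold = MaxIfValid(Threshold, HotCallSiteThreshold);
  else if (CalleeEntryHot)
    Threshold = MaxIfValid(Threshold, HintThreshold); // weaker hint
  else if (CalleeEntryCold)
    Threshold = MinIfValid(Threshold, ColdThreshold);
  return Threshold;
}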

View File

@ -3583,7 +3583,7 @@ static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
*Y == *C)
return TrueWhenUnset ? TrueVal : FalseVal;
}
return nullptr;
}
@ -3595,7 +3595,7 @@ static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *TrueVal,
unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
if (!BitWidth)
return nullptr;
APInt MinSignedValue;
Value *X;
if (match(CmpLHS, m_Trunc(m_Value(X))) && (X == TrueVal || X == FalseVal)) {
@ -4252,14 +4252,36 @@ static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
const Query &Q, unsigned MaxRecurse) {
Intrinsic::ID IID = F->getIntrinsicID();
unsigned NumOperands = std::distance(ArgBegin, ArgEnd);
Type *ReturnType = F->getReturnType();
// Unary Ops
if (NumOperands == 1) {
// Perform idempotent optimizations
if (IsIdempotent(IID)) {
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin)) {
if (II->getIntrinsicID() == IID)
return II;
}
}
switch (IID) {
case Intrinsic::fabs: {
if (SignBitMustBeZero(*ArgBegin, Q.TLI))
return *ArgBegin;
}
default:
return nullptr;
}
}
// Binary Ops
if (NumOperands == 2) {
Value *LHS = *ArgBegin;
Value *RHS = *(ArgBegin + 1);
if (IID == Intrinsic::usub_with_overflow ||
IID == Intrinsic::ssub_with_overflow) {
Type *ReturnType = F->getReturnType();
switch (IID) {
case Intrinsic::usub_with_overflow:
case Intrinsic::ssub_with_overflow: {
// X - X -> { 0, false }
if (LHS == RHS)
return Constant::getNullValue(ReturnType);
@ -4268,17 +4290,19 @@ static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
// undef - X -> undef
if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
return UndefValue::get(ReturnType);
}
if (IID == Intrinsic::uadd_with_overflow ||
IID == Intrinsic::sadd_with_overflow) {
return nullptr;
}
case Intrinsic::uadd_with_overflow:
case Intrinsic::sadd_with_overflow: {
// X + undef -> undef
if (isa<UndefValue>(RHS))
return UndefValue::get(ReturnType);
}
if (IID == Intrinsic::umul_with_overflow ||
IID == Intrinsic::smul_with_overflow) {
return nullptr;
}
case Intrinsic::umul_with_overflow:
case Intrinsic::smul_with_overflow: {
// X * 0 -> { 0, false }
if (match(RHS, m_Zero()))
return Constant::getNullValue(ReturnType);
@ -4286,34 +4310,34 @@ static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
// X * undef -> { 0, false }
if (match(RHS, m_Undef()))
return Constant::getNullValue(ReturnType);
}
if (IID == Intrinsic::load_relative && isa<Constant>(LHS) &&
isa<Constant>(RHS))
return SimplifyRelativeLoad(cast<Constant>(LHS), cast<Constant>(RHS),
Q.DL);
return nullptr;
}
case Intrinsic::load_relative: {
Constant *C0 = dyn_cast<Constant>(LHS);
Constant *C1 = dyn_cast<Constant>(RHS);
if (C0 && C1)
return SimplifyRelativeLoad(C0, C1, Q.DL);
return nullptr;
}
default:
return nullptr;
}
}
// Simplify calls to llvm.masked.load.*
if (IID == Intrinsic::masked_load) {
switch (IID) {
case Intrinsic::masked_load: {
Value *MaskArg = ArgBegin[2];
Value *PassthruArg = ArgBegin[3];
// If the mask is all zeros or undef, the "passthru" argument is the result.
if (maskIsAllZeroOrUndef(MaskArg))
return PassthruArg;
}
// Perform idempotent optimizations
if (!IsIdempotent(IID))
return nullptr;
// Unary Ops
if (NumOperands == 1)
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(*ArgBegin))
if (II->getIntrinsicID() == IID)
return II;
return nullptr;
}
default:
return nullptr;
}
}
template <typename IterTy>

View File

@ -925,7 +925,7 @@ void LazyValueInfoImpl::intersectAssumeOrGuardBlockValueConstantRange(
if (!BBI)
return;
for (auto &AssumeVH : AC->assumptions()) {
for (auto &AssumeVH : AC->assumptionsFor(Val)) {
if (!AssumeVH)
continue;
auto *I = cast<CallInst>(AssumeVH);

View File

@ -12,22 +12,22 @@
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
@ -44,10 +44,10 @@
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
@ -2120,35 +2120,9 @@ INITIALIZE_PASS_END(LoopAccessLegacyAnalysis, LAA_NAME, laa_name, false, true)
AnalysisKey LoopAccessAnalysis::Key;
LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM) {
const FunctionAnalysisManager &FAM =
AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager();
Function &F = *L.getHeader()->getParent();
auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(F);
auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(F);
auto *AA = FAM.getCachedResult<AAManager>(F);
auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
auto *LI = FAM.getCachedResult<LoopAnalysis>(F);
if (!SE)
report_fatal_error(
"ScalarEvolution must have been cached at a higher level");
if (!AA)
report_fatal_error("AliasAnalysis must have been cached at a higher level");
if (!DT)
report_fatal_error("DominatorTree must have been cached at a higher level");
if (!LI)
report_fatal_error("LoopInfo must have been cached at a higher level");
return LoopAccessInfo(&L, SE, TLI, AA, DT, LI);
}
PreservedAnalyses LoopAccessInfoPrinterPass::run(Loop &L,
LoopAnalysisManager &AM) {
Function &F = *L.getHeader()->getParent();
auto &LAI = AM.getResult<LoopAccessAnalysis>(L);
OS << "Loop access info in function '" << F.getName() << "':\n";
OS.indent(2) << L.getHeader()->getName() << ":\n";
LAI.print(OS, 4);
return PreservedAnalyses::all();
LoopAccessInfo LoopAccessAnalysis::run(Loop &L, LoopAnalysisManager &AM,
LoopStandardAnalysisResults &AR) {
return LoopAccessInfo(&L, &AR.SE, &AR.TLI, &AR.AA, &AR.DT, &AR.LI);
}
namespace llvm {

View File

@ -0,0 +1,160 @@
//===- LoopAnalysisManager.cpp - Loop analysis management -----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;
// Explicit template instantiations and specialization definitions for core
// template typedefs.
namespace llvm {
template class AllAnalysesOn<Loop>;
template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop,
LoopStandardAnalysisResults &>;
bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
// First compute the sequence of IR units covered by this proxy. We will want
// to visit this in postorder, but because this is a tree structure we can do
// this by building a preorder sequence and walking it in reverse.
SmallVector<Loop *, 4> PreOrderLoops, PreOrderWorklist;
// Note that we want to walk the roots in reverse order because we will end
// up reversing the preorder sequence. However, it happens that the loop nest
// roots are in reverse order within the LoopInfo object. So we just walk
// forward here.
// FIXME: If we change the order of LoopInfo we will want to add a reverse
// here.
for (Loop *RootL : *LI) {
assert(PreOrderWorklist.empty() &&
"Must start with an empty preorder walk worklist.");
PreOrderWorklist.push_back(RootL);
do {
Loop *L = PreOrderWorklist.pop_back_val();
PreOrderWorklist.append(L->begin(), L->end());
PreOrderLoops.push_back(L);
} while (!PreOrderWorklist.empty());
}
// If this proxy or the loop info is going to be invalidated, we also need
// to clear all the keys coming from that analysis. We also completely blow
// away the loop analyses if any of the standard analyses provided by the
// loop pass manager go away so that loop analyses can freely use these
// without worrying about declaring dependencies on them etc.
// FIXME: It isn't clear if this is the right tradeoff. We could instead make
// loop analyses declare any dependencies on these and use the more general
// invalidation logic below to act on that.
auto PAC = PA.getChecker<LoopAnalysisManagerFunctionProxy>();
if (!(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
Inv.invalidate<AAManager>(F, PA) ||
Inv.invalidate<AssumptionAnalysis>(F, PA) ||
Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
Inv.invalidate<LoopAnalysis>(F, PA) ||
Inv.invalidate<ScalarEvolutionAnalysis>(F, PA)) {
// Note that the LoopInfo may be stale at this point, however the loop
// objects themselves remain the only viable keys that could be in the
// analysis manager's cache. So we just walk the keys and forcibly clear
// those results. Note that the order doesn't matter here as this will just
// directly destroy the results without calling methods on them.
for (Loop *L : PreOrderLoops)
InnerAM->clear(*L);
// We also need to null out the inner AM so that when the object gets
// destroyed as invalid we don't try to clear the inner AM again. At that
// point we won't be able to reliably walk the loops for this function and
// only clear results associated with those loops the way we do here.
// FIXME: Making InnerAM null at this point isn't very nice. Most analyses
// try to remain valid during invalidation. Maybe we should add an
// `IsClean` flag?
InnerAM = nullptr;
// Now return true to indicate this *is* invalid and a fresh proxy result
// needs to be built. This is especially important given the null InnerAM.
return true;
}
// Directly check if the relevant set is preserved so we can short circuit
// invalidating loops.
bool AreLoopAnalysesPreserved =
PA.allAnalysesInSetPreserved<AllAnalysesOn<Loop>>();
// Since we have a valid LoopInfo we can actually leave the cached results in
// the analysis manager associated with the Loop keys, but we need to
// propagate any necessary invalidation logic into them. We'd like to
// invalidate things in roughly the same order as they were put into the
// cache and so we walk the preorder list in reverse to form a valid
// postorder.
for (Loop *L : reverse(PreOrderLoops)) {
Optional<PreservedAnalyses> InnerPA;
// Check to see whether the preserved set needs to be adjusted based on
// function-level analysis invalidation triggering deferred invalidation
// for this loop.
if (auto *OuterProxy =
InnerAM->getCachedResult<FunctionAnalysisManagerLoopProxy>(*L))
for (const auto &OuterInvalidationPair :
OuterProxy->getOuterInvalidations()) {
AnalysisKey *OuterAnalysisID = OuterInvalidationPair.first;
const auto &InnerAnalysisIDs = OuterInvalidationPair.second;
if (Inv.invalidate(OuterAnalysisID, F, PA)) {
if (!InnerPA)
InnerPA = PA;
for (AnalysisKey *InnerAnalysisID : InnerAnalysisIDs)
InnerPA->abandon(InnerAnalysisID);
}
}
// Check if we needed a custom PA set. If so we'll need to run the inner
// invalidation.
if (InnerPA) {
InnerAM->invalidate(*L, *InnerPA);
continue;
}
// Otherwise we only need to do invalidation if the original PA set didn't
// preserve all Loop analyses.
if (!AreLoopAnalysesPreserved)
InnerAM->invalidate(*L, PA);
}
// Return false to indicate that this result is still a valid proxy.
return false;
}
template <>
LoopAnalysisManagerFunctionProxy::Result
LoopAnalysisManagerFunctionProxy::run(Function &F,
FunctionAnalysisManager &AM) {
return Result(*InnerAM, AM.getResult<LoopAnalysis>(F));
}
}
PreservedAnalyses llvm::getLoopPassPreservedAnalyses() {
PreservedAnalyses PA;
PA.preserve<AssumptionAnalysis>();
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<LoopAnalysis>();
PA.preserve<LoopAnalysisManagerFunctionProxy>();
PA.preserve<ScalarEvolutionAnalysis>();
// TODO: What we really want to do here is preserve an AA category, but that
// concept doesn't exist yet.
PA.preserve<AAManager>();
PA.preserve<BasicAA>();
PA.preserve<GlobalsAA>();
PA.preserve<SCEVAA>();
return PA;
}
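
To show where LoopStandardAnalysisResults fits, here is a hedged sketch of a trivial loop analysis written against the new interface, modeled on the IVUsersAnalysis::run change elsewhere in this patch. The analysis itself is made up; a transformation pass would, analogously, return getLoopPassPreservedAnalyses() when it changes anything.

#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/PassManager.h"
using namespace llvm;

// Made-up analysis: report the nesting depth of a loop, using the
// pre-computed function-level results handed in through AR.
struct LoopDepthExampleAnalysis
    : AnalysisInfoMixin<LoopDepthExampleAnalysis> {
  using Result = unsigned;
  static AnalysisKey Key;
  Result run(Loop &L, LoopAnalysisManager &,
             LoopStandardAnalysisResults &AR) {
    // AR bundles AA, AC, DT, LI, SE and TLI, already computed for the
    // enclosing function, so no getCachedResult dance is needed anymore.
    return AR.LI.getLoopDepth(L.getHeader());
  }
};
AnalysisKey LoopDepthExampleAnalysis::Key;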

View File

@ -689,18 +689,13 @@ PreservedAnalyses LoopPrinterPass::run(Function &F,
return PreservedAnalyses::all();
}
PrintLoopPass::PrintLoopPass() : OS(dbgs()) {}
PrintLoopPass::PrintLoopPass(raw_ostream &OS, const std::string &Banner)
: OS(OS), Banner(Banner) {}
PreservedAnalyses PrintLoopPass::run(Loop &L, AnalysisManager<Loop> &) {
void llvm::printLoop(Loop &L, raw_ostream &OS, const std::string &Banner) {
OS << Banner;
for (auto *Block : L.blocks())
if (Block)
Block->print(OS);
else
OS << "Printing <null> block";
return PreservedAnalyses::all();
}
//===----------------------------------------------------------------------===//

View File

@ -14,7 +14,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LLVMContext.h"
@ -32,13 +32,14 @@ namespace {
/// PrintLoopPass - Print a Function corresponding to a Loop.
///
class PrintLoopPassWrapper : public LoopPass {
PrintLoopPass P;
raw_ostream &OS;
std::string Banner;
public:
static char ID;
PrintLoopPassWrapper() : LoopPass(ID) {}
PrintLoopPassWrapper() : LoopPass(ID), OS(dbgs()) {}
PrintLoopPassWrapper(raw_ostream &OS, const std::string &Banner)
: LoopPass(ID), P(OS, Banner) {}
: LoopPass(ID), OS(OS), Banner(Banner) {}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
@ -49,8 +50,7 @@ class PrintLoopPassWrapper : public LoopPass {
[](BasicBlock *BB) { return BB; });
if (BBI != L->blocks().end() &&
isFunctionInPrintList((*BBI)->getParent()->getName())) {
LoopAnalysisManager DummyLAM;
P.run(*L, DummyLAM);
printLoop(*L, OS, Banner);
}
return false;
}

View File

@ -1,59 +0,0 @@
//===- LoopPassManager.cpp - Loop pass management -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/LoopPassManager.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/IR/Dominators.h"
using namespace llvm;
// Explicit template instantiations and specialization definitions for core
// template typedefs.
namespace llvm {
template class PassManager<Loop>;
template class AnalysisManager<Loop>;
template class InnerAnalysisManagerProxy<LoopAnalysisManager, Function>;
template class OuterAnalysisManagerProxy<FunctionAnalysisManager, Loop>;
template <>
bool LoopAnalysisManagerFunctionProxy::Result::invalidate(
Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
// If this proxy isn't marked as preserved, the set of Function objects in
// the module may have changed. We therefore can't call
// InnerAM->invalidate(), because any pointers to Functions it has may be
// stale.
auto PAC = PA.getChecker<LoopAnalysisManagerFunctionProxy>();
if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Loop>>())
InnerAM->clear();
// FIXME: Proper support for invalidation isn't yet implemented for the LPM.
// Return false to indicate that this result is still a valid proxy.
return false;
}
}
PreservedAnalyses llvm::getLoopPassPreservedAnalyses() {
PreservedAnalyses PA;
PA.preserve<DominatorTreeAnalysis>();
PA.preserve<LoopAnalysis>();
PA.preserve<ScalarEvolutionAnalysis>();
// TODO: What we really want to do here is preserve an AA category, but that
// concept doesn't exist yet.
PA.preserve<AAManager>();
PA.preserve<BasicAA>();
PA.preserve<GlobalsAA>();
PA.preserve<SCEVAA>();
return PA;
}

View File

@ -323,17 +323,28 @@ MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
if (QueryInst != nullptr) {
if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
MemDepResult invariantGroupDependency =
getInvariantGroupPointerDependency(LI, BB);
InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);
if (invariantGroupDependency.isDef())
return invariantGroupDependency;
if (InvariantGroupDependency.isDef())
return InvariantGroupDependency;
}
}
return getSimplePointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst,
Limit);
MemDepResult SimpleDep = getSimplePointerDependencyFrom(
MemLoc, isLoad, ScanIt, BB, QueryInst, Limit);
if (SimpleDep.isDef())
return SimpleDep;
// A non-local invariant group dependency indicates there is a non-local Def
// (it only returns NonLocal if it finds a non-local Def), which is better than
// a local clobber and everything else.
if (InvariantGroupDependency.isNonLocal())
return InvariantGroupDependency;
assert(InvariantGroupDependency.isUnknown() &&
"InvariantGroupDependency should be only unknown at this point");
return SimpleDep;
}
MemDepResult
@ -358,6 +369,20 @@ MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
// Queue to process all pointers that are equivalent to load operand.
SmallVector<const Value *, 8> LoadOperandsQueue;
LoadOperandsQueue.push_back(LoadOperand);
Instruction *ClosestDependency = nullptr;
// The order of instructions in the uses list is unpredictable. In order to
// always get the same result, we look for the closest dominating dependency.
auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
assert(Other && "Must call it with not null instruction");
if (Best == nullptr || DT.dominates(Best, Other))
return Other;
return Best;
};
// FIXME: This loop is O(N^2) because dominates can be O(N), and in the worst case
// we will see all the instructions. This should be fixed in MSSA.
while (!LoadOperandsQueue.empty()) {
const Value *Ptr = LoadOperandsQueue.pop_back_val();
assert(Ptr && !isa<GlobalValue>(Ptr) &&
@ -388,12 +413,24 @@ MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
// If we hit load/store with the same invariant.group metadata (and the
// same pointer operand) we can assume that value pointed by pointer
// operand didn't change.
if ((isa<LoadInst>(U) || isa<StoreInst>(U)) && U->getParent() == BB &&
if ((isa<LoadInst>(U) || isa<StoreInst>(U)) &&
U->getMetadata(LLVMContext::MD_invariant_group) == InvariantGroupMD)
return MemDepResult::getDef(U);
ClosestDependency = GetClosestDependency(ClosestDependency, U);
}
}
return MemDepResult::getUnknown();
if (!ClosestDependency)
return MemDepResult::getUnknown();
if (ClosestDependency->getParent() == BB)
return MemDepResult::getDef(ClosestDependency);
// Def(U) can't be returned here because it is non-local. If no local
// dependency is found, return NonLocal, counting on the user calling
// getNonLocalPointerDependency, which will return the cached result.
NonLocalDefsCache.try_emplace(
LI, NonLocalDepResult(ClosestDependency->getParent(),
MemDepResult::getDef(ClosestDependency), nullptr));
return MemDepResult::getNonLocal();
}
MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
@ -877,7 +914,17 @@ void MemoryDependenceResults::getNonLocalPointerDependency(
assert(Loc.Ptr->getType()->isPointerTy() &&
"Can't get pointer deps of a non-pointer!");
Result.clear();
{
// Check if there is a cached Def with invariant.group. FIXME: the cache might
// be invalid if the cached instruction is removed between the call to
// getPointerDependencyFrom and this function.
auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
if (NonLocalDefIt != NonLocalDefsCache.end()) {
Result.push_back(std::move(NonLocalDefIt->second));
NonLocalDefsCache.erase(NonLocalDefIt);
return;
}
}
// This routine does not expect to deal with volatile instructions.
// Doing so would require piping through the QueryInst all the way through.
// TODO: volatiles can't be elided, but they can be reordered with other

View File

@ -7032,20 +7032,21 @@ static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
// 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
// modulo (N / D).
//
// (N / D) may need BW+1 bits in its representation. Hence, we'll use this
// bit width during computations.
// If D == 1, (N / D) == N == 2^BW, so we need one extra bit to represent
// (N / D) in general. The inverse itself always fits into BW bits, though,
// so we immediately truncate it.
APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
APInt Mod(BW + 1, 0);
Mod.setBit(BW - Mult2); // Mod = N / D
APInt I = AD.multiplicativeInverse(Mod);
APInt I = AD.multiplicativeInverse(Mod).trunc(BW);
// 4. Compute the minimum unsigned root of the equation:
// I * (B / D) mod (N / D)
APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
// To simplify the computation, we factor out the divide by D:
// (I * B mod N) / D
APInt Result = (I * B).lshr(Mult2);
// The result is guaranteed to be less than 2^BW so we may truncate it to BW
// bits.
return SE.getConstant(Result.trunc(BW));
return SE.getConstant(Result);
}
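
For reference, step 4 above computes the minimum unsigned root of A * X == B (mod N) with N = 2^BW and D = 2^Mult2, assuming D divides B (which the earlier steps of this function check). The "factor out the divide by D" trick rests on one identity:

\[ (I \cdot B) \bmod N \;=\; \Bigl(I \cdot \tfrac{B}{D}\Bigr) D \bmod \Bigl(\tfrac{N}{D} \cdot D\Bigr) \;=\; \Bigl(\bigl(I \cdot \tfrac{B}{D}\bigr) \bmod \tfrac{N}{D}\Bigr) D, \]

so shifting (I * B) mod N right by Mult2 (dividing by D) yields exactly I * (B/D) mod (N/D), the desired root, and that value already fits in BW bits.
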
/// Find the roots of the quadratic equation for the given quadratic chrec
@ -7206,17 +7207,25 @@ ScalarEvolution::howFarToZero(const SCEV *V, const Loop *L, bool ControlsExit,
// 1*N = -Start; -1*N = Start (mod 2^BW), so:
// N = Distance (as unsigned)
if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
ConstantRange CR = getUnsignedRange(Start);
const SCEV *MaxBECount;
if (!CountDown && CR.getUnsignedMin().isMinValue())
// When counting up, the worst starting value is 1, not 0.
MaxBECount = CR.getUnsignedMax().isMinValue()
? getConstant(APInt::getMinValue(CR.getBitWidth()))
: getConstant(APInt::getMaxValue(CR.getBitWidth()));
else
MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
: -CR.getUnsignedMin());
return ExitLimit(Distance, MaxBECount, false, Predicates);
APInt MaxBECount = getUnsignedRange(Distance).getUnsignedMax();
// When a loop like "for (int i = 0; i != n; ++i) { /* body */ }" is rotated,
// we end up with a loop whose backedge-taken count is n - 1. Detect this
// case, and see if we can improve the bound.
//
// Explicitly handling this here is necessary because getUnsignedRange
// isn't context-sensitive; it doesn't know that we only care about the
// range inside the loop.
const SCEV *Zero = getZero(Distance->getType());
const SCEV *One = getOne(Distance->getType());
const SCEV *DistancePlusOne = getAddExpr(Distance, One);
if (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_NE, DistancePlusOne, Zero)) {
// If Distance + 1 doesn't overflow, we can compute the maximum distance
// as "unsigned_max(Distance + 1) - 1".
ConstantRange CR = getUnsignedRange(DistancePlusOne);
MaxBECount = APIntOps::umin(MaxBECount, CR.getUnsignedMax() - 1);
}
return ExitLimit(Distance, getConstant(MaxBECount), false, Predicates);
}
// As a special case, handle the instance where Step is a positive power of

View File

@ -277,9 +277,10 @@ unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
int TargetTransformInfo::getArithmeticInstrCost(
unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
OperandValueProperties Opd2PropInfo) const {
OperandValueProperties Opd2PropInfo,
ArrayRef<const Value *> Args) const {
int Cost = TTIImpl->getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
Opd1PropInfo, Opd2PropInfo);
Opd1PropInfo, Opd2PropInfo, Args);
assert(Cost >= 0 && "TTI should not produce negative costs!");
return Cost;
}

View File

@ -526,7 +526,10 @@ static void computeKnownBitsFromAssume(const Value *V, APInt &KnownZero,
unsigned BitWidth = KnownZero.getBitWidth();
for (auto &AssumeVH : Q.AC->assumptions()) {
// Note that the patterns below need to be kept in sync with the code
// in AssumptionCache::updateAffectedValues.
for (auto &AssumeVH : Q.AC->assumptionsFor(V)) {
if (!AssumeVH)
continue;
CallInst *I = cast<CallInst>(AssumeVH);
@ -2580,51 +2583,70 @@ bool llvm::CannotBeNegativeZero(const Value *V, const TargetLibraryInfo *TLI,
return false;
}
bool llvm::CannotBeOrderedLessThanZero(const Value *V,
const TargetLibraryInfo *TLI,
unsigned Depth) {
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V))
return !CFP->getValueAPF().isNegative() || CFP->getValueAPF().isZero();
/// If \p SignBitOnly is true, test for a known 0 sign bit rather than a
/// standard ordered compare. e.g. make -0.0 olt 0.0 be true because of the sign
/// bit despite comparing equal.
static bool cannotBeOrderedLessThanZeroImpl(const Value *V,
const TargetLibraryInfo *TLI,
bool SignBitOnly,
unsigned Depth) {
if (const ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
return !CFP->getValueAPF().isNegative() ||
(!SignBitOnly && CFP->getValueAPF().isZero());
}
if (Depth == MaxDepth)
return false; // Limit search depth.
return false; // Limit search depth.
const Operator *I = dyn_cast<Operator>(V);
if (!I) return false;
if (!I)
return false;
switch (I->getOpcode()) {
default: break;
default:
break;
// Unsigned integers are always nonnegative.
case Instruction::UIToFP:
return true;
case Instruction::FMul:
// x*x is always non-negative or a NaN.
if (I->getOperand(0) == I->getOperand(1))
if (I->getOperand(0) == I->getOperand(1) &&
(!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()))
return true;
LLVM_FALLTHROUGH;
case Instruction::FAdd:
case Instruction::FDiv:
case Instruction::FRem:
return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
Depth + 1) &&
cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
Depth + 1);
case Instruction::Select:
return CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1) &&
CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
return cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
Depth + 1) &&
cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
Depth + 1);
case Instruction::FPExt:
case Instruction::FPTrunc:
// Widening/narrowing never change sign.
return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
Depth + 1);
case Instruction::Call:
Intrinsic::ID IID = getIntrinsicForCallSite(cast<CallInst>(I), TLI);
switch (IID) {
default:
break;
case Intrinsic::maxnum:
return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) ||
CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
Depth + 1) ||
cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
Depth + 1);
case Intrinsic::minnum:
return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1) &&
CannotBeOrderedLessThanZero(I->getOperand(1), TLI, Depth + 1);
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
Depth + 1) &&
cannotBeOrderedLessThanZeroImpl(I->getOperand(1), TLI, SignBitOnly,
Depth + 1);
case Intrinsic::exp:
case Intrinsic::exp2:
case Intrinsic::fabs:
@ -2636,18 +2658,30 @@ bool llvm::CannotBeOrderedLessThanZero(const Value *V,
if (CI->getBitWidth() <= 64 && CI->getSExtValue() % 2u == 0)
return true;
}
return CannotBeOrderedLessThanZero(I->getOperand(0), TLI, Depth + 1);
return cannotBeOrderedLessThanZeroImpl(I->getOperand(0), TLI, SignBitOnly,
Depth + 1);
case Intrinsic::fma:
case Intrinsic::fmuladd:
// x*x+y is non-negative if y is non-negative.
return I->getOperand(0) == I->getOperand(1) &&
CannotBeOrderedLessThanZero(I->getOperand(2), TLI, Depth + 1);
(!SignBitOnly || cast<FPMathOperator>(I)->hasNoNaNs()) &&
cannotBeOrderedLessThanZeroImpl(I->getOperand(2), TLI, SignBitOnly,
Depth + 1);
}
break;
}
return false;
}
bool llvm::CannotBeOrderedLessThanZero(const Value *V,
const TargetLibraryInfo *TLI) {
return cannotBeOrderedLessThanZeroImpl(V, TLI, false, 0);
}
bool llvm::SignBitMustBeZero(const Value *V, const TargetLibraryInfo *TLI) {
return cannotBeOrderedLessThanZeroImpl(V, TLI, true, 0);
}
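
A small hedged sketch of the distinction the new SignBitOnly path draws, using a negative zero constant, which compares equal to +0.0 but has its sign bit set (the helper name is made up):

#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Expected results for -0.0 with the two entry points above.
static void contrastNegativeZero(const TargetLibraryInfo *TLI) {
  LLVMContext Ctx;
  Constant *NegZero = ConstantFP::get(Type::getDoubleTy(Ctx), -0.0);
  bool NotOrderedBelowZero = CannotBeOrderedLessThanZero(NegZero, TLI); // true
  bool SignBitClear = SignBitMustBeZero(NegZero, TLI);                  // false
  (void)NotOrderedBelowZero;
  (void)SignBitClear;
}
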
/// If the specified value can be set by repeating the same byte in memory,
/// return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,

View File

@ -13,11 +13,13 @@
#include "CodeViewDebug.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/DebugInfo/CodeView/CVTypeDumper.h"
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/Line.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/CodeView/TypeDumper.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
@ -467,7 +469,8 @@ void CodeViewDebug::emitTypeInformation() {
CommentPrefix += ' ';
}
CVTypeDumper CVTD(nullptr, /*PrintRecordBytes=*/false);
TypeDatabase TypeDB;
CVTypeDumper CVTD(TypeDB);
TypeTable.ForEachRecord([&](TypeIndex Index, ArrayRef<uint8_t> Record) {
if (OS.isVerboseAsm()) {
// Emit a block comment describing the type record for readability.
@ -475,8 +478,8 @@ void CodeViewDebug::emitTypeInformation() {
raw_svector_ostream CommentOS(CommentBlock);
ScopedPrinter SP(CommentOS);
SP.setPrefix(CommentPrefix);
CVTD.setPrinter(&SP);
Error E = CVTD.dump(Record);
TypeDumpVisitor TDV(TypeDB, &SP, false);
Error E = CVTD.dump(Record, TDV);
if (E) {
logAllUnhandledErrors(std::move(E), errs(), "error: ");
llvm_unreachable("produced malformed type record");

View File

@ -79,6 +79,13 @@ void DIEAbbrev::Emit(const AsmPrinter *AP) const {
// Emit form type.
AP->EmitULEB128(AttrData.getForm(),
dwarf::FormEncodingString(AttrData.getForm()).data());
// Emit value for DW_FORM_implicit_const.
if (AttrData.getForm() == dwarf::DW_FORM_implicit_const) {
assert(AP->getDwarfVersion() >= 5 &&
"DW_FORM_implicit_const is supported starting from DWARFv5");
AP->EmitSLEB128(AttrData.getValue());
}
}
// Mark end of abbreviation.
@ -160,7 +167,11 @@ DIE *DIE::getParent() const {
DIEAbbrev DIE::generateAbbrev() const {
DIEAbbrev Abbrev(Tag, hasChildren());
for (const DIEValue &V : values())
Abbrev.AddAttribute(V.getAttribute(), V.getForm());
if (V.getForm() == dwarf::DW_FORM_implicit_const)
Abbrev.AddImplicitConstAttribute(V.getAttribute(),
V.getDIEInteger().getValue());
else
Abbrev.AddAttribute(V.getAttribute(), V.getForm());
return Abbrev;
}
@ -342,6 +353,8 @@ void DIEValue::dump() const {
///
void DIEInteger::EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_implicit_const:
LLVM_FALLTHROUGH;
case dwarf::DW_FORM_flag_present:
// Emit something to keep the lines and comments in sync.
// FIXME: Is there a better way to do this?
@ -406,6 +419,7 @@ void DIEInteger::EmitValue(const AsmPrinter *Asm, dwarf::Form Form) const {
///
unsigned DIEInteger::SizeOf(const AsmPrinter *AP, dwarf::Form Form) const {
switch (Form) {
case dwarf::DW_FORM_implicit_const: LLVM_FALLTHROUGH;
case dwarf::DW_FORM_flag_present: return 0;
case dwarf::DW_FORM_flag: LLVM_FALLTHROUGH;
case dwarf::DW_FORM_ref1: LLVM_FALLTHROUGH;

View File

@ -200,6 +200,8 @@ void DwarfUnit::addUInt(DIEValueList &Die, dwarf::Attribute Attribute,
Optional<dwarf::Form> Form, uint64_t Integer) {
if (!Form)
Form = DIEInteger::BestForm(false, Integer);
assert(Form != dwarf::DW_FORM_implicit_const &&
"DW_FORM_implicit_const is used only for signed integers");
Die.addValue(DIEValueAllocator, Attribute, *Form, DIEInteger(Integer));
}

View File

@ -223,6 +223,7 @@ RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
for (RegisterBankInfo::InstructionMapping &CurMapping : PossibleMappings) {
MappingCost CurCost = computeMapping(MI, CurMapping, LocalRepairPts, &Cost);
if (CurCost < Cost) {
DEBUG(dbgs() << "New best: " << CurCost << '\n');
Cost = CurCost;
BestMapping = &CurMapping;
RepairPts.clear();
@ -377,8 +378,10 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
DEBUG(dbgs() << "Evaluating mapping cost for: " << MI);
DEBUG(dbgs() << "With: " << InstrMapping << '\n');
RepairPts.clear();
if (BestCost && Cost > *BestCost)
if (BestCost && Cost > *BestCost) {
DEBUG(dbgs() << "Mapping is too expensive from the start\n");
return Cost;
}
// Moreover, to realize this mapping, the register bank of each operand must
// match this mapping. In other words, we may need to locally reassign the
@ -392,17 +395,17 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
unsigned Reg = MO.getReg();
if (!Reg)
continue;
DEBUG(dbgs() << "Opd" << OpIdx);
DEBUG(dbgs() << "Opd" << OpIdx << '\n');
const RegisterBankInfo::ValueMapping &ValMapping =
InstrMapping.getOperandMapping(OpIdx);
// If Reg is already properly mapped, this is free.
bool Assign;
if (assignmentMatch(Reg, ValMapping, Assign)) {
DEBUG(dbgs() << " is free (match).\n");
DEBUG(dbgs() << "=> is free (match).\n");
continue;
}
if (Assign) {
DEBUG(dbgs() << " is free (simple assignment).\n");
DEBUG(dbgs() << "=> is free (simple assignment).\n");
RepairPts.emplace_back(RepairingPlacement(MI, OpIdx, *TRI, *this,
RepairingPlacement::Reassign));
continue;
@ -420,8 +423,10 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
tryAvoidingSplit(RepairPt, MO, ValMapping);
// Check that the materialization of the repairing is possible.
if (!RepairPt.canMaterialize())
if (!RepairPt.canMaterialize()) {
DEBUG(dbgs() << "Mapping involves impossible repairing\n");
return MappingCost::ImpossibleCost();
}
// Account for the split cost and repair cost.
// Unless the cost is already saturated or we do not care about the cost.
@ -476,8 +481,10 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
// Stop looking into what it takes to repair, this is already
// too expensive.
if (BestCost && Cost > *BestCost)
if (BestCost && Cost > *BestCost) {
DEBUG(dbgs() << "Mapping is too expensive, stop processing\n");
return Cost;
}
// No need to accumulate more cost information.
// We need to still gather the repairing information though.
@ -485,6 +492,7 @@ RegBankSelect::MappingCost RegBankSelect::computeMapping(
break;
}
}
DEBUG(dbgs() << "Total cost is: " << Cost << "\n");
return Cost;
}
@ -550,7 +558,7 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
// Make sure the mapping is valid for MI.
assert(BestMapping.verify(MI) && "Invalid instruction mapping");
DEBUG(dbgs() << "Mapping: " << BestMapping << '\n');
DEBUG(dbgs() << "Best Mapping: " << BestMapping << '\n');
// After this call, MI may not be valid anymore.
// Do not use it.
@ -959,3 +967,20 @@ bool RegBankSelect::MappingCost::operator==(const MappingCost &Cost) const {
return LocalCost == Cost.LocalCost && NonLocalCost == Cost.NonLocalCost &&
LocalFreq == Cost.LocalFreq;
}
void RegBankSelect::MappingCost::dump() const {
print(dbgs());
dbgs() << '\n';
}
void RegBankSelect::MappingCost::print(raw_ostream &OS) const {
if (*this == ImpossibleCost()) {
OS << "impossible";
return;
}
if (isSaturated()) {
OS << "saturated";
return;
}
OS << LocalFreq << " * " << LocalCost << " + " << NonLocalCost;
}

View File

@ -19,12 +19,15 @@ using namespace llvm;
const unsigned RegisterBank::InvalidID = UINT_MAX;
RegisterBank::RegisterBank() : ID(InvalidID), Name(nullptr), Size(0) {}
RegisterBank::RegisterBank(unsigned ID, const char *Name, unsigned Size,
const uint32_t *CoveredClasses)
: ID(ID), Name(Name), Size(Size) {
ContainedRegClasses.resize(200);
ContainedRegClasses.setBitsInMask(CoveredClasses);
}
bool RegisterBank::verify(const TargetRegisterInfo &TRI) const {
assert(isValid() && "Invalid register bank");
assert(ContainedRegClasses.size() == TRI.getNumRegClasses() &&
"TRI does not match the initialization process?");
for (unsigned RCId = 0, End = TRI.getNumRegClasses(); RCId != End; ++RCId) {
const TargetRegisterClass &RC = *TRI.getRegClass(RCId);

View File

@ -56,8 +56,10 @@ RegisterBankInfo::RegisterBankInfo(RegisterBank **RegBanks,
unsigned NumRegBanks)
: RegBanks(RegBanks), NumRegBanks(NumRegBanks) {
#ifndef NDEBUG
for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx)
for (unsigned Idx = 0, End = getNumRegBanks(); Idx != End; ++Idx) {
assert(RegBanks[Idx] != nullptr && "Invalid RegisterBank");
assert(RegBanks[Idx]->isValid() && "RegisterBank should be valid");
}
#endif // NDEBUG
}
@ -74,116 +76,13 @@ bool RegisterBankInfo::verify(const TargetRegisterInfo &TRI) const {
const RegisterBank &RegBank = getRegBank(Idx);
assert(Idx == RegBank.getID() &&
"ID does not match the index in the array");
dbgs() << "Verify " << RegBank << '\n';
DEBUG(dbgs() << "Verify " << RegBank << '\n');
assert(RegBank.verify(TRI) && "RegBank is invalid");
}
#endif // NDEBUG
return true;
}
void RegisterBankInfo::createRegisterBank(unsigned ID, const char *Name) {
DEBUG(dbgs() << "Create register bank: " << ID << " with name \"" << Name
<< "\"\n");
RegisterBank &RegBank = getRegBank(ID);
assert(RegBank.getID() == RegisterBank::InvalidID &&
"A register bank should be created only once");
RegBank.ID = ID;
RegBank.Name = Name;
}
void RegisterBankInfo::addRegBankCoverage(unsigned ID, unsigned RCId,
const TargetRegisterInfo &TRI) {
RegisterBank &RB = getRegBank(ID);
unsigned NbOfRegClasses = TRI.getNumRegClasses();
DEBUG(dbgs() << "Add coverage for: " << RB << '\n');
// Check if RB is under construction.
if (!RB.isValid())
RB.ContainedRegClasses.resize(NbOfRegClasses);
else if (RB.covers(*TRI.getRegClass(RCId)))
// If RB already covers this register class, there is nothing
// to do.
return;
BitVector &Covered = RB.ContainedRegClasses;
SmallVector<unsigned, 8> WorkList;
WorkList.push_back(RCId);
Covered.set(RCId);
unsigned &MaxSize = RB.Size;
do {
unsigned RCId = WorkList.pop_back_val();
const TargetRegisterClass &CurRC = *TRI.getRegClass(RCId);
DEBUG(dbgs() << "Examine: " << TRI.getRegClassName(&CurRC)
<< "(Size*8: " << (CurRC.getSize() * 8) << ")\n");
// Remember the biggest size in bits.
MaxSize = std::max(MaxSize, CurRC.getSize() * 8);
// Walk through all sub register classes and push them into the worklist.
bool First = true;
for (BitMaskClassIterator It(CurRC.getSubClassMask(), TRI); It.isValid();
++It) {
unsigned SubRCId = It.getID();
if (!Covered.test(SubRCId)) {
if (First)
DEBUG(dbgs() << " Enqueue sub-class: ");
DEBUG(dbgs() << TRI.getRegClassName(TRI.getRegClass(SubRCId)) << ", ");
WorkList.push_back(SubRCId);
// Remember that we saw the sub class.
Covered.set(SubRCId);
First = false;
}
}
if (!First)
DEBUG(dbgs() << '\n');
// Push also all the register classes that can be accessed via a
// subreg index, i.e., its subreg-class (which is different than
// its subclass).
//
// Note: It would probably be faster to go the other way around
// and have this method add only super classes, since this
// information is available in a more efficient way. However, it
// feels less natural for the client of this APIs plus we will
// TableGen the whole bitset at some point, so compile time for
// the initialization is not very important.
First = true;
for (unsigned SubRCId = 0; SubRCId < NbOfRegClasses; ++SubRCId) {
if (Covered.test(SubRCId))
continue;
bool Pushed = false;
const TargetRegisterClass *SubRC = TRI.getRegClass(SubRCId);
for (SuperRegClassIterator SuperRCIt(SubRC, &TRI); SuperRCIt.isValid();
++SuperRCIt) {
if (Pushed)
break;
for (BitMaskClassIterator It(SuperRCIt.getMask(), TRI); It.isValid();
++It) {
unsigned SuperRCId = It.getID();
if (SuperRCId == RCId) {
if (First)
DEBUG(dbgs() << " Enqueue subreg-class: ");
DEBUG(dbgs() << TRI.getRegClassName(SubRC) << ", ");
WorkList.push_back(SubRCId);
// Remember that we saw the sub class.
Covered.set(SubRCId);
Pushed = true;
First = false;
break;
}
}
}
}
if (!First)
DEBUG(dbgs() << '\n');
} while (!WorkList.empty());
}
const RegisterBank *
RegisterBankInfo::getRegBank(unsigned Reg, const MachineRegisterInfo &MRI,
const TargetRegisterInfo &TRI) const {

View File

@ -1840,7 +1840,8 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
OS << "!\"" << DIV->getName() << '\"';
else
MO.print(OS, MST, TRI);
} else if (TRI && (isInsertSubreg() || isRegSequence()) && MO.isImm()) {
} else if (TRI && (isInsertSubreg() || isRegSequence() ||
(isSubregToReg() && i == 3)) && MO.isImm()) {
OS << TRI->getSubRegIndexName(MO.getImm());
} else if (i == AsmDescOp && MO.isImm()) {
// Pretty print the inline asm operand descriptor.

View File

@ -1715,7 +1715,8 @@ ValueTrackerResult ValueTracker::getNextSourceFromBitcast() {
// Bitcasts with more than one def are not supported.
if (Def->getDesc().getNumDefs() != 1)
return ValueTrackerResult();
if (Def->getOperand(DefIdx).getSubReg() != DefSubReg)
const MachineOperand DefOp = Def->getOperand(DefIdx);
if (DefOp.getSubReg() != DefSubReg)
// If we look for a different subreg, it means we want a subreg of the src.
// Bails as we do not support composing subregs yet.
return ValueTrackerResult();
@ -1735,6 +1736,14 @@ ValueTrackerResult ValueTracker::getNextSourceFromBitcast() {
return ValueTrackerResult();
SrcIdx = OpIdx;
}
// Stop when any user of the bitcast is a SUBREG_TO_REG; replacing it with a
// COPY will break the assumed guarantees for the upper bits.
for (const MachineInstr &UseMI : MRI.use_nodbg_instructions(DefOp.getReg())) {
if (UseMI.isSubregToReg())
return ValueTrackerResult();
}
const MachineOperand &Src = Def->getOperand(SrcIdx);
return ValueTrackerResult(Src.getReg(), Src.getSubReg());
}

View File

@ -310,19 +310,19 @@ void SUnit::biasCriticalPath() {
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
static void dumpSUIdentifier(const ScheduleDAG &DAG, const SUnit &SU) {
if (&SU == &DAG.ExitSU)
dbgs() << "ExitSU";
else if (&SU == &DAG.EntrySU)
dbgs() << "EntrySU";
void SUnit::print(raw_ostream &OS, const ScheduleDAG *DAG) const {
if (this == &DAG->ExitSU)
OS << "ExitSU";
else if (this == &DAG->EntrySU)
OS << "EntrySU";
else
dbgs() << "SU(" << SU.NodeNum << ")";
OS << "SU(" << NodeNum << ")";
}
/// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
/// a group of nodes flagged together.
void SUnit::dump(const ScheduleDAG *G) const {
dumpSUIdentifier(*G, *this);
print(dbgs(), G);
dbgs() << ": ";
G->dumpNode(this);
}
@ -352,7 +352,7 @@ void SUnit::dumpAll(const ScheduleDAG *G) const {
case SDep::Output: dbgs() << "out "; break;
case SDep::Order: dbgs() << "ord "; break;
}
dumpSUIdentifier(*G, *I->getSUnit());
I->getSUnit()->print(dbgs(), G);
if (I->isArtificial())
dbgs() << " *";
dbgs() << ": Latency=" << I->getLatency();
@ -372,7 +372,7 @@ void SUnit::dumpAll(const ScheduleDAG *G) const {
case SDep::Output: dbgs() << "out "; break;
case SDep::Order: dbgs() << "ord "; break;
}
dumpSUIdentifier(*G, *I->getSUnit());
I->getSUnit()->print(dbgs(), G);
if (I->isArtificial())
dbgs() << " *";
dbgs() << ": Latency=" << I->getLatency();


@ -5361,8 +5361,9 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
// fold (select false, X, Y) -> Y
return !N0C->isNullValue() ? N1 : N2;
}
// fold (select C, 1, X) -> (or C, X)
if (VT == MVT::i1 && isOneConstant(N1))
// fold (select X, X, Y) -> (or X, Y)
// fold (select X, 1, Y) -> (or X, Y)
if (VT == VT0 && VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
if (SDValue V = foldSelectOfConstants(N))
@ -5380,16 +5381,9 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
AddToWorklist(NOTNode.getNode());
return DAG.getNode(ISD::OR, SDLoc(N), VT, NOTNode, N1);
}
// fold (select C, X, 0) -> (and C, X)
if (VT == MVT::i1 && isNullConstant(N2))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
// fold (select X, X, Y) -> (or X, Y)
// fold (select X, 1, Y) -> (or X, Y)
if (VT == MVT::i1 && (N0 == N1 || isOneConstant(N1)))
return DAG.getNode(ISD::OR, SDLoc(N), VT, N0, N2);
// fold (select X, Y, X) -> (and X, Y)
// fold (select X, Y, 0) -> (and X, Y)
if (VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
if (VT == VT0 && VT == MVT::i1 && (N0 == N2 || isNullConstant(N2)))
return DAG.getNode(ISD::AND, SDLoc(N), VT, N0, N1);
// If we can fold this based on the true/false value, do so.
@ -5470,7 +5464,6 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
}
// select (xor Cond, 1), X, Y -> select Cond, Y, X
// select (xor Cond, 0), X, Y -> select Cond, X, Y
if (VT0 == MVT::i1) {
if (N0->getOpcode() == ISD::XOR) {
if (auto *C = dyn_cast<ConstantSDNode>(N0->getOperand(1))) {
@ -5478,9 +5471,6 @@ SDValue DAGCombiner::visitSELECT(SDNode *N) {
if (C->isOne())
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(),
Cond0, N2, N1);
else
return DAG.getNode(ISD::SELECT, SDLoc(N), N1.getValueType(),
Cond0, N1, N2);
}
}
}
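The i1 folds touched by this hunk are plain Boolean identities, and the added VT == VT0 guard makes sure both the condition and the result really are i1. A standalone check of those identities by exhaustive enumeration (my own helper name, not DAGCombiner code):

#include <cassert>

static bool sel(bool C, bool T, bool F) { return C ? T : F; }

int main() {
  for (int c = 0; c < 2; ++c)
    for (int x = 0; x < 2; ++x)
      for (int y = 0; y < 2; ++y) {
        // fold (select X, 1, Y) -> (or X, Y)  and  (select X, X, Y) -> (or X, Y)
        assert(sel(c, true, y) == (c || y));
        assert(sel(x, x, y) == (x || y));
        // fold (select X, Y, 0) -> (and X, Y)  and  (select X, Y, X) -> (and X, Y)
        assert(sel(c, x, false) == (c && x));
        assert(sel(x, y, x) == (x && y));
        // select (xor Cond, 1), X, Y -> select Cond, Y, X
        assert(sel(c ^ 1, x, y) == sel(c, y, x));
      }
  return 0;
}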
@ -8136,7 +8126,8 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
if ((AllowFusion || HasFMAD) && Aggressive) {
// fold (fadd (fma x, y, (fmul u, v)), z) -> (fma x, y (fma u, v, z))
if (N0.getOpcode() == PreferredFusedOpcode &&
N0.getOperand(2).getOpcode() == ISD::FMUL) {
N0.getOperand(2).getOpcode() == ISD::FMUL &&
N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
@ -8147,7 +8138,8 @@ SDValue DAGCombiner::visitFADDForFMACombine(SDNode *N) {
// fold (fadd x, (fma y, z, (fmul u, v)) -> (fma y, z (fma u, v, x))
if (N1->getOpcode() == PreferredFusedOpcode &&
N1.getOperand(2).getOpcode() == ISD::FMUL) {
N1.getOperand(2).getOpcode() == ISD::FMUL &&
N1->hasOneUse() && N1.getOperand(2)->hasOneUse()) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N1.getOperand(0), N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
@ -8379,7 +8371,8 @@ SDValue DAGCombiner::visitFSUBForFMACombine(SDNode *N) {
// fold (fsub (fma x, y, (fmul u, v)), z)
// -> (fma x, y (fma u, v, (fneg z)))
if (N0.getOpcode() == PreferredFusedOpcode &&
N0.getOperand(2).getOpcode() == ISD::FMUL) {
N0.getOperand(2).getOpcode() == ISD::FMUL &&
N0->hasOneUse() && N0.getOperand(2)->hasOneUse()) {
return DAG.getNode(PreferredFusedOpcode, SL, VT,
N0.getOperand(0), N0.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
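The added hasOneUse() checks only restrict when the reassociation fires (so a shared fma/fmul is not duplicated); the underlying identity is (x*y + (u*v + z)) == ((x*y + u*v) + z) up to floating-point reassociation, which is why these combines sit behind the AllowFusion/HasFMAD and Aggressive checks above. A small numeric illustration with std::fma, using values chosen so the two orderings agree exactly (in general they can differ in the last bits):

#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
  double x = 2.0, y = 3.0, u = 5.0, v = 7.0, z = 11.0;

  // (fadd (fma x, y, (fmul u, v)), z), the form matched by the combine.
  double before = std::fma(x, y, u * v) + z;

  // (fma x, y, (fma u, v, z)), the nested form the combine produces.
  double after = std::fma(x, y, std::fma(u, v, z));

  std::printf("before=%g after=%g\n", before, after);
  assert(before == after); // exact for these small integer-valued doubles
  return 0;
}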


@ -330,8 +330,6 @@ SDValue SelectionDAGLegalize::PerformInsertVectorEltInMemory(SDValue Vec,
// supported by the target.
EVT VT = Tmp1.getValueType();
EVT EltVT = VT.getVectorElementType();
EVT IdxVT = Tmp3.getValueType();
EVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
SDValue StackPtr = DAG.CreateStackTemporary(VT);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
@ -341,13 +339,8 @@ SDValue SelectionDAGLegalize::PerformInsertVectorEltInMemory(SDValue Vec,
DAG.getEntryNode(), dl, Tmp1, StackPtr,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI));
// Truncate or zero extend offset to target pointer type.
Tmp3 = DAG.getZExtOrTrunc(Tmp3, dl, PtrVT);
// Add the offset to the index.
unsigned EltSize = EltVT.getSizeInBits()/8;
Tmp3 = DAG.getNode(ISD::MUL, dl, IdxVT, Tmp3,
DAG.getConstant(EltSize, dl, IdxVT));
SDValue StackPtr2 = DAG.getNode(ISD::ADD, dl, IdxVT, Tmp3, StackPtr);
SDValue StackPtr2 = TLI.getVectorElementPointer(DAG, StackPtr, VT, Tmp3);
// Store the scalar value.
Ch = DAG.getTruncStore(Ch, dl, Tmp2, StackPtr2, MachinePointerInfo(), EltVT);
// Load the updated vector.
@ -1209,20 +1202,16 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
}
}
EVT VecVT = Vec.getValueType();
if (!Ch.getNode()) {
// Store the value to a temporary stack slot, then LOAD the returned part.
StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
StackPtr = DAG.CreateStackTemporary(VecVT);
Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr,
MachinePointerInfo());
}
// Add the offset to the index.
unsigned EltSize = Vec.getScalarValueSizeInBits() / 8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy(DAG.getDataLayout()));
StackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx, StackPtr);
StackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
SDValue NewLoad;
@ -1232,7 +1221,7 @@ SDValue SelectionDAGLegalize::ExpandExtractFromVectorThroughStack(SDValue Op) {
else
NewLoad = DAG.getExtLoad(ISD::EXTLOAD, dl, Op.getValueType(), Ch, StackPtr,
MachinePointerInfo(),
Vec.getValueType().getVectorElementType());
VecVT.getVectorElementType());
// Replace the chain going out of the store, by the one out of the load.
DAG.ReplaceAllUsesOfValueWith(Ch, SDValue(NewLoad.getNode(), 1));
@ -1256,8 +1245,8 @@ SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
SDLoc dl(Op);
// Store the value to a temporary stack slot, then LOAD the returned part.
SDValue StackPtr = DAG.CreateStackTemporary(Vec.getValueType());
EVT VecVT = Vec.getValueType();
SDValue StackPtr = DAG.CreateStackTemporary(VecVT);
int FI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
MachinePointerInfo PtrInfo =
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);
@ -1266,16 +1255,7 @@ SDValue SelectionDAGLegalize::ExpandInsertToVectorThroughStack(SDValue Op) {
SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, Vec, StackPtr, PtrInfo);
// Then store the inserted part.
// Add the offset to the index.
unsigned EltSize = Vec.getScalarValueSizeInBits() / 8;
Idx = DAG.getNode(ISD::MUL, dl, Idx.getValueType(), Idx,
DAG.getConstant(EltSize, SDLoc(Vec), Idx.getValueType()));
Idx = DAG.getZExtOrTrunc(Idx, dl, TLI.getPointerTy(DAG.getDataLayout()));
SDValue SubStackPtr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), Idx,
StackPtr);
SDValue SubStackPtr = TLI.getVectorElementPointer(DAG, StackPtr, VecVT, Idx);
// Store the subvector.
Ch = DAG.getStore(Ch, dl, Part, SubStackPtr, MachinePointerInfo());
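The call sites above were open-coding the same address computation that the removed lines show: zero-extend or truncate the index to pointer width, multiply by the element size in bytes, and add it to the stack-slot base; TLI.getVectorElementPointer centralizes that arithmetic. A standalone sketch of the computation on a plain byte buffer (my own names; the real helper works on SDValues):

#include <cassert>
#include <cstdint>
#include <cstring>

// Compute &Base[Index] for a vector of EltBits-wide elements spilled to memory.
static uint8_t *vectorElementPointer(uint8_t *Base, uint64_t Index,
                                     unsigned EltBits) {
  uint64_t EltSize = EltBits / 8;      // the removed code's FIXME: should be ABI size
  return Base + Index * EltSize;       // ptr = base + idx * sizeof(elt)
}

int main() {
  uint32_t Vec[4] = {10, 20, 30, 40};  // a v4i32 "spilled" to the stack
  uint8_t *Base = reinterpret_cast<uint8_t *>(Vec);

  uint32_t Elt;
  std::memcpy(&Elt, vectorElementPointer(Base, 2, 32), sizeof(Elt));
  assert(Elt == 30);                   // extractelement <4 x i32> %vec, i32 2
  return 0;
}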


@ -57,8 +57,6 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::BSWAP: Res = PromoteIntRes_BSWAP(N); break;
case ISD::BUILD_PAIR: Res = PromoteIntRes_BUILD_PAIR(N); break;
case ISD::Constant: Res = PromoteIntRes_Constant(N); break;
case ISD::CONVERT_RNDSAT:
Res = PromoteIntRes_CONVERT_RNDSAT(N); break;
case ISD::CTLZ_ZERO_UNDEF:
case ISD::CTLZ: Res = PromoteIntRes_CTLZ(N); break;
case ISD::CTPOP: Res = PromoteIntRes_CTPOP(N); break;
@ -354,18 +352,6 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Constant(SDNode *N) {
return Result;
}
SDValue DAGTypeLegalizer::PromoteIntRes_CONVERT_RNDSAT(SDNode *N) {
ISD::CvtCode CvtCode = cast<CvtRndSatSDNode>(N)->getCvtCode();
assert ((CvtCode == ISD::CVT_SS || CvtCode == ISD::CVT_SU ||
CvtCode == ISD::CVT_US || CvtCode == ISD::CVT_UU ||
CvtCode == ISD::CVT_SF || CvtCode == ISD::CVT_UF) &&
"can only promote integers");
EVT OutVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
return DAG.getConvertRndSat(OutVT, SDLoc(N), N->getOperand(0),
N->getOperand(1), N->getOperand(2),
N->getOperand(3), N->getOperand(4), CvtCode);
}
SDValue DAGTypeLegalizer::PromoteIntRes_CTLZ(SDNode *N) {
// Zero extend to the promoted type and do the count there.
SDValue Op = ZExtPromotedInteger(N->getOperand(0));
@ -512,7 +498,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_MGATHER(MaskedGatherSDNode *N) {
N->getIndex()};
SDValue Res = DAG.getMaskedGather(DAG.getVTList(NVT, MVT::Other),
N->getMemoryVT(), dl, Ops,
N->getMemOperand());
N->getMemOperand());
// Legalize the chain result - switch anything that used the old chain to
// use the new one.
ReplaceValueWith(SDValue(N, 1), Res.getValue(1));
@ -887,8 +873,6 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::BUILD_VECTOR: Res = PromoteIntOp_BUILD_VECTOR(N); break;
case ISD::CONCAT_VECTORS: Res = PromoteIntOp_CONCAT_VECTORS(N); break;
case ISD::EXTRACT_VECTOR_ELT: Res = PromoteIntOp_EXTRACT_VECTOR_ELT(N); break;
case ISD::CONVERT_RNDSAT:
Res = PromoteIntOp_CONVERT_RNDSAT(N); break;
case ISD::INSERT_VECTOR_ELT:
Res = PromoteIntOp_INSERT_VECTOR_ELT(N, OpNo);break;
case ISD::SCALAR_TO_VECTOR:
@ -1068,18 +1052,6 @@ SDValue DAGTypeLegalizer::PromoteIntOp_BUILD_VECTOR(SDNode *N) {
return SDValue(DAG.UpdateNodeOperands(N, NewOps), 0);
}
SDValue DAGTypeLegalizer::PromoteIntOp_CONVERT_RNDSAT(SDNode *N) {
ISD::CvtCode CvtCode = cast<CvtRndSatSDNode>(N)->getCvtCode();
assert ((CvtCode == ISD::CVT_SS || CvtCode == ISD::CVT_SU ||
CvtCode == ISD::CVT_US || CvtCode == ISD::CVT_UU ||
CvtCode == ISD::CVT_FS || CvtCode == ISD::CVT_FU) &&
"can only promote integer arguments");
SDValue InOp = GetPromotedInteger(N->getOperand(0));
return DAG.getConvertRndSat(N->getValueType(0), SDLoc(N), InOp,
N->getOperand(1), N->getOperand(2),
N->getOperand(3), N->getOperand(4), CvtCode);
}
SDValue DAGTypeLegalizer::PromoteIntOp_INSERT_VECTOR_ELT(SDNode *N,
unsigned OpNo) {
if (OpNo == 1) {


@ -1021,22 +1021,6 @@ void DAGTypeLegalizer::GetPairElements(SDValue Pair,
DAG.getIntPtrConstant(1, dl));
}
SDValue DAGTypeLegalizer::GetVectorElementPointer(SDValue VecPtr, EVT EltVT,
SDValue Index) {
SDLoc dl(Index);
// Make sure the index type is big enough to compute in.
Index = DAG.getZExtOrTrunc(Index, dl, TLI.getPointerTy(DAG.getDataLayout()));
// Calculate the element offset and add it to the pointer.
unsigned EltSize = EltVT.getSizeInBits() / 8; // FIXME: should be ABI size.
assert(EltSize * 8 == EltVT.getSizeInBits() &&
"Converting bits to bytes lost precision");
Index = DAG.getNode(ISD::MUL, dl, Index.getValueType(), Index,
DAG.getConstant(EltSize, dl, Index.getValueType()));
return DAG.getNode(ISD::ADD, dl, Index.getValueType(), Index, VecPtr);
}
/// Build an integer with low bits Lo and high bits Hi.
SDValue DAGTypeLegalizer::JoinIntegers(SDValue Lo, SDValue Hi) {
// Arbitrarily use dlHi for result SDLoc
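JoinIntegers, whose header comment survives above, builds a 2N-bit value from an N-bit low half and an N-bit high half. The scalar equivalent is just a shift and an or; a minimal standalone version for 32-bit halves (a hypothetical helper, not the DAG form):

#include <cassert>
#include <cstdint>

// Build an integer with low bits Lo and high bits Hi.
static uint64_t joinIntegers(uint32_t Lo, uint32_t Hi) {
  return (static_cast<uint64_t>(Hi) << 32) | Lo;
}

int main() {
  assert(joinIntegers(0x89ABCDEFu, 0x01234567u) == 0x0123456789ABCDEFULL);
  return 0;
}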

Some files were not shown because too many files have changed in this diff.