Update llvm/clang to r241361.

This commit is contained in:
Dimitry Andric 2015-07-05 22:34:42 +00:00
commit 3dac3a9bad
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang-trunk/; revision=285181
1569 changed files with 29905 additions and 14367 deletions

View File

@ -40,7 +40,7 @@ typedef bool lto_bool_t;
* @{
*/
#define LTO_API_VERSION 15
#define LTO_API_VERSION 16
/**
* \since prior to LTO_API_VERSION=3
@ -280,39 +280,15 @@ lto_module_get_symbol_attribute(lto_module_t mod, unsigned int index);
/**
* Returns the number of dependent libraries in the object module.
* Returns the module's linker options.
*
* \since LTO_API_VERSION=8
*/
extern unsigned int
lto_module_get_num_deplibs(lto_module_t mod);
/**
* Returns the ith dependent library in the module.
* The linker options may consist of multiple flags. It is the linker's
* responsibility to split the flags using a platform-specific mechanism.
*
* \since LTO_API_VERSION=8
* \since LTO_API_VERSION=16
*/
extern const char*
lto_module_get_deplib(lto_module_t mod, unsigned int index);
/**
* Returns the number of linker options in the object module.
*
* \since LTO_API_VERSION=8
*/
extern unsigned int
lto_module_get_num_linkeropts(lto_module_t mod);
/**
* Returns the ith linker option in the module.
*
* \since LTO_API_VERSION=8
*/
extern const char*
lto_module_get_linkeropt(lto_module_t mod, unsigned int index);
lto_module_get_linkeropts(lto_module_t mod);
/**

View File

@ -1038,7 +1038,9 @@ class APInt {
/// the validity of the less-than relationship.
///
/// \returns true if *this < RHS when considered unsigned.
bool ult(uint64_t RHS) const { return ult(APInt(getBitWidth(), RHS)); }
bool ult(uint64_t RHS) const {
return getActiveBits() > 64 ? false : getZExtValue() < RHS;
}
/// \brief Signed less than comparison
///
@ -1054,7 +1056,9 @@ class APInt {
/// the validity of the less-than relationship.
///
/// \returns true if *this < RHS when considered signed.
bool slt(uint64_t RHS) const { return slt(APInt(getBitWidth(), RHS)); }
bool slt(int64_t RHS) const {
return getMinSignedBits() > 64 ? isNegative() : getSExtValue() < RHS;
}
/// \brief Unsigned less or equal comparison
///
@ -1070,7 +1074,7 @@ class APInt {
/// the validity of the less-or-equal relationship.
///
/// \returns true if *this <= RHS when considered unsigned.
bool ule(uint64_t RHS) const { return ule(APInt(getBitWidth(), RHS)); }
bool ule(uint64_t RHS) const { return !ugt(RHS); }
/// \brief Signed less or equal comparison
///
@ -1086,7 +1090,7 @@ class APInt {
/// validity of the less-or-equal relationship.
///
/// \returns true if *this <= RHS when considered signed.
bool sle(uint64_t RHS) const { return sle(APInt(getBitWidth(), RHS)); }
bool sle(uint64_t RHS) const { return !sgt(RHS); }
/// \brief Unsigned greater than comparison
///
@ -1102,7 +1106,9 @@ class APInt {
/// the validity of the greater-than relationship.
///
/// \returns true if *this > RHS when considered unsigned.
bool ugt(uint64_t RHS) const { return ugt(APInt(getBitWidth(), RHS)); }
bool ugt(uint64_t RHS) const {
return getActiveBits() > 64 ? true : getZExtValue() > RHS;
}
/// \brief Signed greater than comparison
///
@ -1118,7 +1124,9 @@ class APInt {
/// the validity of the greater-than relationship.
///
/// \returns true if *this > RHS when considered signed.
bool sgt(uint64_t RHS) const { return sgt(APInt(getBitWidth(), RHS)); }
bool sgt(int64_t RHS) const {
return getMinSignedBits() > 64 ? !isNegative() : getSExtValue() > RHS;
}
/// \brief Unsigned greater or equal comparison
///
@ -1134,7 +1142,7 @@ class APInt {
/// the validity of the greater-or-equal relationship.
///
/// \returns true if *this >= RHS when considered unsigned.
bool uge(uint64_t RHS) const { return uge(APInt(getBitWidth(), RHS)); }
bool uge(uint64_t RHS) const { return !ult(RHS); }
/// \brief Signed greater or equal comparison
///
@ -1150,7 +1158,7 @@ class APInt {
/// the validity of the greater-or-equal relationship.
///
/// \returns true if *this >= RHS when considered signed.
bool sge(uint64_t RHS) const { return sge(APInt(getBitWidth(), RHS)); }
bool sge(int64_t RHS) const { return !slt(RHS); }
/// This operation tests if there are any pairs of corresponding bits
/// between this APInt and RHS that are both set.
@ -1896,11 +1904,11 @@ inline APInt Xor(const APInt &LHS, const APInt &RHS) { return LHS ^ RHS; }
/// Performs a bitwise complement operation on APInt.
inline APInt Not(const APInt &APIVal) { return ~APIVal; }
} // namespace APIntOps
} // End of APIntOps namespace
// See friend declaration above. This additional declaration is required in
// order to compile LLVM with IBM xlC compiler.
hash_code hash_value(const APInt &Arg);
} // namespace llvm
} // End of llvm namespace
#endif

View File

@ -33,6 +33,15 @@ class APSInt : public APInt {
explicit APSInt(APInt I, bool isUnsigned = true)
: APInt(std::move(I)), IsUnsigned(isUnsigned) {}
/// Construct an APSInt from a string representation.
///
/// This constructor interprets the string \p Str using the radix of 10.
/// The interpretation stops at the end of the string. The bit width of the
/// constructed APSInt is determined automatically.
///
/// \param Str the string to be interpreted.
explicit APSInt(StringRef Str);
APSInt &operator=(APInt RHS) {
// Retain our current sign.
APInt::operator=(std::move(RHS));

View File

@ -286,6 +286,11 @@ namespace llvm {
return MutableArrayRef<T>(data()+N, M);
}
MutableArrayRef<T> drop_back(unsigned N) const {
assert(this->size() >= N && "Dropping more elements than exist");
return slice(0, this->size() - N);
}
/// @}
/// @name Operator Overloads
/// @{
@ -361,6 +366,6 @@ namespace llvm {
template <typename T> struct isPodLike<ArrayRef<T> > {
static const bool value = true;
};
} // namespace llvm
}
#endif

View File

@ -568,7 +568,7 @@ class BitVector {
}
};
} // namespace llvm
} // End llvm namespace
namespace std {
/// Implement std::swap in terms of BitVector swap.

View File

@ -42,7 +42,7 @@ struct DenseMapPair : public std::pair<KeyT, ValueT> {
ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
} // namespace detail
}
template <
typename KeyT, typename ValueT, typename KeyInfoT = DenseMapInfo<KeyT>,

View File

@ -14,6 +14,8 @@
#ifndef LLVM_ADT_DENSEMAPINFO_H
#define LLVM_ADT_DENSEMAPINFO_H
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
@ -163,6 +165,31 @@ struct DenseMapInfo<std::pair<T, U> > {
}
};
// Provide DenseMapInfo for StringRefs.
template <> struct DenseMapInfo<StringRef> {
static inline StringRef getEmptyKey() {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)),
0);
}
static inline StringRef getTombstoneKey() {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)),
0);
}
static unsigned getHashValue(StringRef Val) {
assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
assert(Val.data() != getTombstoneKey().data() &&
"Cannot hash the tombstone key!");
return (unsigned)(hash_value(Val));
}
static bool isEqual(StringRef LHS, StringRef RHS) {
if (RHS.data() == getEmptyKey().data())
return LHS.data() == getEmptyKey().data();
if (RHS.data() == getTombstoneKey().data())
return LHS.data() == getTombstoneKey().data();
return LHS == RHS;
}
};
} // end namespace llvm
#endif

View File

@ -32,7 +32,7 @@ template <typename KeyT> class DenseSetPair : public DenseSetEmpty {
DenseSetEmpty &getSecond() { return *this; }
const DenseSetEmpty &getSecond() const { return *this; }
};
} // namespace detail
}
/// DenseSet - This implements a dense probed hash-table based set.
template<typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT> >

View File

@ -288,6 +288,6 @@ iterator_range<idf_ext_iterator<T, SetTy>> inverse_depth_first_ext(const T& G,
return make_range(idf_ext_begin(G, S), idf_ext_end(G, S));
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -278,6 +278,6 @@ class EquivalenceClasses {
};
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -101,6 +101,6 @@ struct GraphTraits<Inverse<Inverse<T> > > {
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -80,6 +80,6 @@ template <typename T, typename ToIndexT = llvm::identity<unsigned> >
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -83,6 +83,6 @@ class IntEqClasses {
void uncompress();
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -204,6 +204,6 @@ void operator>=(const Optional<T> &X, const Optional<U> &Y);
template<typename T, typename U>
void operator>(const Optional<T> &X, const Optional<U> &Y);
} // namespace llvm
} // end llvm namespace
#endif

View File

@ -507,6 +507,6 @@ namespace llvm {
RHS.template get<U>()));
}
};
} // namespace llvm
}
#endif

View File

@ -295,6 +295,6 @@ class ReversePostOrderTraversal {
rpo_iterator end() { return Blocks.rend(); }
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -79,6 +79,6 @@ class PriorityQueue : public std::priority_queue<T, Sequence, Compare> {
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -240,6 +240,6 @@ template <class T> scc_iterator<Inverse<T> > scc_end(const Inverse<T> &G) {
return scc_iterator<Inverse<T> >::end(G);
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -417,6 +417,6 @@ template <typename T> struct deref {
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -66,6 +66,6 @@ void set_subtract(S1Ty &S1, const S2Ty &S2) {
S1.erase(*SI);
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -225,7 +225,7 @@ class SmallSetVector : public SetVector<T, SmallVector<T, N>, SmallSet<T, N> > {
}
};
} // namespace llvm
} // End llvm namespace
// vim: sw=2 ai
#endif

View File

@ -588,7 +588,7 @@ operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
return Result;
}
} // namespace llvm
} // End llvm namespace
namespace std {
/// Implement std::swap in terms of BitVector swap.

View File

@ -334,7 +334,7 @@ class SmallPtrSet : public SmallPtrSetImpl<PtrType> {
}
};
} // namespace llvm
}
namespace std {
/// Implement std::swap in terms of SmallPtrSet swap.

View File

@ -292,6 +292,6 @@ class SmallString : public SmallVector<char, InternalLen> {
}
};
} // namespace llvm
}
#endif

View File

@ -924,7 +924,7 @@ static inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
return X.capacity_in_bytes();
}
} // namespace llvm
} // End llvm namespace
namespace std {
/// Implement std::swap in terms of SmallVector swap.
@ -940,6 +940,6 @@ namespace std {
swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
LHS.swap(RHS);
}
} // namespace std
}
#endif

View File

@ -176,6 +176,6 @@ void PrintStatistics();
/// \brief Print statistics to the given output stream.
void PrintStatistics(raw_ostream &OS);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -207,6 +207,6 @@ inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) {
return join_impl(Begin, End, Separator, tag());
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -447,6 +447,6 @@ class StringMapIterator : public StringMapConstIterator<ValueTy> {
}
};
} // namespace llvm
}
#endif

View File

@ -566,6 +566,6 @@ namespace llvm {
// StringRefs can be treated like a POD type.
template <typename T> struct isPodLike;
template <> struct isPodLike<StringRef> { static const bool value = true; };
} // namespace llvm
}
#endif

View File

@ -29,6 +29,6 @@ namespace llvm {
return base::insert(std::make_pair(Key, '\0'));
}
};
} // namespace llvm
}
#endif // LLVM_ADT_STRINGSET_H

View File

@ -85,7 +85,9 @@ class Triple {
spir64, // SPIR: standard portable IR for OpenCL 64-bit version
kalimba, // Kalimba: generic kalimba
shave, // SHAVE: Movidius vector VLIW processors
LastArchType = shave
wasm32, // WebAssembly with 32-bit pointers
wasm64, // WebAssembly with 64-bit pointers
LastArchType = wasm64
};
enum SubArchType {
NoSubArch,
@ -609,7 +611,7 @@ class Triple {
/// @}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -537,6 +537,6 @@ namespace llvm {
}
/// @}
} // namespace llvm
}
#endif

View File

@ -97,6 +97,6 @@ unsigned ComputeEditDistance(ArrayRef<T> FromArray, ArrayRef<T> ToArray,
return Result;
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -655,7 +655,7 @@ struct ilist : public iplist<NodeTy> {
void resize(size_type newsize) { resize(newsize, NodeTy()); }
};
} // namespace llvm
} // End llvm namespace
namespace std {
// Ensure that swap uses the fast list swap...

View File

@ -101,6 +101,6 @@ class ilist_node : private ilist_half_node<NodeTy> {
/// @}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -162,6 +162,8 @@ class iterator_adaptor_base
int>::type = 0)
: I(std::forward<U &&>(u)) {}
const WrappedIteratorT &wrapped() const { return I; }
public:
typedef DifferenceTypeT difference_type;
@ -239,6 +241,6 @@ struct pointee_iterator
T &operator*() const { return **this->I; }
};
} // namespace llvm
}
#endif

View File

@ -51,6 +51,6 @@ template <class T> iterator_range<T> make_range(T x, T y) {
template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
return iterator_range<T>(std::move(p.first), std::move(p.second));
}
} // namespace llvm
}
#endif

View File

@ -56,6 +56,34 @@ class MemTransferInst;
class MemIntrinsic;
class DominatorTree;
/// The possible results of an alias query.
///
/// These results are always computed between two MemoryLocation objects as
/// a query to some alias analysis.
///
/// Note that these are unscoped enumerations because we would like to support
/// implicitly testing a result for the existence of any possible aliasing with
/// a conversion to bool, but an "enum class" doesn't support this. The
/// canonical names from the literature are suffixed and unique anyways, and so
/// they serve as global constants in LLVM for these results.
///
/// See docs/AliasAnalysis.html for more information on the specific meanings
/// of these values.
enum AliasResult {
/// The two locations do not alias at all.
///
/// This value is arranged to convert to false, while all other values
/// convert to true. This allows a boolean context to convert the result to
/// a binary flag indicating whether there is the possibility of aliasing.
NoAlias = 0,
/// The two locations may or may not alias. This is the least precise result.
MayAlias,
/// The two locations alias, but only due to a partial overlap.
PartialAlias,
/// The two locations precisely alias each other.
MustAlias,
};
class AliasAnalysis {
protected:
const DataLayout *DL;
@ -95,22 +123,6 @@ class AliasAnalysis {
/// Alias Queries...
///
/// Alias analysis result - Either we know for sure that it does not alias, we
/// know for sure it must alias, or we don't know anything: The two pointers
/// _might_ alias. This enum is designed so you can do things like:
/// if (AA.alias(P1, P2)) { ... }
/// to check to see if two pointers might alias.
///
/// See docs/AliasAnalysis.html for more information on the specific meanings
/// of these values.
///
enum AliasResult {
NoAlias = 0, ///< No dependencies.
MayAlias, ///< Anything goes.
PartialAlias, ///< Pointers differ, but pointees overlap.
MustAlias ///< Pointers are equal.
};
/// alias - The main low level interface to the alias analysis implementation.
/// Returns an AliasResult indicating whether the two pointers are aliased to
/// each other. This is the interface that must be implemented by specific
@ -558,6 +570,6 @@ bool isIdentifiedObject(const Value *V);
/// IdentifiedObjects.
bool isIdentifiedFunctionLocal(const Value *V);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -117,24 +117,30 @@ class AliasSet : public ilist_node<AliasSet> {
// AliasSets forwarding to it.
unsigned RefCount : 28;
/// AccessType - Keep track of whether this alias set merely refers to the
/// locations of memory, whether it modifies the memory, or whether it does
/// both. The lattice goes from "NoModRef" to either Refs or Mods, then to
/// ModRef as necessary.
/// The kinds of access this alias set models.
///
enum AccessType {
NoModRef = 0, Refs = 1, // Ref = bit 1
Mods = 2, ModRef = 3 // Mod = bit 2
/// We keep track of whether this alias set merely refers to the locations of
/// memory (and not any particular access), whether it modifies or references
/// the memory, or whether it does both. The lattice goes from "NoAccess" to
/// either RefAccess or ModAccess, then to ModRefAccess as necessary.
enum AccessLattice {
NoAccess = 0,
RefAccess = 1,
ModAccess = 2,
ModRefAccess = RefAccess | ModAccess
};
unsigned AccessTy : 2;
unsigned Access : 2;
/// AliasType - Keep track the relationships between the pointers in the set.
/// Lattice goes from MustAlias to MayAlias.
/// The kind of alias relationship between pointers of the set.
///
enum AliasType {
MustAlias = 0, MayAlias = 1
/// These represent conservatively correct alias results between any members
/// of the set. We represent these independently of the values of alias
/// results in order to pack it into a single bit. Lattice goes from
/// MustAlias to MayAlias.
enum AliasLattice {
SetMustAlias = 0, SetMayAlias = 1
};
unsigned AliasTy : 1;
unsigned Alias : 1;
// Volatile - True if this alias set contains volatile loads or stores.
bool Volatile : 1;
@ -153,10 +159,10 @@ class AliasSet : public ilist_node<AliasSet> {
public:
/// Accessors...
bool isRef() const { return AccessTy & Refs; }
bool isMod() const { return AccessTy & Mods; }
bool isMustAlias() const { return AliasTy == MustAlias; }
bool isMayAlias() const { return AliasTy == MayAlias; }
bool isRef() const { return Access & RefAccess; }
bool isMod() const { return Access & ModAccess; }
bool isMustAlias() const { return Alias == SetMustAlias; }
bool isMayAlias() const { return Alias == SetMayAlias; }
// isVolatile - Return true if this alias set contains volatile loads or
// stores.
@ -218,7 +224,7 @@ class AliasSet : public ilist_node<AliasSet> {
friend struct ilist_sentinel_traits<AliasSet>;
AliasSet()
: PtrList(nullptr), PtrListEnd(&PtrList), Forward(nullptr), RefCount(0),
AccessTy(NoModRef), AliasTy(MustAlias), Volatile(false) {
Access(NoAccess), Alias(SetMustAlias), Volatile(false) {
}
AliasSet(const AliasSet &AS) = delete;
@ -419,11 +425,11 @@ class AliasSetTracker {
}
AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
AliasSet::AccessType E,
AliasSet::AccessLattice E,
bool &NewSet) {
NewSet = false;
AliasSet &AS = getAliasSetForPointer(P, Size, AAInfo, &NewSet);
AS.AccessTy |= E;
AS.Access |= E;
return AS;
}
AliasSet *findAliasSetForPointer(const Value *Ptr, uint64_t Size,
@ -437,6 +443,6 @@ inline raw_ostream& operator<<(raw_ostream &OS, const AliasSetTracker &AST) {
return OS;
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -63,6 +63,6 @@ class BlockFrequencyInfo : public FunctionPass {
};
} // namespace llvm
}
#endif

View File

@ -628,7 +628,7 @@ void IrreducibleGraph::addEdges(const BlockNode &Node,
else
addBlockEdges(*this, Irr, OuterLoop);
}
} // namespace bfi_detail
}
/// \brief Shared implementation for block frequency analysis.
///
@ -1133,7 +1133,7 @@ template <class BT> struct BlockEdgesAdder {
G.addEdge(Irr, BFI.getNode(*I), OuterLoop);
}
};
} // namespace bfi_detail
}
template <class BT>
void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {

View File

@ -158,6 +158,6 @@ class BranchProbabilityInfo : public FunctionPass {
bool calcInvokeHeuristics(BasicBlock *BB);
};
} // namespace llvm
}
#endif

View File

@ -78,6 +78,17 @@ bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
} // namespace llvm
/// \brief Determine whether there is at least one path from a block in
/// 'Worklist' to 'StopBB', returning true if uncertain.
///
/// Determine whether there is a path from at least one block in Worklist to
/// StopBB within a single function. Returns false only if we can prove that
/// once any block in 'Worklist' has been reached then 'StopBB' can not be
/// executed. Conservatively returns true.
bool isPotentiallyReachableFromMany(SmallVectorImpl<BasicBlock *> &Worklist,
BasicBlock *StopBB,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
} // End llvm namespace
#endif

View File

@ -119,7 +119,7 @@ struct DOTGraphTraits<const Function*> : public DefaultDOTGraphTraits {
return "";
}
};
} // namespace llvm
} // End llvm namespace
namespace llvm {
class FunctionPass;

View File

@ -485,6 +485,6 @@ CGSCCToFunctionPassAdaptor<FunctionPassT>
createCGSCCToFunctionPassAdaptor(FunctionPassT Pass) {
return CGSCCToFunctionPassAdaptor<FunctionPassT>(std::move(Pass));
}
} // namespace llvm
}
#endif

View File

@ -481,6 +481,6 @@ struct GraphTraits<const CallGraph *> : public GraphTraits<
static const CallGraphNode &CGdereference(PairTy P) { return *P.second; }
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -102,6 +102,6 @@ class CallGraphSCC {
iterator end() const { return Nodes.end(); }
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -102,6 +102,6 @@ struct CodeMetrics {
SmallPtrSetImpl<const Value *> &EphValues);
};
} // namespace llvm
}
#endif

View File

@ -97,6 +97,6 @@ bool canConstantFoldCallTo(const Function *F);
/// with the specified arguments, returning null if unsuccessful.
Constant *ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI = nullptr);
} // namespace llvm
}
#endif

View File

@ -25,6 +25,6 @@ namespace llvm {
FunctionPass *createPostDomOnlyPrinterPass();
FunctionPass *createPostDomViewerPass();
FunctionPass *createPostDomOnlyViewerPass();
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -205,6 +205,6 @@ class DominanceFrontier : public FunctionPass {
EXTERN_TEMPLATE_INSTANTIATION(class DominanceFrontierBase<BasicBlock>);
EXTERN_TEMPLATE_INSTANTIATION(class ForwardDominanceFrontierBase<BasicBlock>);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -221,6 +221,6 @@ ForwardDominanceFrontierBase<BlockT>::calculate(const DomTreeT &DT,
return *Result;
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -178,6 +178,6 @@ class IVUsers : public LoopPass {
Pass *createIVUsersPass();
} // namespace llvm
}
#endif

View File

@ -36,7 +36,7 @@ namespace InlineConstants {
/// Do not inline functions which allocate this many bytes on the stack
/// when the caller is recursive.
const unsigned TotalAllocaSizeRecursiveCaller = 1024;
} // namespace InlineConstants
}
/// \brief Represents the cost of inlining a function.
///
@ -138,6 +138,6 @@ class InlineCostAnalysis : public CallGraphSCCPass {
bool isInlineViable(Function &Callee);
};
} // namespace llvm
}
#endif

View File

@ -145,6 +145,6 @@ template <> struct GraphTraits<Inverse<Interval*> > {
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -263,6 +263,6 @@ inline interval_part_interval_iterator intervals_end(IntervalPartition &IP) {
return interval_part_interval_iterator();
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -106,6 +106,6 @@ class IntervalPartition : public FunctionPass {
void updatePredecessors(Interval *Int);
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -92,5 +92,5 @@ class IDFCalculator {
const SmallPtrSetImpl<BasicBlock *> *DefBlocks;
SmallVector<BasicBlock *, 32> PHIBlocks;
};
} // namespace llvm
}
#endif

View File

@ -66,6 +66,6 @@ class JumpInstrTableInfo : public ImmutablePass {
/// bound specifies the maximum number of bytes needed to represent an
/// unconditional jump or a trap instruction in the back end currently in use.
ModulePass *createJumpInstrTableInfoPass(unsigned Bound);
} // namespace llvm
}
#endif /* LLVM_ANALYSIS_JUMPINSTRTABLEINFO_H */

View File

@ -569,6 +569,6 @@ class LazyCallGraphPrinterPass {
static StringRef name() { return "LazyCallGraphPrinterPass"; }
};
} // namespace llvm
}
#endif

View File

@ -66,6 +66,6 @@ namespace llvm {
ImmutableCallSite CS,
const MemoryLocation &Loc);
};
} // namespace llvm
} // End of llvm namespace
#endif

View File

@ -44,6 +44,6 @@ void lintFunction(
const Function &F ///< The function to be checked
);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -52,6 +52,6 @@ Value *FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
AliasAnalysis *AA = nullptr,
AAMDNodes *AATags = nullptr);
} // namespace llvm
}
#endif

View File

@ -555,6 +555,6 @@ class LoopAccessAnalysis : public FunctionPass {
DominatorTree *DT;
LoopInfo *LI;
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -763,6 +763,6 @@ class LoopInfoWrapperPass : public FunctionPass {
void getAnalysisUsage(AnalysisUsage &AU) const override;
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -535,6 +535,6 @@ void LoopInfoBase<BlockT, LoopT>::verify() const {
#endif
}
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -169,6 +169,6 @@ class LPPassManager : public FunctionPass, public PMDataManager {
Loop *CurrentLoop;
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -262,6 +262,6 @@ class ObjectSizeOffsetEvaluator
SizeOffsetEvalType visitInstruction(Instruction &I);
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -445,6 +445,6 @@ namespace llvm {
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -137,6 +137,6 @@ template <> struct DenseMapInfo<MemoryLocation> {
return LHS == RHS;
}
};
} // namespace llvm
}
#endif

View File

@ -173,6 +173,6 @@ namespace llvm {
//
FunctionPass *createMemDerefPrinter();
} // namespace llvm
}
#endif

View File

@ -112,6 +112,6 @@ template <> struct GraphTraits<PostDominatorTree*>
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -280,6 +280,6 @@ class PtrUseVisitor : protected InstVisitor<DerivedT>,
}
};
} // namespace llvm
}
#endif

View File

@ -906,5 +906,5 @@ EXTERN_TEMPLATE_INSTANTIATION(class RegionBase<RegionTraits<Function>>);
EXTERN_TEMPLATE_INSTANTIATION(class RegionNodeBase<RegionTraits<Function>>);
EXTERN_TEMPLATE_INSTANTIATION(class RegionInfoBase<RegionTraits<Function>>);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -123,6 +123,6 @@ class RGPassManager : public FunctionPass, public PMDataManager {
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -954,6 +954,86 @@ namespace llvm {
void print(raw_ostream &OS, const Module* = nullptr) const override;
void verifyAnalysis() const override;
/// Collect parametric terms occurring in step expressions.
void collectParametricTerms(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Terms);
/// Return in Subscripts the access functions for each dimension in Sizes.
void computeAccessFunctions(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes);
/// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
/// subscripts and sizes of an array access.
///
/// The delinearization is a 3 step process: the first two steps compute the
/// sizes of each subscript and the third step computes the access functions
/// for the delinearized array:
///
/// 1. Find the terms in the step functions
/// 2. Compute the array size
/// 3. Compute the access function: divide the SCEV by the array size
/// starting with the innermost dimensions found in step 2. The Quotient
/// is the SCEV to be divided in the next step of the recursion. The
/// Remainder is the subscript of the innermost dimension. Loop over all
/// array dimensions computed in step 2.
///
/// To compute a uniform array size for several memory accesses to the same
/// object, one can collect in step 1 all the step terms for all the memory
/// accesses, and compute in step 2 a unique array shape. This guarantees
/// that the array shape will be the same across all memory accesses.
///
/// FIXME: We could derive the result of steps 1 and 2 from a description of
/// the array shape given in metadata.
///
/// Example:
///
/// A[][n][m]
///
/// for i
/// for j
/// for k
/// A[j+k][2i][5i] =
///
/// The initial SCEV:
///
/// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
///
/// 1. Find the different terms in the step functions:
/// -> [2*m, 5, n*m, n*m]
///
/// 2. Compute the array size: sort and unique them
/// -> [n*m, 2*m, 5]
/// find the GCD of all the terms = 1
/// divide by the GCD and erase constant terms
/// -> [n*m, 2*m]
/// GCD = m
/// divide by GCD -> [n, 2]
/// remove constant terms
/// -> [n]
/// size of the array is A[unknown][n][m]
///
/// 3. Compute the access function
/// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
/// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
/// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
/// The remainder is the subscript of the innermost array dimension: [5i].
///
/// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
/// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
/// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
/// The Remainder is the subscript of the next array dimension: [2i].
///
/// The subscript of the outermost dimension is the Quotient: [j+k].
///
/// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
void delinearize(const SCEV *Expr,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes,
const SCEV *ElementSize);
private:
/// Compute the backedge taken count knowing the interval difference, the
/// stride and presence of the equality in the comparison.
@ -981,6 +1061,6 @@ namespace llvm {
/// to locate them all and call their destructors.
SCEVUnknown *FirstUnknown;
};
} // namespace llvm
}
#endif

View File

@ -275,6 +275,6 @@ namespace llvm {
Value *expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
Type *ExpandTy, Type *IntTy, bool useSubtract);
};
} // namespace llvm
}
#endif

View File

@ -356,84 +356,6 @@ namespace llvm {
static inline bool classof(const SCEV *S) {
return S->getSCEVType() == scAddRecExpr;
}
/// Collect parametric terms occurring in step expressions.
void collectParametricTerms(ScalarEvolution &SE,
SmallVectorImpl<const SCEV *> &Terms) const;
/// Return in Subscripts the access functions for each dimension in Sizes.
void computeAccessFunctions(ScalarEvolution &SE,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes) const;
/// Split this SCEVAddRecExpr into two vectors of SCEVs representing the
/// subscripts and sizes of an array access.
///
/// The delinearization is a 3 step process: the first two steps compute the
/// sizes of each subscript and the third step computes the access functions
/// for the delinearized array:
///
/// 1. Find the terms in the step functions
/// 2. Compute the array size
/// 3. Compute the access function: divide the SCEV by the array size
/// starting with the innermost dimensions found in step 2. The Quotient
/// is the SCEV to be divided in the next step of the recursion. The
/// Remainder is the subscript of the innermost dimension. Loop over all
/// array dimensions computed in step 2.
///
/// To compute a uniform array size for several memory accesses to the same
/// object, one can collect in step 1 all the step terms for all the memory
/// accesses, and compute in step 2 a unique array shape. This guarantees
/// that the array shape will be the same across all memory accesses.
///
/// FIXME: We could derive the result of steps 1 and 2 from a description of
/// the array shape given in metadata.
///
/// Example:
///
/// A[][n][m]
///
/// for i
/// for j
/// for k
/// A[j+k][2i][5i] =
///
/// The initial SCEV:
///
/// A[{{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k]
///
/// 1. Find the different terms in the step functions:
/// -> [2*m, 5, n*m, n*m]
///
/// 2. Compute the array size: sort and unique them
/// -> [n*m, 2*m, 5]
/// find the GCD of all the terms = 1
/// divide by the GCD and erase constant terms
/// -> [n*m, 2*m]
/// GCD = m
/// divide by GCD -> [n, 2]
/// remove constant terms
/// -> [n]
/// size of the array is A[unknown][n][m]
///
/// 3. Compute the access function
/// a. Divide {{{0,+,2*m+5}_i, +, n*m}_j, +, n*m}_k by the innermost size m
/// Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k
/// Remainder: {{{0,+,5}_i, +, 0}_j, +, 0}_k
/// The remainder is the subscript of the innermost array dimension: [5i].
///
/// b. Divide Quotient: {{{0,+,2}_i, +, n}_j, +, n}_k by next outer size n
/// Quotient: {{{0,+,0}_i, +, 1}_j, +, 1}_k
/// Remainder: {{{0,+,2}_i, +, 0}_j, +, 0}_k
/// The Remainder is the subscript of the next array dimension: [2i].
///
/// The subscript of the outermost dimension is the Quotient: [j+k].
///
/// Overall, we have: A[][n][m], and the access function: A[j+k][2i][5i].
void delinearize(ScalarEvolution &SE,
SmallVectorImpl<const SCEV *> &Subscripts,
SmallVectorImpl<const SCEV *> &Sizes,
const SCEV *ElementSize) const;
};
//===--------------------------------------------------------------------===//
@ -829,6 +751,6 @@ static inline const SCEV *apply(const SCEV *Scev, LoopToScevMapT &Map,
return SCEVApplyRewriter::rewrite(Scev, Map, SE);
}
} // namespace llvm
}
#endif

View File

@ -73,6 +73,6 @@ const SCEV *TransformForPostIncUse(TransformKind Kind,
ScalarEvolution &SE,
DominatorTree &DT);
} // namespace llvm
}
#endif

View File

@ -265,6 +265,6 @@ class TargetFolder {
}
};
} // namespace llvm
}
#endif

View File

@ -519,6 +519,11 @@ class TargetTransformInfo {
Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) const;
/// \returns True if the two functions have compatible attributes for inlining
/// purposes.
bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const;
/// @}
private:
@ -619,6 +624,8 @@ class TargetTransformInfo::Concept {
MemIntrinsicInfo &Info) = 0;
virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
Type *ExpectedType) = 0;
virtual bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const = 0;
};
template <typename T>
@ -804,6 +811,10 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
Type *ExpectedType) override {
return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
}
bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const override {
return Impl.hasCompatibleFunctionAttributes(Caller, Callee);
}
};
template <typename T>
@ -908,6 +919,6 @@ class TargetTransformInfoWrapperPass : public ImmutablePass {
/// clients.
ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -335,6 +335,14 @@ class TargetTransformInfoImplBase {
Type *ExpectedType) {
return nullptr;
}
bool hasCompatibleFunctionAttributes(const Function *Caller,
const Function *Callee) const {
return (Caller->getFnAttribute("target-cpu") ==
Callee->getFnAttribute("target-cpu")) &&
(Caller->getFnAttribute("target-features") ==
Callee->getFnAttribute("target-features"));
}
};
/// \brief CRTP base class for use as a mix-in that aids implementing
@ -446,6 +454,6 @@ class TargetTransformInfoImplCRTPBase : public TargetTransformInfoImplBase {
U->getNumOperands() == 1 ? U->getOperand(0)->getType() : nullptr);
}
};
} // namespace llvm
}
#endif

View File

@ -0,0 +1,56 @@
//===- llvm/Transforms/Utils/VectorUtils.h - Vector utilities -*- C++ -*-=====//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines some vectorizer utilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
#define LLVM_TRANSFORMS_UTILS_VECTORUTILS_H
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
namespace llvm {
/// \brief Identify if the intrinsic is trivially vectorizable.
/// This method returns true if the intrinsic's argument types are all
/// scalars for the scalar form of the intrinsic and all vectors for
/// the vector form of the intrinsic.
bool isTriviallyVectorizable(Intrinsic::ID ID);
/// \brief Identifies if the intrinsic has a scalar operand. It checks for
/// ctlz,cttz and powi special intrinsics whose argument is scalar.
bool hasVectorInstrinsicScalarOpd(Intrinsic::ID ID, unsigned ScalarOpdIdx);
/// \brief Identify if call has a unary float signature
/// It returns input intrinsic ID if call has a single argument,
/// argument type and call instruction type should be floating
/// point type and call should only reads memory.
/// else return not_intrinsic.
Intrinsic::ID checkUnaryFloatSignature(const CallInst &I,
Intrinsic::ID ValidIntrinsicID);
/// \brief Identify if call has a binary float signature
/// It returns input intrinsic ID if call has two arguments,
/// arguments type and call instruction type should be floating
/// point type and call should only reads memory.
/// else return not_intrinsic.
Intrinsic::ID checkBinaryFloatSignature(const CallInst &I,
Intrinsic::ID ValidIntrinsicID);
/// \brief Returns intrinsic ID for call.
/// For the input call instruction it finds mapping intrinsic and returns
/// its intrinsic ID, in case it does not found it return not_intrinsic.
Intrinsic::ID getIntrinsicIDForCall(CallInst *CI, const TargetLibraryInfo *TLI);
} // llvm namespace
#endif

View File

@ -18,55 +18,67 @@
namespace llvm {
class Module;
class SMDiagnostic;
class LLVMContext;
class Module;
struct SlotMapping;
class SMDiagnostic;
/// This function is the main interface to the LLVM Assembly Parser. It parses
/// an ASCII file that (presumably) contains LLVM Assembly code. It returns a
/// Module (intermediate representation) with the corresponding features. Note
/// that this does not verify that the generated Module is valid, so you should
/// run the verifier after parsing the file to check that it is okay.
/// @brief Parse LLVM Assembly from a file
/// @param Filename The name of the file to parse
/// @param Error Error result info.
/// @param Context Context in which to allocate globals info.
/// \brief Parse LLVM Assembly from a file
/// \param Filename The name of the file to parse
/// \param Error Error result info.
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
std::unique_ptr<Module> parseAssemblyFile(StringRef Filename,
SMDiagnostic &Error,
LLVMContext &Context);
LLVMContext &Context,
SlotMapping *Slots = nullptr);
/// The function is a secondary interface to the LLVM Assembly Parser. It parses
/// an ASCII string that (presumably) contains LLVM Assembly code. It returns a
/// Module (intermediate representation) with the corresponding features. Note
/// that this does not verify that the generated Module is valid, so you should
/// run the verifier after parsing the file to check that it is okay.
/// @brief Parse LLVM Assembly from a string
/// @param AsmString The string containing assembly
/// @param Error Error result info.
/// @param Context Context in which to allocate globals info.
/// \brief Parse LLVM Assembly from a string
/// \param AsmString The string containing assembly
/// \param Error Error result info.
/// \param Context Context in which to allocate globals info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
std::unique_ptr<Module> parseAssemblyString(StringRef AsmString,
SMDiagnostic &Error,
LLVMContext &Context);
LLVMContext &Context,
SlotMapping *Slots = nullptr);
/// parseAssemblyFile and parseAssemblyString are wrappers around this function.
/// @brief Parse LLVM Assembly from a MemoryBuffer.
/// @param F The MemoryBuffer containing assembly
/// @param Err Error result info.
/// @param Context Context in which to allocate globals info.
/// \brief Parse LLVM Assembly from a MemoryBuffer.
/// \param F The MemoryBuffer containing assembly
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
std::unique_ptr<Module> parseAssembly(MemoryBufferRef F, SMDiagnostic &Err,
LLVMContext &Context);
LLVMContext &Context,
SlotMapping *Slots = nullptr);
/// This function is the low-level interface to the LLVM Assembly Parser.
/// This is kept as an independent function instead of being inlined into
/// parseAssembly for the convenience of interactive users that want to add
/// recently parsed bits to an existing module.
///
/// @param F The MemoryBuffer containing assembly
/// @param M The module to add data to.
/// @param Err Error result info.
/// @return true on error.
bool parseAssemblyInto(MemoryBufferRef F, Module &M, SMDiagnostic &Err);
/// \param F The MemoryBuffer containing assembly
/// \param M The module to add data to.
/// \param Err Error result info.
/// \param Slots The optional slot mapping that will be initialized during
/// parsing.
/// \return true on error.
bool parseAssemblyInto(MemoryBufferRef F, Module &M, SMDiagnostic &Err,
SlotMapping *Slots = nullptr);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -0,0 +1,34 @@
//===-- SlotMapping.h - Slot number mapping for unnamed values --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the SlotMapping struct.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ASMPARSER_SLOTMAPPING_H
#define LLVM_ASMPARSER_SLOTMAPPING_H
#include "llvm/IR/TrackingMDRef.h"
#include <map>
#include <vector>
namespace llvm {
class GlobalValue;
/// This struct contains the mapping from the slot numbers to unnamed metadata
/// nodes and global values.
struct SlotMapping {
std::vector<GlobalValue *> GlobalValues;
std::map<unsigned, TrackingMDNodeRef> MetadataNodes;
};
} // end namespace llvm
#endif

View File

@ -77,7 +77,7 @@ namespace bitc {
// [id, name]
};
} // namespace bitc
} // End bitc namespace
/// BitCodeAbbrevOp - This describes one or more operands in an abbreviation.
/// This is actually a union of two different things:
@ -180,6 +180,6 @@ class BitCodeAbbrev : public RefCountedBase<BitCodeAbbrev> {
OperandList.push_back(OpInfo);
}
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -56,6 +56,6 @@ class BitcodeWriterPass {
static StringRef name() { return "BitcodeWriterPass"; }
};
} // namespace llvm
}
#endif

View File

@ -512,6 +512,6 @@ class BitstreamCursor {
bool ReadBlockInfoBlock();
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -520,6 +520,6 @@ class BitstreamWriter {
};
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -167,6 +167,7 @@ namespace bitc {
METADATA_EXPRESSION = 29, // [distinct, n x element]
METADATA_OBJC_PROPERTY = 30, // [distinct, name, file, line, ...]
METADATA_IMPORTED_ENTITY=31, // [distinct, tag, scope, entity, line, name]
METADATA_MODULE=32, // [distinct, scope, name, ...]
};
// The constants block (CONSTANTS_BLOCK_ID) describes emission for each
@ -416,7 +417,7 @@ namespace bitc {
COMDAT_SELECTION_KIND_SAME_SIZE = 5,
};
} // namespace bitc
} // namespace llvm
} // End bitc namespace
} // End llvm namespace
#endif

View File

@ -166,7 +166,7 @@ namespace llvm {
}
};
} // namespace llvm
} // End llvm namespace
namespace std {
template <> struct is_error_code_enum<llvm::BitcodeError> : std::true_type {};

View File

@ -115,6 +115,6 @@ bool returnTypeIsEligibleForTailCall(const Function *F,
// or we are in LTO.
bool canBeOmittedFromSymbolTable(const GlobalValue *GV);
} // namespace llvm
} // End llvm namespace
#endif

View File

@ -535,6 +535,6 @@ class AsmPrinter : public MachineFunctionPass {
void EmitXXStructorList(const Constant *List, bool isCtor);
GCMetadataPrinter *GetOrCreateGCPrinter(GCStrategy &C);
};
} // namespace llvm
}
#endif

View File

@ -830,6 +830,6 @@ class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
}
};
} // namespace llvm
}
#endif

View File

@ -74,6 +74,6 @@ namespace llvm {
const MachineBlockFrequencyInfo &MBFI,
VirtRegAuxInfo::NormalizingFn norm =
normalizeSpillWeight);
} // namespace llvm
}
#endif // LLVM_CODEGEN_CALCSPILLWEIGHTS_H

View File

@ -17,6 +17,8 @@
#define LLVM_CODEGEN_COMMANDFLAGS_H
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCTargetOptionsCommandFlags.h"
#include "llvm//MC/SubtargetFeature.h"
@ -249,7 +251,6 @@ static inline TargetOptions InitTargetOptionsFromCodeGenFlags() {
Options.NoZerosInBSS = DontPlaceZerosInBSS;
Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
Options.StackAlignmentOverride = OverrideStackAlignment;
Options.TrapFuncName = TrapFuncName;
Options.PositionIndependentExecutable = EnablePIE;
Options.UseInitArray = !UseCtors;
Options.DataSections = DataSections;
@ -320,6 +321,16 @@ static inline void setFunctionAttributes(StringRef CPU, StringRef Features,
"disable-tail-calls",
toStringRef(DisableTailCalls));
if (TrapFuncName.getNumOccurrences() > 0)
for (auto &B : F)
for (auto &I : B)
if (auto *Call = dyn_cast<CallInst>(&I))
if (const auto *F = Call->getCalledFunction())
if (F->getIntrinsicID() == Intrinsic::debugtrap ||
F->getIntrinsicID() == Intrinsic::trap)
Call->addAttribute(llvm::AttributeSet::FunctionIndex,
"trap-func-name", TrapFuncName);
// Let NewAttrs override Attrs.
NewAttrs = Attrs.addAttributes(Ctx, AttributeSet::FunctionIndex, NewAttrs);
F.setAttributes(NewAttrs);

View File

@ -91,7 +91,7 @@ class DFAPacketizer {
// API call is made to prune the dependence.
class VLIWPacketizerList {
protected:
const MachineFunction &MF;
MachineFunction &MF;
const TargetInstrInfo *TII;
// The VLIW Scheduler.
@ -159,6 +159,6 @@ class VLIWPacketizerList {
}
};
} // namespace llvm
}
#endif

View File

@ -15,6 +15,8 @@
#define LLVM_LIB_CODEGEN_ASMPRINTER_DIE_H
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/DwarfStringPoolEntry.h"
#include "llvm/Support/Dwarf.h"
@ -436,11 +438,11 @@ class DIEValue {
/// EmitValue - Emit value via the Dwarf writer.
///
void EmitValue(const AsmPrinter *AP, dwarf::Form Form) const;
void EmitValue(const AsmPrinter *AP) const;
/// SizeOf - Return the size of a value in bytes.
///
unsigned SizeOf(const AsmPrinter *AP, dwarf::Form Form) const;
unsigned SizeOf(const AsmPrinter *AP) const;
#ifndef NDEBUG
void print(raw_ostream &O) const;
@ -448,10 +450,179 @@ class DIEValue {
#endif
};
struct IntrusiveBackListNode {
PointerIntPair<IntrusiveBackListNode *, 1> Next;
IntrusiveBackListNode() : Next(this, true) {}
IntrusiveBackListNode *getNext() const {
return Next.getInt() ? nullptr : Next.getPointer();
}
};
struct IntrusiveBackListBase {
typedef IntrusiveBackListNode Node;
Node *Last = nullptr;
bool empty() const { return !Last; }
void push_back(Node &N) {
assert(N.Next.getPointer() == &N && "Expected unlinked node");
assert(N.Next.getInt() == true && "Expected unlinked node");
if (Last) {
N.Next = Last->Next;
Last->Next.setPointerAndInt(&N, false);
}
Last = &N;
}
};
template <class T> class IntrusiveBackList : IntrusiveBackListBase {
public:
using IntrusiveBackListBase::empty;
void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
T &back() { return *static_cast<T *>(Last); }
const T &back() const { return *static_cast<T *>(Last); }
class const_iterator;
class iterator
: public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
friend class const_iterator;
Node *N = nullptr;
public:
iterator() = default;
explicit iterator(T *N) : N(N) {}
iterator &operator++() {
N = N->getNext();
return *this;
}
explicit operator bool() const { return N; }
T &operator*() const { return *static_cast<T *>(N); }
bool operator==(const iterator &X) const { return N == X.N; }
bool operator!=(const iterator &X) const { return N != X.N; }
};
class const_iterator
: public iterator_facade_base<const_iterator, std::forward_iterator_tag,
const T> {
const Node *N = nullptr;
public:
const_iterator() = default;
// Placate MSVC by explicitly scoping 'iterator'.
const_iterator(typename IntrusiveBackList<T>::iterator X) : N(X.N) {}
explicit const_iterator(const T *N) : N(N) {}
const_iterator &operator++() {
N = N->getNext();
return *this;
}
explicit operator bool() const { return N; }
const T &operator*() const { return *static_cast<const T *>(N); }
bool operator==(const const_iterator &X) const { return N == X.N; }
bool operator!=(const const_iterator &X) const { return N != X.N; }
};
iterator begin() {
return Last ? iterator(static_cast<T *>(Last->Next.getPointer())) : end();
}
const_iterator begin() const {
return const_cast<IntrusiveBackList *>(this)->begin();
}
iterator end() { return iterator(); }
const_iterator end() const { return const_iterator(); }
static iterator toIterator(T &N) { return iterator(&N); }
static const_iterator toIterator(const T &N) { return const_iterator(&N); }
};
/// A list of DIE values.
///
/// This is a singly-linked list, but instead of reversing the order of
/// insertion, we keep a pointer to the back of the list so we can push in
/// order.
///
/// There are two main reasons to choose a linked list over a customized
/// vector-like data structure.
///
/// 1. For teardown efficiency, we want DIEs to be BumpPtrAllocated. Using a
/// linked list here makes this way easier to accomplish.
/// 2. Carrying an extra pointer per \a DIEValue isn't expensive. 45% of DIEs
/// have 2 or fewer values, and 90% have 5 or fewer. A vector would be
/// over-allocated by 50% on average anyway, the same cost as the
/// linked-list node.
class DIEValueList {
struct Node : IntrusiveBackListNode {
DIEValue V;
explicit Node(DIEValue V) : V(V) {}
};
typedef IntrusiveBackList<Node> ListTy;
ListTy List;
public:
bool empty() const { return List.empty(); }
class const_iterator;
class iterator
: public iterator_adaptor_base<iterator, ListTy::iterator,
std::forward_iterator_tag, DIEValue> {
friend class const_iterator;
typedef iterator_adaptor_base<iterator, ListTy::iterator,
std::forward_iterator_tag,
DIEValue> iterator_adaptor;
public:
iterator() = default;
explicit iterator(ListTy::iterator X) : iterator_adaptor(X) {}
explicit operator bool() const { return bool(wrapped()); }
DIEValue &operator*() const { return wrapped()->V; }
};
class const_iterator
: public iterator_adaptor_base<const_iterator, ListTy::const_iterator,
std::forward_iterator_tag,
const DIEValue> {
typedef iterator_adaptor_base<const_iterator, ListTy::const_iterator,
std::forward_iterator_tag,
const DIEValue> iterator_adaptor;
public:
const_iterator() = default;
const_iterator(DIEValueList::iterator X) : iterator_adaptor(X.wrapped()) {}
explicit const_iterator(ListTy::const_iterator X) : iterator_adaptor(X) {}
explicit operator bool() const { return bool(wrapped()); }
const DIEValue &operator*() const { return wrapped()->V; }
};
iterator insert(BumpPtrAllocator &Alloc, DIEValue V) {
List.push_back(*new (Alloc) Node(V));
return iterator(ListTy::toIterator(List.back()));
}
template <class... Ts>
iterator emplace(BumpPtrAllocator &Alloc, Ts &&... Args) {
return insert(Alloc, DIEValue(std::forward<Ts>(Args)...));
}
iterator begin() { return iterator(List.begin()); }
iterator end() { return iterator(List.end()); }
const_iterator begin() const { return const_iterator(List.begin()); }
const_iterator end() const { return const_iterator(List.end()); }
};
//===--------------------------------------------------------------------===//
/// DIE - A structured debug information entry. Has an abbreviation which
/// describes its organization.
class DIE {
class DIE : IntrusiveBackListNode {
friend class IntrusiveBackList<DIE>;
protected:
/// Offset - Offset in debug info section.
///
@ -468,27 +639,24 @@ class DIE {
dwarf::Tag Tag = (dwarf::Tag)0;
/// Children DIEs.
///
// This can't be a vector<DIE> because pointer validity is requirent for the
// Parent pointer and DIEEntry.
// It can't be a list<DIE> because some clients need pointer validity before
// the object has been added to any child list
// (eg: DwarfUnit::constructVariableDIE). These aren't insurmountable, but may
// be more convoluted than beneficial.
std::vector<std::unique_ptr<DIE>> Children;
IntrusiveBackList<DIE> Children;
DIE *Parent;
DIE *Parent = nullptr;
/// Attribute values.
///
SmallVector<DIEValue, 12> Values;
DIEValueList Values;
protected:
DIE() : Offset(0), Size(0), Parent(nullptr) {}
DIE() : Offset(0), Size(0) {}
private:
explicit DIE(dwarf::Tag Tag) : Offset(0), Size(0), Tag(Tag) {}
public:
explicit DIE(dwarf::Tag Tag)
: Offset(0), Size(0), Tag(Tag), Parent(nullptr) {}
static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
return new (Alloc) DIE(Tag);
}
// Accessors.
unsigned getAbbrevNumber() const { return AbbrevNumber; }
@ -497,26 +665,32 @@ class DIE {
unsigned getSize() const { return Size; }
bool hasChildren() const { return !Children.empty(); }
typedef std::vector<std::unique_ptr<DIE>>::const_iterator child_iterator;
typedef IntrusiveBackList<DIE>::iterator child_iterator;
typedef IntrusiveBackList<DIE>::const_iterator const_child_iterator;
typedef iterator_range<child_iterator> child_range;
typedef iterator_range<const_child_iterator> const_child_range;
child_range children() const {
child_range children() {
return llvm::make_range(Children.begin(), Children.end());
}
const_child_range children() const {
return llvm::make_range(Children.begin(), Children.end());
}
typedef SmallVectorImpl<DIEValue>::const_iterator value_iterator;
typedef DIEValueList::iterator value_iterator;
typedef iterator_range<value_iterator> value_range;
value_iterator values_begin() const { return Values.begin(); }
value_iterator values_end() const { return Values.end(); }
value_range values() const {
return llvm::make_range(values_begin(), values_end());
value_range values() {
return llvm::make_range(Values.begin(), Values.end());
}
void setValue(unsigned I, DIEValue New) {
assert(I < Values.size());
Values[I] = New;
typedef DIEValueList::const_iterator const_value_iterator;
typedef iterator_range<const_value_iterator> const_value_range;
const_value_range values() const {
return llvm::make_range(Values.begin(), Values.end());
}
DIE *getParent() const { return Parent; }
/// Generate the abbreviation for this DIE.
@ -539,19 +713,21 @@ class DIE {
/// addValue - Add a value and attributes to a DIE.
///
void addValue(DIEValue Value) { Values.push_back(Value); }
value_iterator addValue(BumpPtrAllocator &Alloc, DIEValue Value) {
return Values.insert(Alloc, Value);
}
template <class T>
void addValue(dwarf::Attribute Attribute, dwarf::Form Form, T &&Value) {
Values.emplace_back(Attribute, Form, std::forward<T>(Value));
value_iterator addValue(BumpPtrAllocator &Alloc, dwarf::Attribute Attribute,
dwarf::Form Form, T &&Value) {
return Values.emplace(Alloc, Attribute, Form, std::forward<T>(Value));
}
/// addChild - Add a child to the DIE.
///
DIE &addChild(std::unique_ptr<DIE> Child) {
assert(!Child->getParent());
/// Add a child to the DIE.
DIE &addChild(DIE *Child) {
assert(!Child->getParent() && "Child should be orphaned");
Child->Parent = this;
Children.push_back(std::move(Child));
return *Children.back();
Children.push_back(*Child);
return Children.back();
}
/// Find a value in the DIE with the attribute given.
@ -635,6 +811,6 @@ class DIEBlock : public DIE {
#endif
};
} // namespace llvm
} // end llvm namespace
#endif

View File

@ -69,7 +69,7 @@ class FastISel {
unsigned NumFixedArgs;
CallingConv::ID CallConv;
const Value *Callee;
const char *SymName;
MCSymbol *Symbol;
ArgListTy Args;
ImmutableCallSite *CS;
MachineInstr *Call;
@ -88,7 +88,7 @@ class FastISel {
: RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
Callee(nullptr), SymName(nullptr), CS(nullptr), Call(nullptr),
Callee(nullptr), Symbol(nullptr), CS(nullptr), Call(nullptr),
ResultReg(0), NumResultRegs(0), IsPatchPoint(false) {}
CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
@ -114,12 +114,12 @@ class FastISel {
}
CallLoweringInfo &setCallee(Type *ResultTy, FunctionType *FuncTy,
const char *Target, ArgListTy &&ArgsList,
MCSymbol *Target, ArgListTy &&ArgsList,
ImmutableCallSite &Call,
unsigned FixedArgs = ~0U) {
RetTy = ResultTy;
Callee = Call.getCalledValue();
SymName = Target;
Symbol = Target;
IsInReg = Call.paramHasAttr(0, Attribute::InReg);
DoesNotReturn = Call.doesNotReturn();
@ -148,11 +148,16 @@ class FastISel {
return *this;
}
CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
CallLoweringInfo &setCallee(const DataLayout &DL, MCContext &Ctx,
CallingConv::ID CC, Type *ResultTy,
const char *Target, ArgListTy &&ArgsList,
unsigned FixedArgs = ~0U);
CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultTy,
MCSymbol *Target, ArgListTy &&ArgsList,
unsigned FixedArgs = ~0U) {
RetTy = ResultTy;
SymName = Target;
Symbol = Target;
CallConv = CC;
Args = std::move(ArgsList);
NumFixedArgs = (FixedArgs == ~0U) ? Args.size() : FixedArgs;
@ -504,7 +509,9 @@ class FastISel {
CmpInst::Predicate optimizeCmpPredicate(const CmpInst *CI) const;
bool lowerCallTo(const CallInst *CI, const char *SymName, unsigned NumArgs);
bool lowerCallTo(const CallInst *CI, MCSymbol *Symbol, unsigned NumArgs);
bool lowerCallTo(const CallInst *CI, const char *SymbolName,
unsigned NumArgs);
bool lowerCallTo(CallLoweringInfo &CLI);
bool isCommutativeIntrinsic(IntrinsicInst const *II) {

View File

@ -1,4 +1,4 @@
//===------------------- FaultMaps.h - StackMaps ----------------*- C++ -*-===//
//===------------------- FaultMaps.h - The "FaultMaps" section --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -12,6 +12,8 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Format.h"
#include <vector>
#include <map>
@ -68,6 +70,151 @@ class FaultMaps {
void emitFunctionInfo(const MCSymbol *FnLabel, const FunctionFaultInfos &FFI);
};
/// A parser for the __llvm_faultmaps section generated by the FaultMaps class
/// above. This parser is version locked with with the __llvm_faultmaps section
/// generated by the version of LLVM that includes it. No guarantees are made
/// with respect to forward or backward compatibility.
class FaultMapParser {
typedef uint8_t FaultMapVersionType;
static const size_t FaultMapVersionOffset = 0;
typedef uint8_t Reserved0Type;
static const size_t Reserved0Offset =
FaultMapVersionOffset + sizeof(FaultMapVersionType);
typedef uint16_t Reserved1Type;
static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
typedef uint32_t NumFunctionsType;
static const size_t NumFunctionsOffset =
Reserved1Offset + sizeof(Reserved1Type);
static const size_t FunctionInfosOffset =
NumFunctionsOffset + sizeof(NumFunctionsType);
const uint8_t *P;
const uint8_t *E;
template <typename T> static T read(const uint8_t *P, const uint8_t *E) {
assert(P + sizeof(T) <= E && "out of bounds read!");
return support::endian::read<T, support::little, 1>(P);
}
public:
class FunctionFaultInfoAccessor {
typedef uint32_t FaultKindType;
static const size_t FaultKindOffset = 0;
typedef uint32_t FaultingPCOffsetType;
static const size_t FaultingPCOffsetOffset =
FaultKindOffset + sizeof(FaultKindType);
typedef uint32_t HandlerPCOffsetType;
static const size_t HandlerPCOffsetOffset =
FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
const uint8_t *P;
const uint8_t *E;
public:
static const size_t Size =
HandlerPCOffsetOffset + sizeof(HandlerPCOffsetType);
explicit FunctionFaultInfoAccessor(const uint8_t *P, const uint8_t *E)
: P(P), E(E) {}
FaultKindType getFaultKind() const {
return read<FaultKindType>(P + FaultKindOffset, E);
}
FaultingPCOffsetType getFaultingPCOffset() const {
return read<FaultingPCOffsetType>(P + FaultingPCOffsetOffset, E);
}
HandlerPCOffsetType getHandlerPCOffset() const {
return read<HandlerPCOffsetType>(P + HandlerPCOffsetOffset, E);
}
};
class FunctionInfoAccessor {
typedef uint64_t FunctionAddrType;
static const size_t FunctionAddrOffset = 0;
typedef uint32_t NumFaultingPCsType;
static const size_t NumFaultingPCsOffset =
FunctionAddrOffset + sizeof(FunctionAddrType);
typedef uint32_t ReservedType;
static const size_t ReservedOffset =
NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
static const size_t FunctionFaultInfosOffset =
ReservedOffset + sizeof(ReservedType);
static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
const uint8_t *P;
const uint8_t *E;
public:
FunctionInfoAccessor() : P(nullptr), E(nullptr) {}
explicit FunctionInfoAccessor(const uint8_t *P, const uint8_t *E)
: P(P), E(E) {}
FunctionAddrType getFunctionAddr() const {
return read<FunctionAddrType>(P + FunctionAddrOffset, E);
}
NumFaultingPCsType getNumFaultingPCs() const {
return read<NumFaultingPCsType>(P + NumFaultingPCsOffset, E);
}
FunctionFaultInfoAccessor getFunctionFaultInfoAt(uint32_t Index) const {
assert(Index < getNumFaultingPCs() && "index out of bounds!");
const uint8_t *Begin = P + FunctionFaultInfosOffset +
FunctionFaultInfoAccessor::Size * Index;
return FunctionFaultInfoAccessor(Begin, E);
}
FunctionInfoAccessor getNextFunctionInfo() const {
size_t MySize = FunctionInfoHeaderSize +
getNumFaultingPCs() * FunctionFaultInfoAccessor::Size;
const uint8_t *Begin = P + MySize;
assert(Begin < E && "out of bounds!");
return FunctionInfoAccessor(Begin, E);
}
};
explicit FaultMapParser(const uint8_t *Begin, const uint8_t *End)
: P(Begin), E(End) {}
FaultMapVersionType getFaultMapVersion() const {
auto Version = read<FaultMapVersionType>(P + FaultMapVersionOffset, E);
assert(Version == 1 && "only version 1 supported!");
return Version;
}
NumFunctionsType getNumFunctions() const {
return read<NumFunctionsType>(P + NumFunctionsOffset, E);
}
FunctionInfoAccessor getFirstFunctionInfo() const {
const uint8_t *Begin = P + FunctionInfosOffset;
return FunctionInfoAccessor(Begin, E);
}
};
raw_ostream &
operator<<(raw_ostream &OS, const FaultMapParser::FunctionFaultInfoAccessor &);
raw_ostream &operator<<(raw_ostream &OS,
const FaultMapParser::FunctionInfoAccessor &);
raw_ostream &operator<<(raw_ostream &OS, const FaultMapParser &);
} // namespace llvm
#endif

Some files were not shown because too many files have changed in this diff Show More