Upgrade our copy of llvm/clang to trunk r162107. With thanks to Benjamin Kramer and Joerg Sonnenberger for their input and fixes.
Dimitry Andric 2012-08-20 18:33:03 +00:00
commit 7ae0e2c9f0
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=239462
1687 changed files with 149571 additions and 72242 deletions

View File

@ -38,6 +38,31 @@
# xargs -n1 | sort | uniq -d;
# done
# 20120816: new clang import which bumps version from 3.1 to 3.2
OLD_FILES+=usr/include/clang/3.1/altivec.h
OLD_FILES+=usr/include/clang/3.1/avx2intrin.h
OLD_FILES+=usr/include/clang/3.1/avxintrin.h
OLD_FILES+=usr/include/clang/3.1/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.1/bmiintrin.h
OLD_FILES+=usr/include/clang/3.1/cpuid.h
OLD_FILES+=usr/include/clang/3.1/emmintrin.h
OLD_FILES+=usr/include/clang/3.1/fma4intrin.h
OLD_FILES+=usr/include/clang/3.1/immintrin.h
OLD_FILES+=usr/include/clang/3.1/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.1/mm3dnow.h
OLD_FILES+=usr/include/clang/3.1/mm_malloc.h
OLD_FILES+=usr/include/clang/3.1/mmintrin.h
OLD_FILES+=usr/include/clang/3.1/module.map
OLD_FILES+=usr/include/clang/3.1/nmmintrin.h
OLD_FILES+=usr/include/clang/3.1/pmmintrin.h
OLD_FILES+=usr/include/clang/3.1/popcntintrin.h
OLD_FILES+=usr/include/clang/3.1/smmintrin.h
OLD_FILES+=usr/include/clang/3.1/tmmintrin.h
OLD_FILES+=usr/include/clang/3.1/unwind.h
OLD_FILES+=usr/include/clang/3.1/wmmintrin.h
OLD_FILES+=usr/include/clang/3.1/x86intrin.h
OLD_FILES+=usr/include/clang/3.1/xmmintrin.h
OLD_DIRS+=usr/include/clang/3.1
# 20120712: OpenSSL 1.0.1c import
OLD_LIBS+=lib/libcrypto.so.6
OLD_LIBS+=usr/lib/libssl.so.6

View File

@ -21,9 +21,9 @@
/* Need these includes to support the LLVM 'cast' template for the C++ 'wrap'
and 'unwrap' conversion functions. */
#include "llvm/IRBuilder.h"
#include "llvm/Module.h"
#include "llvm/PassRegistry.h"
#include "llvm/Support/IRBuilder.h"
extern "C" {
#endif
@ -53,7 +53,7 @@ extern "C" {
* The declared parameter names are descriptive and specify which type is
* required. Additionally, each type hierarchy is documented along with the
* functions that operate upon it. For more detail, refer to LLVM's C++ code.
* If in doubt, refer to Core.cpp, which performs paramter downcasts in the
* If in doubt, refer to Core.cpp, which performs parameter downcasts in the
* form unwrap<RequiredType>(Param).
*
* Many exotic languages can interoperate with C code but have a harder time
@ -106,7 +106,7 @@ typedef struct LLVMOpaqueType *LLVMTypeRef;
typedef struct LLVMOpaqueValue *LLVMValueRef;
/**
* Represents a basic block of instruction in LLVM IR.
* Represents a basic block of instructions in LLVM IR.
*
* This models llvm::BasicBlock.
*/
@ -477,6 +477,15 @@ void LLVMSetTarget(LLVMModuleRef M, const char *Triple);
*/
void LLVMDumpModule(LLVMModuleRef M);
/**
* Print a representation of a module to a file. The ErrorMessage needs to be
* disposed with LLVMDisposeMessage. Returns 0 on success, 1 otherwise.
*
* @see Module::print()
*/
LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
char **ErrorMessage);
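
For illustration, a minimal caller of the new API might look like this (the output path is arbitrary; error handling follows the contract in the comment above):

#include <stdio.h>
#include "llvm-c/Core.h"

void dump_module(LLVMModuleRef M) {
  char *Err = NULL;
  /* Returns 0 on success; on failure Err holds a message that must be
     freed with LLVMDisposeMessage, per the documentation above. */
  if (LLVMPrintModuleToFile(M, "out.ll", &Err)) {
    fprintf(stderr, "could not print module: %s\n", Err);
    LLVMDisposeMessage(Err);
  }
}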
/**
* Set inline assembly for a module.
*
@ -977,7 +986,7 @@ LLVMTypeRef LLVMX86MMXType(void);
*
* LLVMValueRef essentially represents llvm::Value. There is a rich
* hierarchy of classes within this type. Depending on the instance
* obtain, not all APIs are available.
* obtained, not all APIs are available.
*
* Callers can determine the type of a LLVMValueRef by calling the
* LLVMIsA* family of functions (e.g. LLVMIsAArgument()). These
@ -1153,7 +1162,7 @@ LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST)
*
* Uses are obtained in an iterator fashion. First, call this function
* to obtain a reference to the first use. Then, call LLVMGetNextUse()
* on that instance and all subsequently obtained instances untl
* on that instance and all subsequently obtained instances until
* LLVMGetNextUse() returns NULL.
*
* @see llvm::Value::use_begin()
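
A short sketch of the iteration pattern described above:

#include "llvm-c/Core.h"

/* Count the uses of V: start from LLVMGetFirstUse and follow
   LLVMGetNextUse until it returns NULL. */
unsigned count_uses(LLVMValueRef V) {
  unsigned N = 0;
  LLVMUseRef U;
  for (U = LLVMGetFirstUse(V); U != NULL; U = LLVMGetNextUse(U))
    N++;
  return N;
}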
@ -2106,7 +2115,7 @@ LLVMBasicBlockRef LLVMGetInstructionParent(LLVMValueRef Inst);
LLVMValueRef LLVMGetNextInstruction(LLVMValueRef Inst);
/**
* Obtain the instruction that occured before this one.
* Obtain the instruction that occurred before this one.
*
* If the instruction is the first instruction in a basic block, NULL
* will be returned.

View File

@ -109,9 +109,9 @@ struct LLVMOpInfo1 {
*/
typedef const char *(*LLVMSymbolLookupCallback)(void *DisInfo,
uint64_t ReferenceValue,
uint64_t *ReferenceType,
uint64_t ReferencePC,
const char **ReferenceName);
uint64_t *ReferenceType,
uint64_t ReferencePC,
const char **ReferenceName);
/**
* The reference types on input and output.
*/

View File

@ -0,0 +1,42 @@
/*===-- llvm-c/Linker.h - Module Linker C Interface -------------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines the C interface to the module/file/archive linker. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_LINKER_H
#define LLVM_C_LINKER_H
#include "llvm-c/Core.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
LLVMLinkerDestroySource = 0, /* Allow source module to be destroyed. */
LLVMLinkerPreserveSource = 1 /* Preserve the source module. */
} LLVMLinkerMode;
/* Links the source module into the destination module, taking ownership
* of the source module away from the caller. Optionally returns a
* human-readable description of any errors that occurred in linking.
* OutMessage must be disposed with LLVMDisposeMessage. The return value
* is true if an error occurred, false otherwise. */
LLVMBool LLVMLinkModules(LLVMModuleRef Dest, LLVMModuleRef Src,
LLVMLinkerMode Mode, char **OutMessage);
#ifdef __cplusplus
}
#endif
#endif
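
A minimal sketch of calling the new linker entry point (module creation is left out; ownership transfer and message disposal follow the comment above):

#include <stdio.h>
#include "llvm-c/Core.h"
#include "llvm-c/Linker.h"

/* Link Src into Dest; ownership of Src is taken away from the caller.
   Returns nonzero on error. */
int link_into(LLVMModuleRef Dest, LLVMModuleRef Src) {
  char *Err = NULL;
  if (LLVMLinkModules(Dest, Src, LLVMLinkerDestroySource, &Err)) {
    fprintf(stderr, "link failed: %s\n", Err);
    LLVMDisposeMessage(Err);
    return 1;
  }
  return 0;
}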

View File

@ -56,19 +56,19 @@ typedef struct LLVMStructLayout *LLVMStructLayoutRef;
/* Declare all of the available assembly printer initialization functions. */
#define LLVM_ASM_PRINTER(TargetName) \
void LLVMInitialize##TargetName##AsmPrinter();
void LLVMInitialize##TargetName##AsmPrinter(void);
#include "llvm/Config/AsmPrinters.def"
#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
/* Declare all of the available assembly parser initialization functions. */
#define LLVM_ASM_PARSER(TargetName) \
void LLVMInitialize##TargetName##AsmParser();
void LLVMInitialize##TargetName##AsmParser(void);
#include "llvm/Config/AsmParsers.def"
#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
/* Declare all of the available disassembler initialization functions. */
#define LLVM_DISASSEMBLER(TargetName) \
void LLVMInitialize##TargetName##Disassembler();
void LLVMInitialize##TargetName##Disassembler(void);
#include "llvm/Config/Disassemblers.def"
#undef LLVM_DISASSEMBLER /* Explicit undef to make SWIG happier */
@ -102,7 +102,7 @@ static inline void LLVMInitializeAllTargetMCs(void) {
/** LLVMInitializeAllAsmPrinters - The main program should call this function if
it wants all asm printers that LLVM is configured to support, to make them
available via the TargetRegistry. */
static inline void LLVMInitializeAllAsmPrinters() {
static inline void LLVMInitializeAllAsmPrinters(void) {
#define LLVM_ASM_PRINTER(TargetName) LLVMInitialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"
#undef LLVM_ASM_PRINTER /* Explicit undef to make SWIG happier */
@ -111,7 +111,7 @@ static inline void LLVMInitializeAllAsmPrinters() {
/** LLVMInitializeAllAsmParsers - The main program should call this function if
it wants all asm parsers that LLVM is configured to support, to make them
available via the TargetRegistry. */
static inline void LLVMInitializeAllAsmParsers() {
static inline void LLVMInitializeAllAsmParsers(void) {
#define LLVM_ASM_PARSER(TargetName) LLVMInitialize##TargetName##AsmParser();
#include "llvm/Config/AsmParsers.def"
#undef LLVM_ASM_PARSER /* Explicit undef to make SWIG happier */
@ -120,7 +120,7 @@ static inline void LLVMInitializeAllAsmParsers() {
/** LLVMInitializeAllDisassemblers - The main program should call this function
if it wants all disassemblers that LLVM is configured to support, to make
them available via the TargetRegistry. */
static inline void LLVMInitializeAllDisassemblers() {
static inline void LLVMInitializeAllDisassemblers(void) {
#define LLVM_DISASSEMBLER(TargetName) \
LLVMInitialize##TargetName##Disassembler();
#include "llvm/Config/Disassemblers.def"

View File

@ -274,6 +274,7 @@ namespace llvm {
/* C fmod, or llvm frem. */
opStatus mod(const APFloat &, roundingMode);
opStatus fusedMultiplyAdd(const APFloat &, const APFloat &, roundingMode);
opStatus roundToIntegral(roundingMode);
/* Sign operations. */
void changeSign();

View File

@ -16,6 +16,7 @@
#define LLVM_APINT_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <climits>
@ -273,6 +274,13 @@ class APInt {
initSlowCase(that);
}
#if LLVM_USE_RVALUE_REFERENCES
/// @brief Move Constructor.
APInt(APInt&& that) : BitWidth(that.BitWidth), VAL(that.VAL) {
that.BitWidth = 0;
}
#endif
/// @brief Destructor.
~APInt() {
if (!isSingleWord())
@ -349,13 +357,7 @@ class APInt {
/// @brief Check if this APInt has an N-bits unsigned integer value.
bool isIntN(unsigned N) const {
assert(N && "N == 0 ???");
if (N >= getBitWidth())
return true;
if (isSingleWord())
return isUIntN(N, VAL);
return APInt(N, makeArrayRef(pVal, getNumWords())).zext(getBitWidth())
== (*this);
return getActiveBits() <= N;
}
/// @brief Check if this APInt has an N-bits signed integer value.
@ -503,6 +505,18 @@ class APInt {
return getAllOnesValue(numBits).lshr(numBits - loBitsSet);
}
/// \brief Determine if two APInts have the same value, after zero-extending
/// one of them (if needed!) to ensure that the bit-widths match.
static bool isSameValue(const APInt &I1, const APInt &I2) {
if (I1.getBitWidth() == I2.getBitWidth())
return I1 == I2;
if (I1.getBitWidth() > I2.getBitWidth())
return I1 == I2.zext(I1.getBitWidth());
return I1.zext(I2.getBitWidth()) == I2;
}
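
A worked example of the new helper (values chosen for illustration):

#include "llvm/ADT/APInt.h"

void apint_same_value_demo() {
  llvm::APInt A(8, 255);   // 0xFF in 8 bits
  llvm::APInt B(16, 255);  // 0x00FF in 16 bits
  llvm::APInt C(16, 0xFF00);
  bool Eq1 = llvm::APInt::isSameValue(A, B); // true: A zero-extends to 0x00FF
  bool Eq2 = llvm::APInt::isSameValue(A, C); // false
  (void)Eq1; (void)Eq2;
}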
/// \brief Overload to compute a hash_code for an APInt value.
friend hash_code hash_value(const APInt &Arg);
@ -587,6 +601,21 @@ class APInt {
return AssignSlowCase(RHS);
}
#if LLVM_USE_RVALUE_REFERENCES
/// @brief Move assignment operator.
APInt& operator=(APInt&& that) {
if (!isSingleWord())
delete [] pVal;
BitWidth = that.BitWidth;
VAL = that.VAL;
that.BitWidth = 0;
return *this;
}
#endif
/// The RHS value is assigned to *this. If the significant bits in RHS exceed
/// the bit width, the excess bits are truncated. If the bit width is larger
/// than 64, the value is zero filled in the unspecified high order bits.
@ -817,9 +846,10 @@ class APInt {
if (LHS.isNegative()) {
if (RHS.isNegative())
APInt::udivrem(-LHS, -RHS, Quotient, Remainder);
else
else {
APInt::udivrem(-LHS, RHS, Quotient, Remainder);
Quotient = -Quotient;
Quotient = -Quotient;
}
Remainder = -Remainder;
} else if (RHS.isNegative()) {
APInt::udivrem(LHS, -RHS, Quotient, Remainder);
@ -1087,7 +1117,7 @@ class APInt {
else {
// Set all the bits in all the words.
for (unsigned i = 0; i < getNumWords(); ++i)
pVal[i] = -1ULL;
pVal[i] = -1ULL;
}
// Clear the unused ones
clearUnusedBits();

View File

@ -135,6 +135,19 @@ class APSInt : public APInt {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return IsUnsigned ? uge(RHS) : sge(RHS);
}
inline bool operator==(const APSInt& RHS) const {
assert(IsUnsigned == RHS.IsUnsigned && "Signedness mismatch!");
return eq(RHS);
}
inline bool operator==(int64_t RHS) const {
return isSameValue(*this, APSInt(APInt(64, RHS), true));
}
inline bool operator!=(const APSInt& RHS) const {
return !((*this) == RHS);
}
inline bool operator!=(int64_t RHS) const {
return !((*this) == RHS);
}
// The remaining operators just wrap the logic of APInt, but retain the
// signedness information.
@ -250,17 +263,50 @@ class APSInt : public APInt {
: APInt::getSignedMinValue(numBits), Unsigned);
}
/// \brief Determine if two APSInts have the same value, zero- or
/// sign-extending as needed.
static bool isSameValue(const APSInt &I1, const APSInt &I2) {
if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
return I1 == I2;
// Check for a bit-width mismatch.
if (I1.getBitWidth() > I2.getBitWidth())
return isSameValue(I1, I2.extend(I1.getBitWidth()));
else if (I2.getBitWidth() > I1.getBitWidth())
return isSameValue(I1.extend(I2.getBitWidth()), I2);
// We have a signedness mismatch. Turn the signed value into an unsigned
// value.
if (I1.isSigned()) {
if (I1.isNegative())
return false;
return APSInt(I1, true) == I2;
}
if (I2.isNegative())
return false;
return I1 == APSInt(I2, true);
}
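
A sketch of the new comparisons in use (illustrative, nonnegative values, so the signedness reconciliation above succeeds):

#include "llvm/ADT/APSInt.h"

void apsint_demo() {
  llvm::APSInt S(llvm::APInt(16, 42), /*isUnsigned=*/false); // signed 42
  bool A = (S == 42);  // true: operator==(int64_t) goes through isSameValue
  llvm::APSInt U8(llvm::APInt(8, 200), /*isUnsigned=*/true);
  llvm::APSInt S32(llvm::APInt(32, 200), /*isUnsigned=*/false);
  bool B = llvm::APSInt::isSameValue(U8, S32); // true: widths and signedness
                                               // are reconciled first
  (void)A; (void)B;
}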
/// Profile - Used to insert APSInt objects, or objects that contain APSInt
/// objects, into FoldingSets.
void Profile(FoldingSetNodeID& ID) const;
};
inline bool operator==(int64_t V1, const APSInt& V2) {
return V2 == V1;
}
inline bool operator!=(int64_t V1, const APSInt& V2) {
return V2 != V1;
}
inline raw_ostream &operator<<(raw_ostream &OS, const APSInt &I) {
I.print(OS, I.isSigned());
return OS;
}
} // end namespace llvm
#endif

View File

@ -60,7 +60,7 @@ namespace llvm {
: Data(begin), Length(end - begin) {}
/// Construct an ArrayRef from a SmallVector.
/*implicit*/ ArrayRef(const SmallVectorImpl<T> &Vec)
/*implicit*/ ArrayRef(const SmallVectorTemplateCommon<T> &Vec)
: Data(Vec.data()), Length(Vec.size()) {}
/// Construct an ArrayRef from a std::vector.

View File

@ -14,6 +14,7 @@
#ifndef LLVM_ADT_BITVECTOR_H
#define LLVM_ADT_BITVECTOR_H
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
@ -97,6 +98,13 @@ class BitVector {
std::memcpy(Bits, RHS.Bits, Capacity * sizeof(BitWord));
}
#if LLVM_USE_RVALUE_REFERENCES
BitVector(BitVector &&RHS)
: Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
RHS.Bits = 0;
}
#endif
~BitVector() {
std::free(Bits);
}
@ -251,11 +259,6 @@ class BitVector {
return *this;
}
// No argument flip.
BitVector operator~() const {
return BitVector(*this).flip();
}
// Indexing.
reference operator[](unsigned Idx) {
assert (Idx < Size && "Out-of-bounds Bit access.");
@ -272,6 +275,16 @@ class BitVector {
return (*this)[Idx];
}
/// Test if any common bits are set.
bool anyCommon(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
unsigned RHSWords = NumBitWords(RHS.size());
for (unsigned i = 0, e = std::min(ThisWords, RHSWords); i != e; ++i)
if (Bits[i] & RHS.Bits[i])
return true;
return false;
}
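
For example (sizes may differ; anyCommon only inspects the overlapping words):

#include "llvm/ADT/BitVector.h"

void anycommon_demo() {
  llvm::BitVector A(64), B(32);
  A.set(10);
  B.set(10);
  bool Overlap = A.anyCommon(B); // true: bit 10 is set in both
  (void)Overlap;
}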
// Comparison operators.
bool operator==(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
@ -366,6 +379,21 @@ class BitVector {
return *this;
}
#if LLVM_USE_RVALUE_REFERENCES
const BitVector &operator=(BitVector &&RHS) {
if (this == &RHS) return *this;
std::free(Bits);
Bits = RHS.Bits;
Size = RHS.Size;
Capacity = RHS.Capacity;
RHS.Bits = 0;
return *this;
}
#endif
void swap(BitVector &RHS) {
std::swap(Bits, RHS.Bits);
std::swap(Size, RHS.Size);
@ -472,24 +500,6 @@ class BitVector {
}
};
inline BitVector operator&(const BitVector &LHS, const BitVector &RHS) {
BitVector Result(LHS);
Result &= RHS;
return Result;
}
inline BitVector operator|(const BitVector &LHS, const BitVector &RHS) {
BitVector Result(LHS);
Result |= RHS;
return Result;
}
inline BitVector operator^(const BitVector &LHS, const BitVector &RHS) {
BitVector Result(LHS);
Result ^= RHS;
return Result;
}
} // End llvm namespace
namespace std {

File diff suppressed because it is too large.

View File

@ -187,7 +187,7 @@ class df_iterator : public std::iterator<std::forward_iterator_tag,
/// current node, counting both nodes.
unsigned getPathLength() const { return VisitStack.size(); }
/// getPath - Return the n'th node in the path from the the entry node to the
/// getPath - Return the n'th node in the path from the entry node to the
/// current node.
NodeType *getPath(unsigned n) const {
return VisitStack[n].first.getPointer();

View File

@ -517,6 +517,111 @@ class ContextualFoldingSet : public FoldingSetImpl {
}
};
//===----------------------------------------------------------------------===//
/// FoldingSetVectorIterator - This implements an iterator for
/// FoldingSetVector. It is only necessary because FoldingSetIterator provides
/// a value_type of T, while the vector in FoldingSetVector exposes
/// a value_type of T*. Fortunately, FoldingSetIterator doesn't expose very
/// much besides operator* and operator->, so we just wrap the inner vector
/// iterator and perform the extra dereference.
template <class T, class VectorIteratorT>
class FoldingSetVectorIterator {
// Provide a typedef to workaround the lack of correct injected class name
// support in older GCCs.
typedef FoldingSetVectorIterator<T, VectorIteratorT> SelfT;
VectorIteratorT Iterator;
public:
FoldingSetVectorIterator(VectorIteratorT I) : Iterator(I) {}
bool operator==(const SelfT &RHS) const {
return Iterator == RHS.Iterator;
}
bool operator!=(const SelfT &RHS) const {
return Iterator != RHS.Iterator;
}
T &operator*() const { return **Iterator; }
T *operator->() const { return *Iterator; }
inline SelfT &operator++() {
++Iterator;
return *this;
}
SelfT operator++(int) {
SelfT tmp = *this;
++*this;
return tmp;
}
};
//===----------------------------------------------------------------------===//
/// FoldingSetVector - This template class combines a FoldingSet and a vector
/// to provide the interface of FoldingSet but with deterministic iteration
/// order based on the insertion order. T must be a subclass of FoldingSetNode
/// and implement a Profile function.
template <class T, class VectorT = SmallVector<T*, 8> >
class FoldingSetVector {
FoldingSet<T> Set;
VectorT Vector;
public:
explicit FoldingSetVector(unsigned Log2InitSize = 6)
: Set(Log2InitSize) {
}
typedef FoldingSetVectorIterator<T, typename VectorT::iterator> iterator;
iterator begin() { return Vector.begin(); }
iterator end() { return Vector.end(); }
typedef FoldingSetVectorIterator<const T, typename VectorT::const_iterator>
const_iterator;
const_iterator begin() const { return Vector.begin(); }
const_iterator end() const { return Vector.end(); }
/// clear - Remove all nodes from the folding set.
void clear() { Set.clear(); Vector.clear(); }
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
return Set.FindNodeOrInsertPos(ID, InsertPos);
}
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and
/// return it instead.
T *GetOrInsertNode(T *N) {
T *Result = Set.GetOrInsertNode(N);
if (Result == N) Vector.push_back(N);
return Result;
}
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void InsertNode(T *N, void *InsertPos) {
Set.InsertNode(N, InsertPos);
Vector.push_back(N);
}
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set.
void InsertNode(T *N) {
Set.InsertNode(N);
Vector.push_back(N);
}
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return Set.size(); }
/// empty - Returns true if there are no nodes in the folding set.
bool empty() const { return Set.empty(); }
};
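
A sketch of the interning pattern this container supports; InternedInt is a hypothetical node type:

#include "llvm/ADT/FoldingSet.h"

struct InternedInt : llvm::FoldingSetNode {
  int Value;
  explicit InternedInt(int V) : Value(V) {}
  void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Value); }
};

InternedInt *intern(llvm::FoldingSetVector<InternedInt> &Pool, int V) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(V);
  void *InsertPos = 0;
  if (InternedInt *N = Pool.FindNodeOrInsertPos(ID, InsertPos))
    return N;                    // already present; iteration order unchanged
  InternedInt *N = new InternedInt(V);
  Pool.InsertNode(N, InsertPos); // recorded in both the set and the vector
  return N;
}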
//===----------------------------------------------------------------------===//
/// FoldingSetIteratorImpl - This is the common iterator support shared by all
/// folding sets, which knows how to walk the folding set hash table.

View File

@ -76,10 +76,6 @@ namespace llvm {
/// using llvm::hash_value;
/// llvm::hash_code code = hash_value(x);
/// \endcode
///
/// Also note that there are two numerical values which are reserved, and the
/// implementation ensures will never be produced for real hash_codes. These
/// can be used as sentinels within hashing data structures.
class hash_code {
size_t value;

View File

@ -431,7 +431,7 @@ class ImutAVLFactory {
// Make sure the index is not the Tombstone or Entry key of the DenseMap.
static inline unsigned maskCacheIndex(unsigned I) {
return (I & ~0x02);
return (I & ~0x02);
}
unsigned incrementHeight(TreeTy* L, TreeTy* R) const {
@ -667,7 +667,7 @@ class ImutAVLTreeGenericIterator {
return reinterpret_cast<TreeTy*>(stack.back() & ~Flags);
}
uintptr_t getVisitState() {
uintptr_t getVisitState() const {
assert(!stack.empty());
return stack.back() & Flags;
}

View File

@ -20,19 +20,14 @@
#ifndef LLVM_ADT_INDEXEDMAP_H
#define LLVM_ADT_INDEXEDMAP_H
#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <functional>
#include <vector>
namespace llvm {
struct IdentityFunctor : public std::unary_function<unsigned, unsigned> {
unsigned operator()(unsigned Index) const {
return Index;
}
};
template <typename T, typename ToIndexT = IdentityFunctor>
template <typename T, typename ToIndexT = llvm::identity<unsigned> >
class IndexedMap {
typedef typename ToIndexT::argument_type IndexT;
typedef std::vector<T> StorageT;

View File

@ -21,9 +21,9 @@
#ifndef LLVM_ADT_INTRUSIVE_REF_CNT_PTR
#define LLVM_ADT_INTRUSIVE_REF_CNT_PTR
#include <cassert>
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include <memory>
namespace llvm {
@ -34,7 +34,7 @@ namespace llvm {
/// RefCountedBase - A generic base class for objects that wish to
/// have their lifetimes managed using reference counts. Classes
/// subclass RefCountedBase to obtain such functionality, and are
/// typically handled with IntrusivePtr "smart pointers" (see below)
/// typically handled with IntrusiveRefCntPtr "smart pointers" (see below)
/// which automatically handle the management of reference counts.
/// Objects that subclass RefCountedBase should not be allocated on
/// the stack, as invoking "delete" (which is called when the
@ -123,25 +123,25 @@ namespace llvm {
retain();
}
#if LLVM_USE_RVALUE_REFERENCES
IntrusiveRefCntPtr(IntrusiveRefCntPtr&& S) : Obj(S.Obj) {
S.Obj = 0;
}
template <class X>
IntrusiveRefCntPtr(IntrusiveRefCntPtr<X>&& S) : Obj(S.getPtr()) {
S.Obj = 0;
}
#endif
template <class X>
IntrusiveRefCntPtr(const IntrusiveRefCntPtr<X>& S)
: Obj(S.getPtr()) {
retain();
}
IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr& S) {
replace(S.getPtr());
return *this;
}
template <class X>
IntrusiveRefCntPtr& operator=(const IntrusiveRefCntPtr<X>& S) {
replace(S.getPtr());
return *this;
}
IntrusiveRefCntPtr& operator=(T * S) {
replace(S);
IntrusiveRefCntPtr& operator=(IntrusiveRefCntPtr S) {
swap(S);
return *this;
}
@ -176,10 +176,6 @@ namespace llvm {
private:
void retain() { if (Obj) IntrusiveRefCntPtrInfo<T>::retain(Obj); }
void release() { if (Obj) IntrusiveRefCntPtrInfo<T>::release(Obj); }
void replace(T* S) {
this_type(S).swap(*this);
}
};
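
A minimal usage sketch of the class described in the comment above (Widget is hypothetical); note that the unified pass-by-value operator= now covers both the copy and templated assignments:

#include "llvm/ADT/IntrusiveRefCntPtr.h"

struct Widget : llvm::RefCountedBase<Widget> {};

void refcnt_demo() {
  llvm::IntrusiveRefCntPtr<Widget> P(new Widget()); // count: 1
  {
    llvm::IntrusiveRefCntPtr<Widget> Q = P;         // count: 2
  }                                                 // Q gone, count: 1
} // P gone, count: 0, the Widget is deleted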
template<class T, class U>

View File

@ -108,7 +108,14 @@ class PointerIntPair {
static PointerIntPair getFromOpaqueValue(void *V) {
PointerIntPair P; P.setFromOpaqueValue(V); return P;
}
// Allow PointerIntPairs to be created from const void * if and only if the
// pointer type could be created from a const void *.
static PointerIntPair getFromOpaqueValue(const void *V) {
(void)PtrTraits::getFromVoidPointer(V);
return getFromOpaqueValue(const_cast<void *>(V));
}
bool operator==(const PointerIntPair &RHS) const {return Value == RHS.Value;}
bool operator!=(const PointerIntPair &RHS) const {return Value != RHS.Value;}
bool operator<(const PointerIntPair &RHS) const {return Value < RHS.Value;}
@ -158,6 +165,10 @@ class PointerLikeTypeTraits<PointerIntPair<PointerTy, IntBits, IntType,
getFromVoidPointer(void *P) {
return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
}
static inline PointerIntPair<PointerTy, IntBits, IntType>
getFromVoidPointer(const void *P) {
return PointerIntPair<PointerTy, IntBits, IntType>::getFromOpaqueValue(P);
}
enum {
NumLowBitsAvailable = PtrTraits::NumLowBitsAvailable - IntBits
};

View File

@ -54,8 +54,8 @@ namespace llvm {
static inline void *getAsVoidPointer(void *P) { return P; }
static inline void *getFromVoidPointer(void *P) { return P; }
enum {
PT1BitsAv = PointerLikeTypeTraits<PT1>::NumLowBitsAvailable,
PT2BitsAv = PointerLikeTypeTraits<PT2>::NumLowBitsAvailable,
PT1BitsAv = (int)(PointerLikeTypeTraits<PT1>::NumLowBitsAvailable),
PT2BitsAv = (int)(PointerLikeTypeTraits<PT2>::NumLowBitsAvailable),
NumLowBitsAvailable = PT1BitsAv < PT2BitsAv ? PT1BitsAv : PT2BitsAv
};
};

View File

@ -23,26 +23,65 @@
namespace llvm {
template<class SetType, bool External> // Non-external set
// The po_iterator_storage template provides access to the set of already
// visited nodes during the po_iterator's depth-first traversal.
//
// The default implementation simply contains a set of visited nodes, while
// the Extended=true version uses a reference to an external set.
//
// It is possible to prune the depth-first traversal in several ways:
//
// - When providing an external set that already contains some graph nodes,
// those nodes won't be visited again. This is useful for restarting a
// post-order traversal on a graph with nodes that aren't dominated by a
// single node.
//
// - By providing a custom SetType class, unwanted graph nodes can be excluded
// by having the insert() function return false. This could for example
// confine a CFG traversal to blocks in a specific loop.
//
// - Finally, by specializing the po_iterator_storage template itself, graph
// edges can be pruned by returning false in the insertEdge() function. This
// could be used to remove loop back-edges from the CFG seen by po_iterator.
//
// A specialized po_iterator_storage class can observe both the pre-order and
// the post-order. The insertEdge() function is called in a pre-order, while
// the finishPostorder() function is called just before the po_iterator moves
// on to the next node.
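
A sketch of the SetType-based pruning described above; Node and the allowed region are hypothetical, and the only contract relied on is that insert() returns false for nodes the traversal must not enter:

#include <set>

struct Node; // hypothetical graph node

struct PruningSet {
  std::set<Node *> Visited;
  const std::set<Node *> &Allowed; // region the traversal may enter
  explicit PruningSet(const std::set<Node *> &A) : Allowed(A) {}
  bool insert(Node *N) {
    if (!Allowed.count(N))
      return false;               // prune: treat as already visited
    return Visited.insert(N).second;
  }
  size_t count(Node *N) const { return Visited.count(N); }
};
// Passed as the external set of po_ext_begin/po_ext_end, this confines
// the post-order walk to the Allowed region.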
/// Default po_iterator_storage implementation with an internal set object.
template<class SetType, bool External>
class po_iterator_storage {
public:
SetType Visited;
public:
// Return true if edge destination should be visited.
template<typename NodeType>
bool insertEdge(NodeType *From, NodeType *To) {
return Visited.insert(To);
}
// Called after all children of BB have been visited.
template<typename NodeType>
void finishPostorder(NodeType *BB) {}
};
/// DFSetTraits - Allow the SetType used to record depth-first search results to
/// optionally record node postorder.
template<class SetType>
struct DFSetTraits {
static void finishPostorder(
typename SetType::iterator::value_type, SetType &) {}
};
/// Specialization of po_iterator_storage that references an external set.
template<class SetType>
class po_iterator_storage<SetType, true> {
SetType &Visited;
public:
po_iterator_storage(SetType &VSet) : Visited(VSet) {}
po_iterator_storage(const po_iterator_storage &S) : Visited(S.Visited) {}
SetType &Visited;
// Return true if edge destination should be visited, called with From = 0 for
// the root node.
// Graph edges can be pruned by specializing this function.
template<class NodeType>
bool insertEdge(NodeType *From, NodeType *To) { return Visited.insert(To); }
// Called after all children of BB have been visited.
template<class NodeType>
void finishPostorder(NodeType *BB) {}
};
template<class GraphT,
@ -64,14 +103,15 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
void traverseChild() {
while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) {
NodeType *BB = *VisitStack.back().second++;
if (this->Visited.insert(BB)) { // If the block is not visited...
if (this->insertEdge(VisitStack.back().first, BB)) {
// If the block is not visited...
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
}
}
}
inline po_iterator(NodeType *BB) {
this->Visited.insert(BB);
this->insertEdge((NodeType*)0, BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@ -79,7 +119,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline po_iterator(NodeType *BB, SetType &S) :
po_iterator_storage<SetType, ExtStorage>(S) {
if (this->Visited.insert(BB)) {
if (this->insertEdge((NodeType*)0, BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@ -117,8 +157,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline NodeType *operator->() const { return operator*(); }
inline _Self& operator++() { // Preincrement
DFSetTraits<SetType>::finishPostorder(VisitStack.back().first,
this->Visited);
this->finishPostorder(VisitStack.back().first);
VisitStack.pop_back();
if (!VisitStack.empty())
traverseChild();
@ -173,14 +212,14 @@ ipo_iterator<T> ipo_end(T G){
return ipo_iterator<T>::end(G);
}
//Provide global definitions of external inverse postorder iterators...
// Provide global definitions of external inverse postorder iterators...
template <class T,
class SetType = std::set<typename GraphTraits<T>::NodeType*> >
struct ipo_ext_iterator : public ipo_iterator<T, SetType, true> {
ipo_ext_iterator(const ipo_iterator<T, SetType, true> &V) :
ipo_iterator<T, SetType, true>(&V) {}
ipo_iterator<T, SetType, true>(V) {}
ipo_ext_iterator(const po_iterator<Inverse<T>, SetType, true> &V) :
ipo_iterator<T, SetType, true>(&V) {}
ipo_iterator<T, SetType, true>(V) {}
};
template <class T, class SetType>

View File

@ -29,6 +29,16 @@ namespace llvm {
// Extra additions to <functional>
//===----------------------------------------------------------------------===//
template<class Ty>
struct identity : public std::unary_function<Ty, Ty> {
Ty &operator()(Ty &self) const {
return self;
}
const Ty &operator()(const Ty &self) const {
return self;
}
};
template<class Ty>
struct less_ptr : public std::binary_function<Ty, Ty, bool> {
bool operator()(const Ty* left, const Ty* right) const {
@ -49,7 +59,7 @@ struct greater_ptr : public std::binary_function<Ty, Ty, bool> {
// for_each(V.begin(), B.end(), deleter<Interval>);
//
template <class T>
static inline void deleter(T *Ptr) {
inline void deleter(T *Ptr) {
delete Ptr;
}
@ -228,7 +238,7 @@ inline size_t array_lengthof(T (&)[N]) {
/// array_pod_sort_comparator - This is helper function for array_pod_sort,
/// which just uses operator< on T.
template<typename T>
static inline int array_pod_sort_comparator(const void *P1, const void *P2) {
inline int array_pod_sort_comparator(const void *P1, const void *P2) {
if (*reinterpret_cast<const T*>(P1) < *reinterpret_cast<const T*>(P2))
return -1;
if (*reinterpret_cast<const T*>(P2) < *reinterpret_cast<const T*>(P1))
@ -239,7 +249,7 @@ static inline int array_pod_sort_comparator(const void *P1, const void *P2) {
/// get_array_pad_sort_comparator - This is an internal helper function used to
/// get type deduction of T right.
template<typename T>
static int (*get_array_pad_sort_comparator(const T &))
inline int (*get_array_pad_sort_comparator(const T &))
(const void*, const void*) {
return array_pod_sort_comparator<T>;
}
@ -260,7 +270,7 @@ static int (*get_array_pad_sort_comparator(const T &))
/// NOTE: If qsort_r were portable, we could allow a custom comparator and
/// default to std::less.
template<class IteratorTy>
static inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
// Don't dereference start iterator of empty sequence.
if (Start == End) return;
qsort(&*Start, End-Start, sizeof(*Start),
@ -268,13 +278,13 @@ static inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
}
template<class IteratorTy>
static inline void array_pod_sort(IteratorTy Start, IteratorTy End,
inline void array_pod_sort(IteratorTy Start, IteratorTy End,
int (*Compare)(const void*, const void*)) {
// Don't dereference start iterator of empty sequence.
if (Start == End) return;
qsort(&*Start, End-Start, sizeof(*Start), Compare);
}
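
For instance (the comparator is synthesized from operator< as described above):

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

void pod_sort_demo() {
  llvm::SmallVector<int, 8> V;
  V.push_back(3);
  V.push_back(1);
  V.push_back(2);
  llvm::array_pod_sort(V.begin(), V.end()); // V is now 1, 2, 3
}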
//===----------------------------------------------------------------------===//
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//

View File

@ -15,6 +15,7 @@
#define LLVM_ADT_SMALLBITVECTOR_H
#include "llvm/ADT/BitVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
@ -152,6 +153,12 @@ class SmallBitVector {
switchToLarge(new BitVector(*RHS.getPointer()));
}
#if LLVM_USE_RVALUE_REFERENCES
SmallBitVector(SmallBitVector &&RHS) : X(RHS.X) {
RHS.X = 1;
}
#endif
~SmallBitVector() {
if (!isSmall())
delete getPointer();
@ -347,6 +354,19 @@ class SmallBitVector {
return (*this)[Idx];
}
/// Test if any common bits are set.
bool anyCommon(const SmallBitVector &RHS) const {
if (isSmall() && RHS.isSmall())
return (getSmallBits() & RHS.getSmallBits()) != 0;
if (!isSmall() && !RHS.isSmall())
return getPointer()->anyCommon(*RHS.getPointer());
for (unsigned i = 0, e = std::min(size(), RHS.size()); i != e; ++i)
if (test(i) && RHS.test(i))
return true;
return false;
}
// Comparison operators.
bool operator==(const SmallBitVector &RHS) const {
if (size() != RHS.size())
@ -422,9 +442,72 @@ class SmallBitVector {
return *this;
}
#if LLVM_USE_RVALUE_REFERENCES
const SmallBitVector &operator=(SmallBitVector &&RHS) {
if (this != &RHS) {
clear();
swap(RHS);
}
return *this;
}
#endif
void swap(SmallBitVector &RHS) {
std::swap(X, RHS.X);
}
/// setBitsInMask - Add '1' bits from Mask to this vector. Don't resize.
/// This computes "*this |= Mask".
void setBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<true, false>(Mask, MaskWords);
else
getPointer()->setBitsInMask(Mask, MaskWords);
}
/// clearBitsInMask - Clear any bits in this vector that are set in Mask.
/// Don't resize. This computes "*this &= ~Mask".
void clearBitsInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<false, false>(Mask, MaskWords);
else
getPointer()->clearBitsInMask(Mask, MaskWords);
}
/// setBitsNotInMask - Add a bit to this vector for every '0' bit in Mask.
/// Don't resize. This computes "*this |= ~Mask".
void setBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<true, true>(Mask, MaskWords);
else
getPointer()->setBitsNotInMask(Mask, MaskWords);
}
/// clearBitsNotInMask - Clear a bit in this vector for every '0' bit in Mask.
/// Don't resize. This computes "*this &= Mask".
void clearBitsNotInMask(const uint32_t *Mask, unsigned MaskWords = ~0u) {
if (isSmall())
applyMask<false, true>(Mask, MaskWords);
else
getPointer()->clearBitsNotInMask(Mask, MaskWords);
}
private:
template<bool AddBits, bool InvertMask>
void applyMask(const uint32_t *Mask, unsigned MaskWords) {
assert((NumBaseBits == 64 || NumBaseBits == 32) && "Unsupported word size");
if (NumBaseBits == 64 && MaskWords >= 2) {
uint64_t M = Mask[0] | (uint64_t(Mask[1]) << 32);
if (InvertMask) M = ~M;
if (AddBits) setSmallBits(getSmallBits() | M);
else setSmallBits(getSmallBits() & ~M);
} else {
uint32_t M = Mask[0];
if (InvertMask) M = ~M;
if (AddBits) setSmallBits(getSmallBits() | M);
else setSmallBits(getSmallBits() & ~M);
}
}
};
inline SmallBitVector

View File

@ -45,7 +45,7 @@ class SmallString : public SmallVector<char, InternalLen> {
/// @{
/// Assign from a repeated element
void assign(unsigned NumElts, char Elt) {
void assign(size_t NumElts, char Elt) {
this->SmallVectorImpl<char>::assign(NumElts, Elt);
}
@ -77,6 +77,11 @@ class SmallString : public SmallVector<char, InternalLen> {
void append(in_iter S, in_iter E) {
SmallVectorImpl<char>::append(S, E);
}
void append(size_t NumInputs, char Elt) {
SmallVectorImpl<char>::append(NumInputs, Elt);
}
/// Append from a StringRef
void append(StringRef RHS) {

View File

@ -14,6 +14,7 @@
#ifndef LLVM_ADT_SMALLVECTOR_H
#define LLVM_ADT_SMALLVECTOR_H
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
@ -54,6 +55,11 @@ class SmallVectorBase {
return BeginX == static_cast<const void*>(&FirstEl);
}
/// resetToSmall - Put this vector in a state of being small.
void resetToSmall() {
BeginX = EndX = CapacityX = &FirstEl;
}
/// grow_pod - This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(size_t MinSizeInBytes, size_t TSize);
@ -160,28 +166,84 @@ class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
}
}
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
/// move - Use move-assignment to move the range [I, E) onto the
/// objects starting with "Dest". This is just <memory>'s
/// std::move, but not all stdlibs actually provide that.
template<typename It1, typename It2>
static It2 move(It1 I, It1 E, It2 Dest) {
#if LLVM_USE_RVALUE_REFERENCES
for (; I != E; ++I, ++Dest)
*Dest = ::std::move(*I);
return Dest;
#else
return ::std::copy(I, E, Dest);
#endif
}
/// move_backward - Use move-assignment to move the range
/// [I, E) onto the objects ending at "Dest", moving objects
/// in reverse order. This is just <algorithm>'s
/// std::move_backward, but not all stdlibs actually provide that.
template<typename It1, typename It2>
static It2 move_backward(It1 I, It1 E, It2 Dest) {
#if LLVM_USE_RVALUE_REFERENCES
while (I != E)
*--Dest = ::std::move(*--E);
return Dest;
#else
return ::std::copy_backward(I, E, Dest);
#endif
}
/// uninitialized_move - Move the range [I, E) into the uninitialized
/// memory starting with "Dest", constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
#if LLVM_USE_RVALUE_REFERENCES
for (; I != E; ++I, ++Dest)
::new ((void*) &*Dest) T(::std::move(*I));
#else
::std::uninitialized_copy(I, E, Dest);
#endif
}
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized
/// memory starting with "Dest", constructing elements as needed.
template<typename It1, typename It2>
static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
std::uninitialized_copy(I, E, Dest);
}
/// grow - double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
/// grow - Grow the allocated memory (without initializing new
/// elements), doubling the size of the allocated memory.
/// Guarantees space for at least one more element, or MinSize more
/// elements if specified.
void grow(size_t MinSize = 0);
public:
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
new (this->end()) T(Elt);
::new ((void*) this->end()) T(Elt);
this->setEnd(this->end()+1);
return;
}
this->grow();
goto Retry;
}
#if LLVM_USE_RVALUE_REFERENCES
void push_back(T &&Elt) {
if (this->EndX < this->CapacityX) {
Retry:
::new ((void*) this->end()) T(::std::move(Elt));
this->setEnd(this->end()+1);
return;
}
this->grow();
goto Retry;
}
#endif
void pop_back() {
this->setEnd(this->end()-1);
@ -199,8 +261,8 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
// Copy the elements over.
this->uninitialized_copy(this->begin(), this->end(), NewElts);
// Move the elements over.
this->uninitialized_move(this->begin(), this->end(), NewElts);
// Destroy the original elements.
destroy_range(this->begin(), this->end());
@ -225,6 +287,29 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
// No need to do a destroy loop for POD's.
static void destroy_range(T *, T *) {}
/// move - Use move-assignment to move the range [I, E) onto the
/// objects starting with "Dest". For PODs, this is just memcpy.
template<typename It1, typename It2>
static It2 move(It1 I, It1 E, It2 Dest) {
return ::std::copy(I, E, Dest);
}
/// move_backward - Use move-assignment to move the range
/// [I, E) onto the objects ending at "Dest", moving objects
/// in reverse order.
template<typename It1, typename It2>
static It2 move_backward(It1 I, It1 E, It2 Dest) {
return ::std::copy_backward(I, E, Dest);
}
/// uninitialized_move - Move the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
static void uninitialized_move(It1 I, It1 E, It2 Dest) {
// Just do a copy.
uninitialized_copy(I, E, Dest);
}
/// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
/// starting with "Dest", constructing elements into it as needed.
template<typename It1, typename It2>
@ -252,7 +337,7 @@ class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
void push_back(const T &Elt) {
if (this->EndX < this->CapacityX) {
Retry:
*this->end() = Elt;
memcpy(this->end(), &Elt, sizeof(T));
this->setEnd(this->end()+1);
return;
}
@ -330,7 +415,11 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
}
T pop_back_val() {
#if LLVM_USE_RVALUE_REFERENCES
T Result = ::std::move(this->back());
#else
T Result = this->back();
#endif
this->pop_back();
return Result;
}
@ -374,36 +463,79 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
}
iterator erase(iterator I) {
assert(I >= this->begin() && "Iterator to erase is out of bounds.");
assert(I < this->end() && "Erasing at past-the-end iterator.");
iterator N = I;
// Shift all elts down one.
std::copy(I+1, this->end(), I);
this->move(I+1, this->end(), I);
// Drop the last elt.
this->pop_back();
return(N);
}
iterator erase(iterator S, iterator E) {
assert(S >= this->begin() && "Range to erase is out of bounds.");
assert(S <= E && "Trying to erase invalid range.");
assert(E <= this->end() && "Trying to erase past the end.");
iterator N = S;
// Shift all elts down.
iterator I = std::copy(E, this->end(), S);
iterator I = this->move(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
return(N);
}
#if LLVM_USE_RVALUE_REFERENCES
iterator insert(iterator I, T &&Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(::std::move(Elt));
return this->end()-1;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX < this->CapacityX) {
Retry:
::new ((void*) this->end()) T(::std::move(this->back()));
this->setEnd(this->end()+1);
// Push everything else over.
this->move_backward(I, this->end()-1, this->end());
// If we just moved the element we're inserting, be sure to update
// the reference.
T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
++EltPtr;
*I = ::std::move(*EltPtr);
return I;
}
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
goto Retry;
}
#endif
iterator insert(iterator I, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
this->push_back(Elt);
return this->end()-1;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX < this->CapacityX) {
Retry:
new (this->end()) T(this->back());
::new ((void*) this->end()) T(this->back());
this->setEnd(this->end()+1);
// Push everything else over.
std::copy_backward(I, this->end()-1, this->end());
this->move_backward(I, this->end()-1, this->end());
// If we just moved the element we're inserting, be sure to update
// the reference.
@ -421,14 +553,17 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
}
iterator insert(iterator I, size_type NumToInsert, const T &Elt) {
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->end()-1;
}
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(NumToInsert, Elt);
return this->begin()+InsertElt;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
@ -444,7 +579,7 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
this->move_backward(I, OldEnd-NumToInsert, OldEnd);
std::fill_n(I, NumToInsert, Elt);
return I;
@ -453,11 +588,11 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Copy over the elements that we're about to overwrite.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
std::fill_n(I, NumOverwritten, Elt);
@ -469,15 +604,19 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->end()-1;
}
size_t NumToInsert = std::distance(From, To);
// Convert iterator to elt# to avoid invalidating iterator when we reserve()
size_t InsertElt = I - this->begin();
if (I == this->end()) { // Important special case for empty vector.
append(From, To);
return this->begin()+InsertElt;
}
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
size_t NumToInsert = std::distance(From, To);
// Ensure there is enough space.
reserve(static_cast<unsigned>(this->size() + NumToInsert));
@ -493,7 +632,7 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
append(this->end()-NumToInsert, this->end());
// Copy the existing elements that get replaced.
std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
this->move_backward(I, OldEnd-NumToInsert, OldEnd);
std::copy(From, To, I);
return I;
@ -502,16 +641,16 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
// Otherwise, we're inserting more elements than exist already, and we're
// not inserting at the end.
// Copy over the elements that we're about to overwrite.
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
// Replace the overwritten part.
for (; NumOverwritten > 0; --NumOverwritten) {
*I = *From;
++I; ++From;
for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
*J = *From;
++J; ++From;
}
// Insert the non-overwritten middle part.
@ -519,8 +658,11 @@ class SmallVectorImpl : public SmallVectorTemplateBase<T, isPodLike<T>::value> {
return I;
}
const SmallVectorImpl
&operator=(const SmallVectorImpl &RHS);
SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
#if LLVM_USE_RVALUE_REFERENCES
SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
#endif
bool operator==(const SmallVectorImpl &RHS) const {
if (this->size() != RHS.size()) return false;
@ -590,7 +732,7 @@ void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
}
template <typename T>
const SmallVectorImpl<T> &SmallVectorImpl<T>::
SmallVectorImpl<T> &SmallVectorImpl<T>::
operator=(const SmallVectorImpl<T> &RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
@ -617,6 +759,7 @@ const SmallVectorImpl<T> &SmallVectorImpl<T>::
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: don't do this if they're efficiently moveable.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
@ -637,6 +780,69 @@ const SmallVectorImpl<T> &SmallVectorImpl<T>::
return *this;
}
#if LLVM_USE_RVALUE_REFERENCES
template <typename T>
SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
// Avoid self-assignment.
if (this == &RHS) return *this;
// If the RHS isn't small, clear this vector and then steal its buffer.
if (!RHS.isSmall()) {
this->destroy_range(this->begin(), this->end());
if (!this->isSmall()) free(this->begin());
this->BeginX = RHS.BeginX;
this->EndX = RHS.EndX;
this->CapacityX = RHS.CapacityX;
RHS.resetToSmall();
return *this;
}
// If we already have sufficient space, assign the common elements, then
// destroy any excess.
size_t RHSSize = RHS.size();
size_t CurSize = this->size();
if (CurSize >= RHSSize) {
// Assign common elements.
iterator NewEnd = this->begin();
if (RHSSize)
NewEnd = this->move(RHS.begin(), RHS.end(), NewEnd);
// Destroy excess elements and trim the bounds.
this->destroy_range(NewEnd, this->end());
this->setEnd(NewEnd);
// Clear the RHS.
RHS.clear();
return *this;
}
// If we have to grow to have enough elements, destroy the current elements.
// This allows us to avoid copying them during the grow.
// FIXME: this may not actually make any sense if we can efficiently move
// elements.
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
// Otherwise, use assignment for the already-constructed elements.
this->move(RHS.begin(), RHS.end(), this->begin());
}
// Move-construct the new elements in place.
this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
RHS.clear();
return *this;
}
#endif
/// SmallVector - This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
@ -692,6 +898,18 @@ class SmallVector : public SmallVectorImpl<T> {
return *this;
}
#if LLVM_USE_RVALUE_REFERENCES
SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(NumTsAvailable) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
const SmallVector &operator=(SmallVector &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
#endif
};
/// Specialize SmallVector at N=0. This specialization guarantees
@ -700,7 +918,8 @@ class SmallVector : public SmallVectorImpl<T> {
template <typename T>
class SmallVector<T,0> : public SmallVectorImpl<T> {
public:
SmallVector() : SmallVectorImpl<T>(0) {}
SmallVector() : SmallVectorImpl<T>(0) {
}
explicit SmallVector(unsigned Size, const T &Value = T())
: SmallVectorImpl<T>(0) {
@ -713,13 +932,26 @@ class SmallVector<T,0> : public SmallVectorImpl<T> {
}
SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(0) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(RHS);
}
const SmallVector &operator=(const SmallVector &RHS) {
SmallVectorImpl<T>::operator=(RHS);
return *this;
}
SmallVector &operator=(const SmallVectorImpl<T> &RHS) {
return SmallVectorImpl<T>::operator=(RHS);
#if LLVM_USE_RVALUE_REFERENCES
SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(0) {
if (!RHS.empty())
SmallVectorImpl<T>::operator=(::std::move(RHS));
}
const SmallVector &operator=(SmallVector &&RHS) {
SmallVectorImpl<T>::operator=(::std::move(RHS));
return *this;
}
#endif
};
template<typename T, unsigned N>

View File

@ -21,33 +21,62 @@
#define LLVM_ADT_SPARSESET_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DataTypes.h"
#include <limits>
namespace llvm {
/// SparseSetFunctor - Objects in a SparseSet are identified by small integer
/// keys. A functor object is used to compute the key of an object. The
/// functor's operator() must return an unsigned smaller than the universe.
/// SparseSetValTraits - Objects in a SparseSet are identified by keys that can
/// be uniquely converted to a small integer less than the set's universe. This
/// class allows the set to hold values that differ from the set's key type as
/// long as an index can still be derived from the value. SparseSet never
/// directly compares ValueT, only their indices, so it can map keys to
/// arbitrary values. SparseSetValTraits computes the index from the value
/// object. To compute the index from a key, SparseSet uses a separate
/// KeyFunctorT template argument.
///
/// The default functor implementation forwards to a getSparseSetKey() method
/// on the object. It is intended for sparse sets holding ad-hoc structs.
/// A simple type declaration, SparseSet<Type>, handles these cases:
/// - unsigned key, identity index, identity value
/// - unsigned key, identity index, fat value providing getSparseSetIndex()
///
/// The type declaration SparseSet<Type, UnaryFunction> handles:
/// - unsigned key, remapped index, identity value (virtual registers)
/// - pointer key, pointer-derived index, identity value (node+ID)
/// - pointer key, pointer-derived index, fat value with getSparseSetIndex()
///
/// Only other, unexpected cases require specializing SparseSetValTraits.
///
/// For best results, ValueT should not require a destructor.
///
template<typename ValueT>
struct SparseSetFunctor {
unsigned operator()(const ValueT &Val) {
return Val.getSparseSetKey();
struct SparseSetValTraits {
static unsigned getValIndex(const ValueT &Val) {
return Val.getSparseSetIndex();
}
};
/// SparseSetFunctor<unsigned> - Provide a trivial identity functor for
/// SparseSet<unsigned>.
/// SparseSetValFunctor - Helper class for selecting SparseSetValTraits. The
/// generic implementation handles ValueT classes which either provide
/// getSparseSetIndex() or specialize SparseSetValTraits<>.
///
template<> struct SparseSetFunctor<unsigned> {
unsigned operator()(unsigned Val) { return Val; }
template<typename KeyT, typename ValueT, typename KeyFunctorT>
struct SparseSetValFunctor {
unsigned operator()(const ValueT &Val) const {
return SparseSetValTraits<ValueT>::getValIndex(Val);
}
};
/// SparseSet - Fast set implementation for objects that can be identified by
/// SparseSetValFunctor<KeyT, KeyT> - Helper class for the common case of
/// identity key/value sets.
template<typename KeyT, typename KeyFunctorT>
struct SparseSetValFunctor<KeyT, KeyT, KeyFunctorT> {
unsigned operator()(const KeyT &Key) const {
return KeyFunctorT()(Key);
}
};
/// SparseSet - Fast set implementation for objects that can be identified by
/// small unsigned keys.
///
/// SparseSet allocates memory proportional to the size of the key universe, so
@ -82,18 +111,20 @@ template<> struct SparseSetFunctor<unsigned> {
/// uint16_t or uint32_t.
///
/// @param ValueT The type of objects in the set.
/// @param KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @param SparseT An unsigned integer type. See above.
/// @param KeyFunctorT A functor that computes the unsigned key of a ValueT.
///
template<typename ValueT,
typename SparseT = uint8_t,
typename KeyFunctorT = SparseSetFunctor<ValueT> >
typename KeyFunctorT = llvm::identity<unsigned>,
typename SparseT = uint8_t>
class SparseSet {
typedef typename KeyFunctorT::argument_type KeyT;
typedef SmallVector<ValueT, 8> DenseT;
DenseT Dense;
SparseT *Sparse;
unsigned Universe;
KeyFunctorT KeyOf;
KeyFunctorT KeyIndexOf;
SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
// Disable copy construction and assignment.
// This data structure is not meant to be used that way.
@ -160,21 +191,21 @@ class SparseSet {
Dense.clear();
}
/// find - Find an element by its key.
/// findIndex - Find an element by its index.
///
/// @param Key A valid key to find.
/// @param Idx A valid index to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator find(unsigned Key) {
assert(Key < Universe && "Key out of range");
iterator findIndex(unsigned Idx) {
assert(Idx < Universe && "Key out of range");
assert(std::numeric_limits<SparseT>::is_integer &&
!std::numeric_limits<SparseT>::is_signed &&
"SparseT must be an unsigned integer type");
const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
for (unsigned i = Sparse[Key], e = size(); i < e; i += Stride) {
const unsigned FoundKey = KeyOf(Dense[i]);
assert(FoundKey < Universe && "Invalid key in set. Did object mutate?");
if (Key == FoundKey)
for (unsigned i = Sparse[Idx], e = size(); i < e; i += Stride) {
const unsigned FoundIdx = ValIndexOf(Dense[i]);
assert(FoundIdx < Universe && "Invalid key in set. Did object mutate?");
if (Idx == FoundIdx)
return begin() + i;
// Stride is 0 when SparseT >= unsigned. We don't need to loop.
if (!Stride)
@ -183,13 +214,22 @@ class SparseSet {
return end();
}
const_iterator find(unsigned Key) const {
return const_cast<SparseSet*>(this)->find(Key);
/// find - Find an element by its key.
///
/// @param Key A valid key to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator find(const KeyT &Key) {
return findIndex(KeyIndexOf(Key));
}
const_iterator find(const KeyT &Key) const {
return const_cast<SparseSet*>(this)->findIndex(KeyIndexOf(Key));
}
/// count - Returns true if this set contains an element identified by Key.
///
bool count(unsigned Key) const {
bool count(const KeyT &Key) const {
return find(Key) != end();
}
@ -204,11 +244,11 @@ class SparseSet {
/// Insertion invalidates all iterators.
///
std::pair<iterator, bool> insert(const ValueT &Val) {
unsigned Key = KeyOf(Val);
iterator I = find(Key);
unsigned Idx = ValIndexOf(Val);
iterator I = findIndex(Idx);
if (I != end())
return std::make_pair(I, false);
Sparse[Key] = size();
Sparse[Idx] = size();
Dense.push_back(Val);
return std::make_pair(end() - 1, true);
}
@ -216,7 +256,7 @@ class SparseSet {
/// array subscript - If an element already exists with this key, return it.
/// Otherwise, automatically construct a new value from Key, insert it,
/// and return the newly inserted element.
ValueT &operator[](unsigned Key) {
ValueT &operator[](const KeyT &Key) {
return *insert(ValueT(Key)).first;
}
@ -238,9 +278,9 @@ class SparseSet {
assert(unsigned(I - begin()) < size() && "Invalid iterator");
if (I != end() - 1) {
*I = Dense.back();
unsigned BackKey = KeyOf(Dense.back());
assert(BackKey < Universe && "Invalid key in set. Did object mutate?");
Sparse[BackKey] = I - begin();
unsigned BackIdx = ValIndexOf(Dense.back());
assert(BackIdx < Universe && "Invalid key in set. Did object mutate?");
Sparse[BackIdx] = I - begin();
}
// This depends on SmallVector::pop_back() not invalidating iterators.
// std::vector::pop_back() doesn't give that guarantee.
@ -253,7 +293,7 @@ class SparseSet {
/// @param Key The key identifying the element to erase.
/// @returns True when an element was erased, false if no element was found.
///
bool erase(unsigned Key) {
bool erase(const KeyT &Key) {
iterator I = find(Key);
if (I == end())
return false;

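A minimal usage sketch of the reworked SparseSet interface (illustrative only: the Node type, its fields, and the universe size are invented for this example, not part of the commit):

#include "llvm/ADT/SparseSet.h"

struct Node {
  unsigned ID;      // the small unsigned key
  float Weight;     // extra payload carried in the dense vector
  explicit Node(unsigned id) : ID(id), Weight(0) {}
  // Fat values now expose their index via getSparseSetIndex() (or a
  // SparseSetValTraits specialization) instead of getSparseSetKey().
  unsigned getSparseSetIndex() const { return ID; }
};

void sparseSetExample() {
  llvm::SparseSet<Node> Set;   // KeyFunctorT defaults to llvm::identity<unsigned>
  Set.setUniverse(1024);       // keys must be smaller than the universe
  Set.insert(Node(7));
  if (Set.findIndex(7) != Set.end())  // lookup by precomputed index...
    Set[7].Weight = 1.0f;             // ...or by key, via operator[]
  Set.erase(7);
}
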
View File

@ -12,6 +12,7 @@
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
@ -292,6 +293,16 @@ namespace llvm {
/// Note: O(size() + Chars.size())
size_type find_last_of(StringRef Chars, size_t From = npos) const;
/// find_last_not_of - Find the last character in the string that is not
/// \arg C, or npos if not found.
size_type find_last_not_of(char C, size_t From = npos) const;
/// find_last_not_of - Find the last character in the string that is not in
/// \arg Chars, or npos if not found.
///
/// Note: O(size() + Chars.size())
size_type find_last_not_of(StringRef Chars, size_t From = npos) const;
/// @}
/// @name Helpful Algorithms
/// @{
@ -480,6 +491,24 @@ namespace llvm {
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
}
/// ltrim - Return string with consecutive characters in \arg Chars starting
/// from the left removed.
StringRef ltrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_front(std::min(Length, find_first_not_of(Chars)));
}
/// rtrim - Return string with consecutive characters in \arg Chars starting
/// from the right removed.
StringRef rtrim(StringRef Chars = " \t\n\v\f\r") const {
return drop_back(Length - std::min(Length, find_last_not_of(Chars) + 1));
}
/// trim - Return string with consecutive characters in \arg Chars starting
/// from the left and right removed.
StringRef trim(StringRef Chars = " \t\n\v\f\r") const {
return ltrim(Chars).rtrim(Chars);
}
/// @}
};

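A short sketch of the new trimming helpers; the sample strings are invented, and the commented results follow from the definitions above:

#include "llvm/ADT/StringRef.h"

void trimExample() {
  llvm::StringRef S("  key = value \t\n");
  llvm::StringRef L = S.ltrim();   // "key = value \t\n"
  llvm::StringRef R = S.rtrim();   // "  key = value"
  llvm::StringRef T = S.trim();    // "key = value"
  // An explicit character set may be passed instead of the default whitespace:
  llvm::StringRef F = llvm::StringRef("--flag--").trim("-");  // "flag"
  (void)L; (void)R; (void)T; (void)F;
}
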
View File

@ -48,8 +48,8 @@ class StringSwitch {
const T *Result;
public:
explicit StringSwitch(StringRef Str)
: Str(Str), Result(0) { }
explicit StringSwitch(StringRef S)
: Str(S), Result(0) { }
template<unsigned N>
StringSwitch& Case(const char (&S)[N], const T& Value) {

View File

@ -10,8 +10,11 @@
#ifndef LLVM_ADT_TINYPTRVECTOR_H
#define LLVM_ADT_TINYPTRVECTOR_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
namespace llvm {
@ -25,18 +28,78 @@ template <typename EltTy>
class TinyPtrVector {
public:
typedef llvm::SmallVector<EltTy, 4> VecTy;
typedef typename VecTy::value_type value_type;
llvm::PointerUnion<EltTy, VecTy*> Val;
TinyPtrVector() {}
TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
Val = new VecTy(*V);
}
~TinyPtrVector() {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
delete V;
}
TinyPtrVector(const TinyPtrVector &RHS) : Val(RHS.Val) {
if (VecTy *V = Val.template dyn_cast<VecTy*>())
Val = new VecTy(*V);
}
TinyPtrVector &operator=(const TinyPtrVector &RHS) {
if (this == &RHS)
return *this;
if (RHS.empty()) {
this->clear();
return *this;
}
// Try to squeeze into the single slot. If it won't fit, allocate a copied
// vector.
if (Val.template is<EltTy>()) {
if (RHS.size() == 1)
Val = RHS.front();
else
Val = new VecTy(*RHS.Val.template get<VecTy*>());
return *this;
}
// If we have a full vector allocated, try to re-use it.
if (RHS.Val.template is<EltTy>()) {
Val.template get<VecTy*>()->clear();
Val.template get<VecTy*>()->push_back(RHS.front());
} else {
*Val.template get<VecTy*>() = *RHS.Val.template get<VecTy*>();
}
return *this;
}
#if LLVM_USE_RVALUE_REFERENCES
TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
RHS.Val = (EltTy)0;
}
TinyPtrVector &operator=(TinyPtrVector &&RHS) {
if (this == &RHS)
return *this;
if (RHS.empty()) {
this->clear();
return *this;
}
// If this vector has been allocated on the heap, re-use it if cheap. If it
// would require more copying, just delete it and we'll steal the other
// side.
if (VecTy *V = Val.template dyn_cast<VecTy*>()) {
if (RHS.Val.template is<EltTy>()) {
V->clear();
V->push_back(RHS.front());
return *this;
}
delete V;
}
Val = RHS.Val;
RHS.Val = (EltTy)0;
return *this;
}
#endif
// implicit conversion operator to ArrayRef.
operator ArrayRef<EltTy>() const {
if (Val.isNull())
@ -45,7 +108,7 @@ class TinyPtrVector {
return *Val.getAddrOfPtr1();
return *Val.template get<VecTy*>();
}
bool empty() const {
// This vector can be empty if it contains no element, or if it
// contains a pointer to an empty vector.
@ -54,7 +117,7 @@ class TinyPtrVector {
return Vec->empty();
return false;
}
unsigned size() const {
if (empty())
return 0;
@ -62,27 +125,21 @@ class TinyPtrVector {
return 1;
return Val.template get<VecTy*>()->size();
}
typedef const EltTy *const_iterator;
typedef EltTy *iterator;
iterator begin() {
if (empty())
return 0;
if (Val.template is<EltTy>())
return Val.getAddrOfPtr1();
return Val.template get<VecTy *>()->begin();
}
iterator end() {
if (empty())
return 0;
if (Val.template is<EltTy>())
return begin() + 1;
return begin() + (Val.isNull() ? 0 : 1);
return Val.template get<VecTy *>()->end();
}
@ -100,38 +157,53 @@ class TinyPtrVector {
assert(i == 0 && "tinyvector index out of range");
return V;
}
assert(i < Val.template get<VecTy*>()->size() &&
"tinyvector index out of range");
return (*Val.template get<VecTy*>())[i];
}
EltTy front() const {
assert(!empty() && "vector empty");
if (EltTy V = Val.template dyn_cast<EltTy>())
return V;
return Val.template get<VecTy*>()->front();
}
EltTy back() const {
assert(!empty() && "vector empty");
if (EltTy V = Val.template dyn_cast<EltTy>())
return V;
return Val.template get<VecTy*>()->back();
}
void push_back(EltTy NewVal) {
assert(NewVal != 0 && "Can't add a null value");
// If we have nothing, add something.
if (Val.isNull()) {
Val = NewVal;
return;
}
// If we have a single value, convert to a vector.
if (EltTy V = Val.template dyn_cast<EltTy>()) {
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
// Add the new value, we know we have a vector.
Val.template get<VecTy*>()->push_back(NewVal);
}
void pop_back() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>())
Val = (EltTy)0;
else if (VecTy *Vec = Val.template get<VecTy*>())
Vec->pop_back();
}
void clear() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
@ -144,6 +216,9 @@ class TinyPtrVector {
}
iterator erase(iterator I) {
assert(I >= begin() && "Iterator to erase is out of bounds.");
assert(I < end() && "Erasing at past-the-end iterator.");
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
if (I == begin())
@ -153,12 +228,63 @@ class TinyPtrVector {
// benefit to collapsing back to a pointer
return Vec->erase(I);
}
return 0;
return end();
}
iterator erase(iterator S, iterator E) {
assert(S >= begin() && "Range to erase is out of bounds.");
assert(S <= E && "Trying to erase invalid range.");
assert(E <= end() && "Trying to erase past the end.");
if (Val.template is<EltTy>()) {
if (S == begin() && S != E)
Val = (EltTy)0;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
return Vec->erase(S, E);
}
return end();
}
iterator insert(iterator I, const EltTy &Elt) {
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (I == end()) {
push_back(Elt);
return llvm::prior(end());
}
assert(!Val.isNull() && "Null value with non-end insert iterator.");
if (EltTy V = Val.template dyn_cast<EltTy>()) {
assert(I == begin());
Val = Elt;
push_back(V);
return begin();
}
return Val.template get<VecTy*>()->insert(I, Elt);
}
template<typename ItTy>
iterator insert(iterator I, ItTy From, ItTy To) {
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (From == To)
return I;
// If we have a single value, convert to a vector.
ptrdiff_t Offset = I - begin();
if (Val.isNull()) {
if (llvm::next(From) == To) {
Val = *From;
return begin();
}
Val = new VecTy();
} else if (EltTy V = Val.template dyn_cast<EltTy>()) {
Val = new VecTy();
Val.template get<VecTy*>()->push_back(V);
}
return Val.template get<VecTy*>()->insert(begin() + Offset, From, To);
}
private:
void operator=(const TinyPtrVector&); // NOT IMPLEMENTED YET.
};
} // end namespace llvm

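A hedged sketch of what the fleshed-out TinyPtrVector supports after this change; the pointer arguments are placeholders:

#include "llvm/ADT/TinyPtrVector.h"

void tinyPtrVectorExample(int *A, int *B) {
  llvm::TinyPtrVector<int*> V;
  V.push_back(A);            // single element lives inline in the PointerUnion
  V.push_back(B);            // a second element spills to a heap SmallVector
  llvm::TinyPtrVector<int*> W;
  W = V;                     // copy assignment is newly implemented above
  V.insert(V.begin(), B);    // insert() and ranged erase() are also new
  V.erase(V.begin());        // erase now returns a usable iterator (or end())
  (void)W;
}
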
View File

@ -62,8 +62,8 @@ class Triple {
x86_64, // X86-64: amd64, x86_64
xcore, // XCore: xcore
mblaze, // MBlaze: mblaze
ptx32, // PTX: ptx (32-bit)
ptx64, // PTX: ptx (64-bit)
nvptx, // NVPTX: 32-bit
nvptx64, // NVPTX: 64-bit
le32, // le32: generic little-endian 32-bit CPU (PNaCl / Emscripten)
amdil // amdil: amd IL
};
@ -98,7 +98,8 @@ class Triple {
Minix,
RTEMS,
NativeClient,
CNK // BG/P Compute-Node Kernel
CNK, // BG/P Compute-Node Kernel
Bitrig
};
enum EnvironmentType {
UnknownEnvironment,
@ -194,6 +195,11 @@ class Triple {
bool getMacOSXVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
/// getiOSVersion - Parse the version number as with getOSVersion. This should
/// only be called with IOS triples.
void getiOSVersion(unsigned &Major, unsigned &Minor,
unsigned &Micro) const;
/// @}
/// @name Direct Component Access
/// @{
@ -266,7 +272,7 @@ class Triple {
/// compatibility, which handles supporting skewed version numbering schemes
/// used by the "darwin" triples.
unsigned isMacOSXVersionLT(unsigned Major, unsigned Minor = 0,
unsigned Micro = 0) const {
assert(isMacOSX() && "Not an OS X triple!");
// If this is OS X, expect a sane version number.

View File

@ -111,20 +111,21 @@ class ValueMap {
/// count - Return true if the specified key is in the map.
bool count(const KeyT &Val) const {
return Map.count(Wrap(Val));
return Map.find_as(Val) != Map.end();
}
iterator find(const KeyT &Val) {
return iterator(Map.find(Wrap(Val)));
return iterator(Map.find_as(Val));
}
const_iterator find(const KeyT &Val) const {
return const_iterator(Map.find(Wrap(Val)));
return const_iterator(Map.find_as(Val));
}
/// lookup - Return the entry for the specified key, or a default
/// constructed value if no such entry exists.
ValueT lookup(const KeyT &Val) const {
return Map.lookup(Wrap(Val));
typename MapT::const_iterator I = Map.find_as(Val);
return I != Map.end() ? I->second : ValueT();
}
// Inserts key,value pair into the map if the key isn't already in the map.
@ -145,7 +146,12 @@ class ValueMap {
bool erase(const KeyT &Val) {
return Map.erase(Wrap(Val));
typename MapT::iterator I = Map.find_as(Val);
if (I == Map.end())
return false;
Map.erase(I);
return true;
}
void erase(iterator I) {
return Map.erase(I.base());
@ -256,9 +262,15 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
static unsigned getHashValue(const VH &Val) {
return PointerInfo::getHashValue(Val.Unwrap());
}
static unsigned getHashValue(const KeyT &Val) {
return PointerInfo::getHashValue(Val);
}
static bool isEqual(const VH &LHS, const VH &RHS) {
return LHS == RHS;
}
static bool isEqual(const KeyT &LHS, const VH &RHS) {
return LHS == RHS.getValPtr();
}
};

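The switch to DenseMap::find_as means a query no longer materializes a temporary callback value handle: the raw key is hashed and compared directly against the stored handles through the new DenseMapInfo overloads. A minimal sketch (the helper name is invented):

#include "llvm/ADT/ValueMap.h"
#include "llvm/Value.h"

unsigned lookupOrZero(llvm::ValueMap<llvm::Value*, unsigned> &VM,
                      llvm::Value *V) {
  if (VM.count(V))          // find_as compares V against stored VHs directly
    return VM.lookup(V);    // no Wrap(V) handle is built for the query
  return 0;
}
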
View File

@ -206,7 +206,7 @@ struct VariadicFunction2 {
ResultT operator()(Param0T P0, Param1T P1, \
LLVM_COMMA_JOIN ## N(const ArgT &A)) const { \
const ArgT *const Args[] = { LLVM_COMMA_JOIN ## N(&A) }; \
return Func(P0, P1, makeAraryRef(Args)); \
return Func(P0, P1, makeArrayRef(Args)); \
}
LLVM_DEFINE_OVERLOAD(1)
LLVM_DEFINE_OVERLOAD(2)

View File

@ -50,6 +50,7 @@ class Pass;
class AnalysisUsage;
class MemTransferInst;
class MemIntrinsic;
class DominatorTree;
class AliasAnalysis {
protected:
@ -462,6 +463,18 @@ class AliasAnalysis {
virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
ImmutableCallSite CS2);
/// callCapturesBefore - Return information about whether a particular call
/// site modifies or reads the specified memory location.
ModRefResult callCapturesBefore(const Instruction *I,
const AliasAnalysis::Location &MemLoc,
DominatorTree *DT);
/// callCapturesBefore - A convenience wrapper.
ModRefResult callCapturesBefore(const Instruction *I, const Value *P,
uint64_t Size, DominatorTree *DT) {
return callCapturesBefore(I, Location(P, Size), DT);
}
//===--------------------------------------------------------------------===//
/// Higher level methods for querying mod/ref information.
///

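A sketch of the new query (the wrapper function name is invented): it refines mod/ref information for a call using the fact that the pointer is not captured before the call executes, which is why the dominator tree is required.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"

bool callMayWriteTo(llvm::AliasAnalysis &AA, llvm::Instruction *Call,
                    llvm::Value *P, uint64_t Size, llvm::DominatorTree *DT) {
  llvm::AliasAnalysis::ModRefResult MR =
      AA.callCapturesBefore(Call, P, Size, DT);   // the convenience wrapper
  return (MR & llvm::AliasAnalysis::Mod) != 0;
}
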
View File

@ -217,7 +217,7 @@ class BlockFrequencyImpl {
divBlockFreq(BB, BranchProbability(Numerator, EntryFreq));
}
/// doLoop - Propagate block frequency down throught the loop.
/// doLoop - Propagate block frequency down through the loop.
void doLoop(BlockT *Head, BlockT *Tail) {
DEBUG(dbgs() << "doLoop(" << getBlockName(Head) << ", "
<< getBlockName(Tail) << ")\n");

View File

@ -122,6 +122,7 @@ class BranchProbabilityInfo : public FunctionPass {
bool calcLoopBranchHeuristics(BasicBlock *BB);
bool calcZeroHeuristics(BasicBlock *BB);
bool calcFloatingPointHeuristics(BasicBlock *BB);
bool calcInvokeHeuristics(BasicBlock *BB);
};
}

View File

@ -16,6 +16,7 @@
#define LLVM_ANALYSIS_CODEMETRICS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CallSite.h"
namespace llvm {
class BasicBlock;
@ -29,10 +30,11 @@ namespace llvm {
/// \brief Check whether a call will lower to something small.
///
/// This tests checks whether calls to this function will lower to something
/// This test checks whether this callsite will lower to something
/// significantly cheaper than a traditional call, often a single
/// instruction.
bool callIsSmall(const Function *F);
/// instruction. Note that if isInstructionFree(CS.getInstruction()) would
/// return true, so will this function.
bool callIsSmall(ImmutableCallSite CS);
/// \brief Utility to calculate the size and a few similar metrics for a set
/// of basic blocks.

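A small sketch of the revised predicate (the helper name is invented); taking the call site instead of the callee lets it reason about the concrete call instruction rather than the function in the abstract:

#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Instructions.h"
#include "llvm/Support/CallSite.h"

bool isCheapCall(const llvm::CallInst *CI) {
  return llvm::callIsSmall(llvm::ImmutableCallSite(CI));
}
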
View File

@ -152,7 +152,7 @@ EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<BasicBlock>);
EXTERN_TEMPLATE_INSTANTIATION(class DomTreeNodeBase<MachineBasicBlock>);
template<class NodeT>
static raw_ostream &operator<<(raw_ostream &o,
inline raw_ostream &operator<<(raw_ostream &o,
const DomTreeNodeBase<NodeT> *Node) {
if (Node->getBlock())
WriteAsOperand(o, Node->getBlock(), false);
@ -165,7 +165,7 @@ static raw_ostream &operator<<(raw_ostream &o,
}
template<class NodeT>
static void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &o,
inline void PrintDomTree(const DomTreeNodeBase<NodeT> *N, raw_ostream &o,
unsigned Lev) {
o.indent(2*Lev) << "[" << Lev << "] " << N;
for (typename DomTreeNodeBase<NodeT>::const_iterator I = N->begin(),
@ -705,6 +705,21 @@ DominatorTreeBase<NodeT>::properlyDominates(const NodeT *A, const NodeT *B) {
EXTERN_TEMPLATE_INSTANTIATION(class DominatorTreeBase<BasicBlock>);
class BasicBlockEdge {
const BasicBlock *Start;
const BasicBlock *End;
public:
BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) :
Start(Start_), End(End_) { }
const BasicBlock *getStart() const {
return Start;
}
const BasicBlock *getEnd() const {
return End;
}
bool isSingleEdge() const;
};
//===-------------------------------------
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
/// compute a normal dominator tree.
@ -778,6 +793,8 @@ class DominatorTree : public FunctionPass {
bool dominates(const Instruction *Def, const Use &U) const;
bool dominates(const Instruction *Def, const Instruction *User) const;
bool dominates(const Instruction *Def, const BasicBlock *BB) const;
bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
bool properlyDominates(const DomTreeNode *A, const DomTreeNode *B) const {
return DT->properlyDominates(A, B);

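A sketch of edge dominance (the helper name is invented). The new overloads let a pass ask whether everything reachable only through a specific CFG edge is dominated by that edge, even when the edge's end block itself has other predecessors:

#include "llvm/Analysis/Dominators.h"

bool edgeDominatesUse(llvm::DominatorTree &DT, llvm::BasicBlock *From,
                      llvm::BasicBlock *To, const llvm::Use &U) {
  llvm::BasicBlockEdge E(From, To);
  return DT.dominates(E, U);   // true if the edge (From,To) dominates the use
}
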
View File

@ -127,10 +127,6 @@ namespace llvm {
// adding a replacement API.
InlineCost getInlineCost(CallSite CS, Function *Callee, int Threshold);
};
/// callIsSmall - If a call is likely to lower to a single target instruction,
/// or is otherwise deemed small, return true.
bool callIsSmall(const Function *Callee);
}
#endif

View File

@ -46,7 +46,7 @@
namespace llvm {
template<typename T>
static void RemoveFromVector(std::vector<T*> &V, T *N) {
inline void RemoveFromVector(std::vector<T*> &V, T *N) {
typename std::vector<T*>::iterator I = std::find(V.begin(), V.end(), N);
assert(I != V.end() && "N is not in this list!");
V.erase(I);
@ -97,6 +97,9 @@ class LoopBase {
BlockT *getHeader() const { return Blocks.front(); }
LoopT *getParentLoop() const { return ParentLoop; }
/// setParentLoop is a raw interface for bypassing addChildLoop.
void setParentLoop(LoopT *L) { ParentLoop = L; }
/// contains - Return true if the specified loop is contained within
/// this loop.
///
@ -122,14 +125,20 @@ class LoopBase {
/// iterator/begin/end - Return the loops contained entirely within this loop.
///
const std::vector<LoopT *> &getSubLoops() const { return SubLoops; }
std::vector<LoopT *> &getSubLoopsVector() { return SubLoops; }
typedef typename std::vector<LoopT *>::const_iterator iterator;
typedef typename std::vector<LoopT *>::const_reverse_iterator
reverse_iterator;
iterator begin() const { return SubLoops.begin(); }
iterator end() const { return SubLoops.end(); }
reverse_iterator rbegin() const { return SubLoops.rbegin(); }
reverse_iterator rend() const { return SubLoops.rend(); }
bool empty() const { return SubLoops.empty(); }
/// getBlocks - Get a list of the basic blocks which make up this loop.
///
const std::vector<BlockT*> &getBlocks() const { return Blocks; }
std::vector<BlockT*> &getBlocksVector() { return Blocks; }
typedef typename std::vector<BlockT*>::const_iterator block_iterator;
block_iterator block_begin() const { return Blocks.begin(); }
block_iterator block_end() const { return Blocks.end(); }
@ -181,83 +190,26 @@ class LoopBase {
/// outside of the loop. These are the blocks _inside of the current loop_
/// which branch out. The returned list is always unique.
///
void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
std::sort(LoopBBs.begin(), LoopBBs.end());
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I)) {
// Not in current loop? It must be an exit block.
ExitingBlocks.push_back(*BI);
break;
}
}
void getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const;
/// getExitingBlock - If getExitingBlocks would return exactly one block,
/// return that block. Otherwise return null.
BlockT *getExitingBlock() const {
SmallVector<BlockT*, 8> ExitingBlocks;
getExitingBlocks(ExitingBlocks);
if (ExitingBlocks.size() == 1)
return ExitingBlocks[0];
return 0;
}
BlockT *getExitingBlock() const;
/// getExitBlocks - Return all of the successor blocks of this loop. These
/// are the blocks _outside of the current loop_ which are branched to.
///
void getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
std::sort(LoopBBs.begin(), LoopBBs.end());
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
// Not in current loop? It must be an exit block.
ExitBlocks.push_back(*I);
}
void getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const;
/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
BlockT *getExitBlock() const {
SmallVector<BlockT*, 8> ExitBlocks;
getExitBlocks(ExitBlocks);
if (ExitBlocks.size() == 1)
return ExitBlocks[0];
return 0;
}
BlockT *getExitBlock() const;
/// Edge type.
typedef std::pair<BlockT*, BlockT*> Edge;
typedef std::pair<const BlockT*, const BlockT*> Edge;
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
template <typename EdgeT>
void getExitEdges(SmallVectorImpl<EdgeT> &ExitEdges) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
array_pod_sort(LoopBBs.begin(), LoopBBs.end());
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
// Not in current loop? It must be an exit block.
ExitEdges.push_back(EdgeT(*BI, *I));
}
void getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const;
/// getLoopPreheader - If there is a preheader for this loop, return it. A
/// loop has a preheader if there is only one edge to the header of the loop
@ -266,71 +218,18 @@ class LoopBase {
///
/// This method returns null if there is no preheader for the loop.
///
BlockT *getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = getLoopPredecessor();
if (!Out) return 0;
// Make sure there is only one exit out of the preheader.
typedef GraphTraits<BlockT*> BlockTraits;
typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
++SI;
if (SI != BlockTraits::child_end(Out))
return 0; // Multiple exits from the block, must not be a preheader.
// The predecessor has exactly one successor, so it is a preheader.
return Out;
}
BlockT *getLoopPreheader() const;
/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
/// This is less strict than the loop "preheader" concept, which requires
/// the predecessor to have exactly one successor.
///
BlockT *getLoopPredecessor() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = 0;
// Loop over the predecessors of the header node...
BlockT *Header = getHeader();
typedef GraphTraits<BlockT*> BlockTraits;
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (!contains(N)) { // If the block is not in the loop...
if (Out && Out != N)
return 0; // Multiple predecessors outside the loop
Out = N;
}
}
// Make sure there is only one exit out of the preheader.
assert(Out && "Header of loop has no predecessors from outside loop?");
return Out;
}
BlockT *getLoopPredecessor() const;
/// getLoopLatch - If there is a single latch block for this loop, return it.
/// A latch block is a block that contains a branch back to the header.
BlockT *getLoopLatch() const {
BlockT *Header = getHeader();
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header);
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
BlockT *Latch = 0;
for (; PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (contains(N)) {
if (Latch) return 0;
Latch = N;
}
}
return Latch;
}
BlockT *getLoopLatch() const;
//===--------------------------------------------------------------------===//
// APIs for updating loop information after changing the CFG
@ -348,17 +247,7 @@ class LoopBase {
/// the OldChild entry in our children list with NewChild, and updates the
/// parent pointer of OldChild to be null and the NewChild to be this loop.
/// This updates the loop depth of the new child.
void replaceChildLoopWith(LoopT *OldChild,
LoopT *NewChild) {
assert(OldChild->ParentLoop == this && "This loop is already broken!");
assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
typename std::vector<LoopT *>::iterator I =
std::find(SubLoops.begin(), SubLoops.end(), OldChild);
assert(I != SubLoops.end() && "OldChild not in loop!");
*I = NewChild;
OldChild->ParentLoop = 0;
NewChild->ParentLoop = static_cast<LoopT *>(this);
}
void replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild);
/// addChildLoop - Add the specified loop to be a child of this loop. This
/// updates the loop depth of the new child.
@ -411,121 +300,12 @@ class LoopBase {
}
/// verifyLoop - Verify loop structure
void verifyLoop() const {
#ifndef NDEBUG
assert(!Blocks.empty() && "Loop header is missing");
// Setup for using a depth-first iterator to visit every block in the loop.
SmallVector<BlockT*, 8> ExitBBs;
getExitBlocks(ExitBBs);
llvm::SmallPtrSet<BlockT*, 8> VisitSet;
VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
BI = df_ext_begin(getHeader(), VisitSet),
BE = df_ext_end(getHeader(), VisitSet);
// Keep track of the number of BBs visited.
unsigned NumVisited = 0;
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
std::sort(LoopBBs.begin(), LoopBBs.end());
// Check the individual blocks.
for ( ; BI != BE; ++BI) {
BlockT *BB = *BI;
bool HasInsideLoopSuccs = false;
bool HasInsideLoopPreds = false;
SmallVector<BlockT *, 2> OutsideLoopPreds;
typedef GraphTraits<BlockT*> BlockTraits;
for (typename BlockTraits::ChildIteratorType SI =
BlockTraits::child_begin(BB), SE = BlockTraits::child_end(BB);
SI != SE; ++SI)
if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *SI)) {
HasInsideLoopSuccs = true;
break;
}
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
BlockT *N = *PI;
if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
HasInsideLoopPreds = true;
else
OutsideLoopPreds.push_back(N);
}
if (BB == getHeader()) {
assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
} else if (!OutsideLoopPreds.empty()) {
// A non-header loop shouldn't be reachable from outside the loop,
// though it is permitted if the predecessor is not itself actually
// reachable.
BlockT *EntryBB = BB->getParent()->begin();
for (df_iterator<BlockT *> NI = df_begin(EntryBB),
NE = df_end(EntryBB); NI != NE; ++NI)
for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
assert(*NI != OutsideLoopPreds[i] &&
"Loop has multiple entry points!");
}
assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
assert(BB != getHeader()->getParent()->begin() &&
"Loop contains function entry block!");
NumVisited++;
}
assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
// Check the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
// Each block in each subloop should be contained within this loop.
for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
BI != BE; ++BI) {
assert(std::binary_search(LoopBBs.begin(), LoopBBs.end(), *BI) &&
"Loop does not contain all the blocks of a subloop!");
}
// Check the parent loop pointer.
if (ParentLoop) {
assert(std::find(ParentLoop->begin(), ParentLoop->end(), this) !=
ParentLoop->end() &&
"Loop is not a subloop of its parent!");
}
#endif
}
void verifyLoop() const;
/// verifyLoopNest - Verify loop structure of this loop and all nested loops.
void verifyLoopNest(DenseSet<const LoopT*> *Loops) const {
Loops->insert(static_cast<const LoopT *>(this));
// Verify this loop.
verifyLoop();
// Verify the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
(*I)->verifyLoopNest(Loops);
}
void verifyLoopNest(DenseSet<const LoopT*> *Loops) const;
void print(raw_ostream &OS, unsigned Depth = 0) const {
OS.indent(Depth*2) << "Loop at depth " << getLoopDepth()
<< " containing: ";
for (unsigned i = 0; i < getBlocks().size(); ++i) {
if (i) OS << ",";
BlockT *BB = getBlocks()[i];
WriteAsOperand(OS, BB, false);
if (BB == getHeader()) OS << "<header>";
if (BB == getLoopLatch()) OS << "<latch>";
if (isLoopExiting(BB)) OS << "<exiting>";
}
OS << "\n";
for (iterator I = begin(), E = end(); I != E; ++I)
(*I)->print(OS, Depth+2);
}
void print(raw_ostream &OS, unsigned Depth = 0) const;
protected:
friend class LoopInfoBase<BlockT, LoopT>;
@ -540,6 +320,11 @@ raw_ostream& operator<<(raw_ostream &OS, const LoopBase<BlockT, LoopT> &Loop) {
return OS;
}
// Implementation in LoopInfoImpl.h
#ifdef __GNUC__
__extension__ extern template class LoopBase<BasicBlock, Loop>;
#endif
class Loop : public LoopBase<BasicBlock, Loop> {
public:
Loop() {}
@ -650,8 +435,12 @@ class LoopInfoBase {
/// function.
///
typedef typename std::vector<LoopT *>::const_iterator iterator;
typedef typename std::vector<LoopT *>::const_reverse_iterator
reverse_iterator;
iterator begin() const { return TopLevelLoops.begin(); }
iterator end() const { return TopLevelLoops.end(); }
reverse_iterator rbegin() const { return TopLevelLoops.rbegin(); }
reverse_iterator rend() const { return TopLevelLoops.rend(); }
bool empty() const { return TopLevelLoops.empty(); }
/// getLoopFor - Return the inner most loop that BB lives in. If a basic
@ -744,189 +533,19 @@ class LoopInfoBase {
return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
}
void Calculate(DominatorTreeBase<BlockT> &DT) {
BlockT *RootNode = DT.getRootNode()->getBlock();
for (df_iterator<BlockT*> NI = df_begin(RootNode),
NE = df_end(RootNode); NI != NE; ++NI)
if (LoopT *L = ConsiderForLoop(*NI, DT))
TopLevelLoops.push_back(L);
}
LoopT *ConsiderForLoop(BlockT *BB, DominatorTreeBase<BlockT> &DT) {
if (BBMap.count(BB)) return 0; // Haven't processed this node?
std::vector<BlockT *> TodoStack;
// Scan the predecessors of BB, checking to see if BB dominates any of
// them. This identifies backedges which target this node...
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType I =
InvBlockTraits::child_begin(BB), E = InvBlockTraits::child_end(BB);
I != E; ++I) {
typename InvBlockTraits::NodeType *N = *I;
// If BB dominates its predecessor...
if (DT.dominates(BB, N) && DT.isReachableFromEntry(N))
TodoStack.push_back(N);
}
if (TodoStack.empty()) return 0; // No backedges to this block...
// Create a new loop to represent this basic block...
LoopT *L = new LoopT(BB);
BBMap[BB] = L;
while (!TodoStack.empty()) { // Process all the nodes in the loop
BlockT *X = TodoStack.back();
TodoStack.pop_back();
if (!L->contains(X) && // As of yet unprocessed??
DT.isReachableFromEntry(X)) {
// Check to see if this block already belongs to a loop. If this occurs
// then we have a case where a loop that is supposed to be a child of
// the current loop was processed before the current loop. When this
// occurs, this child loop gets added to a part of the current loop,
// making it a sibling to the current loop. We have to reparent this
// loop.
if (LoopT *SubLoop =
const_cast<LoopT *>(getLoopFor(X)))
if (SubLoop->getHeader() == X && isNotAlreadyContainedIn(SubLoop, L)){
// Remove the subloop from its current parent...
assert(SubLoop->ParentLoop && SubLoop->ParentLoop != L);
LoopT *SLP = SubLoop->ParentLoop; // SubLoopParent
typename std::vector<LoopT *>::iterator I =
std::find(SLP->SubLoops.begin(), SLP->SubLoops.end(), SubLoop);
assert(I != SLP->SubLoops.end() &&"SubLoop not a child of parent?");
SLP->SubLoops.erase(I); // Remove from parent...
// Add the subloop to THIS loop...
SubLoop->ParentLoop = L;
L->SubLoops.push_back(SubLoop);
}
// Normal case, add the block to our loop...
L->Blocks.push_back(X);
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
// Add all of the predecessors of X to the end of the work stack...
TodoStack.insert(TodoStack.end(), InvBlockTraits::child_begin(X),
InvBlockTraits::child_end(X));
}
}
// If there are any loops nested within this loop, create them now!
for (typename std::vector<BlockT*>::iterator I = L->Blocks.begin(),
E = L->Blocks.end(); I != E; ++I)
if (LoopT *NewLoop = ConsiderForLoop(*I, DT)) {
L->SubLoops.push_back(NewLoop);
NewLoop->ParentLoop = L;
}
// Add the basic blocks that comprise this loop to the BBMap so that this
// loop can be found for them.
//
for (typename std::vector<BlockT*>::iterator I = L->Blocks.begin(),
E = L->Blocks.end(); I != E; ++I)
BBMap.insert(std::make_pair(*I, L));
// Now that we have a list of all of the child loops of this loop, check to
// see if any of them should actually be nested inside of each other. We
// can accidentally pull loops out of their parents, so we must make sure to
// organize the loop nests correctly now.
{
std::map<BlockT *, LoopT *> ContainingLoops;
for (unsigned i = 0; i != L->SubLoops.size(); ++i) {
LoopT *Child = L->SubLoops[i];
assert(Child->getParentLoop() == L && "Not proper child loop?");
if (LoopT *ContainingLoop = ContainingLoops[Child->getHeader()]) {
// If there is already a loop which contains this loop, move this loop
// into the containing loop.
MoveSiblingLoopInto(Child, ContainingLoop);
--i; // The loop got removed from the SubLoops list.
} else {
// This is currently considered to be a top-level loop. Check to see
// if any of the contained blocks are loop headers for subloops we
// have already processed.
for (unsigned b = 0, e = Child->Blocks.size(); b != e; ++b) {
LoopT *&BlockLoop = ContainingLoops[Child->Blocks[b]];
if (BlockLoop == 0) { // Child block not processed yet...
BlockLoop = Child;
} else if (BlockLoop != Child) {
LoopT *SubLoop = BlockLoop;
// Reparent all of the blocks which used to belong to BlockLoops
for (unsigned j = 0, f = SubLoop->Blocks.size(); j != f; ++j)
ContainingLoops[SubLoop->Blocks[j]] = Child;
// There is already a loop which contains this block, that means
// that we should reparent the loop which the block is currently
// considered to belong to to be a child of this loop.
MoveSiblingLoopInto(SubLoop, Child);
--i; // We just shrunk the SubLoops list.
}
}
}
}
}
return L;
}
/// MoveSiblingLoopInto - This method moves the NewChild loop to live inside
/// of the NewParent Loop, instead of being a sibling of it.
void MoveSiblingLoopInto(LoopT *NewChild,
LoopT *NewParent) {
LoopT *OldParent = NewChild->getParentLoop();
assert(OldParent && OldParent == NewParent->getParentLoop() &&
NewChild != NewParent && "Not sibling loops!");
// Remove NewChild from being a child of OldParent
typename std::vector<LoopT *>::iterator I =
std::find(OldParent->SubLoops.begin(), OldParent->SubLoops.end(),
NewChild);
assert(I != OldParent->SubLoops.end() && "Parent fields incorrect??");
OldParent->SubLoops.erase(I); // Remove from parent's subloops list
NewChild->ParentLoop = 0;
InsertLoopInto(NewChild, NewParent);
}
/// InsertLoopInto - This inserts loop L into the specified parent loop. If
/// the parent loop contains a loop which should contain L, the loop gets
/// inserted into L instead.
void InsertLoopInto(LoopT *L, LoopT *Parent) {
BlockT *LHeader = L->getHeader();
assert(Parent->contains(LHeader) &&
"This loop should not be inserted here!");
// Check to see if it belongs in a child loop...
for (unsigned i = 0, e = static_cast<unsigned>(Parent->SubLoops.size());
i != e; ++i)
if (Parent->SubLoops[i]->contains(LHeader)) {
InsertLoopInto(L, Parent->SubLoops[i]);
return;
}
// If not, insert it here!
Parent->SubLoops.push_back(L);
L->ParentLoop = Parent;
}
/// Create the loop forest using a stable algorithm.
void Analyze(DominatorTreeBase<BlockT> &DomTree);
// Debugging
void print(raw_ostream &OS) const {
for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
TopLevelLoops[i]->print(OS);
#if 0
for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
E = BBMap.end(); I != E; ++I)
OS << "BB '" << I->first->getName() << "' level = "
<< I->second->getLoopDepth() << "\n";
#endif
}
void print(raw_ostream &OS) const;
};
// Implementation in LoopInfoImpl.h
#ifdef __GNUC__
__extension__ extern template class LoopInfoBase<BasicBlock, Loop>;
#endif
class LoopInfo : public FunctionPass {
LoopInfoBase<BasicBlock, Loop> LI;
friend class LoopBase<BasicBlock, Loop>;
@ -946,8 +565,11 @@ class LoopInfo : public FunctionPass {
/// function.
///
typedef LoopInfoBase<BasicBlock, Loop>::iterator iterator;
typedef LoopInfoBase<BasicBlock, Loop>::reverse_iterator reverse_iterator;
inline iterator begin() const { return LI.begin(); }
inline iterator end() const { return LI.end(); }
inline reverse_iterator rbegin() const { return LI.rbegin(); }
inline reverse_iterator rend() const { return LI.rend(); }
bool empty() const { return LI.empty(); }
/// getLoopFor - Return the inner most loop that BB lives in. If a basic
@ -1074,27 +696,6 @@ template <> struct GraphTraits<Loop*> {
}
};
template<class BlockT, class LoopT>
void
LoopBase<BlockT, LoopT>::addBasicBlockToLoop(BlockT *NewBB,
LoopInfoBase<BlockT, LoopT> &LIB) {
assert((Blocks.empty() || LIB[getHeader()] == this) &&
"Incorrect LI specified for this loop!");
assert(NewBB && "Cannot add a null basic block to the loop!");
assert(LIB[NewBB] == 0 && "BasicBlock already in the loop!");
LoopT *L = static_cast<LoopT *>(this);
// Add the loop mapping to the LoopInfo object...
LIB.BBMap[NewBB] = L;
// Add the basic block to this loop and all parent loops...
while (L) {
L->Blocks.push_back(NewBB);
L = L->getParentLoop();
}
}
} // End llvm namespace
#endif

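One consequence of the new reverse iterators: passes can walk top-level loops in the opposite of program order without copying the list first. A minimal sketch:

#include "llvm/Analysis/LoopInfo.h"

void visitTopLevelLoopsInReverse(llvm::LoopInfo &LI) {
  for (llvm::LoopInfo::reverse_iterator I = LI.rbegin(), E = LI.rend();
       I != E; ++I) {
    llvm::Loop *L = *I;
    (void)L;   // process the loop here
  }
}
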
View File

@ -0,0 +1,570 @@
//===- llvm/Analysis/LoopInfoImpl.h - Natural Loop Calculator ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This is the generic implementation of LoopInfo used for both Loops and
// MachineLoops.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_LOOP_INFO_IMPL_H
#define LLVM_ANALYSIS_LOOP_INFO_IMPL_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/ADT/PostOrderIterator.h"
namespace llvm {
//===----------------------------------------------------------------------===//
// APIs for simple analysis of the loop. See header notes.
/// getExitingBlocks - Return all blocks inside the loop that have successors
/// outside of the loop. These are the blocks _inside of the current loop_
/// which branch out. The returned list is always unique.
///
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitingBlocks(SmallVectorImpl<BlockT *> &ExitingBlocks) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
std::sort(LoopBBs.begin(), LoopBBs.end());
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I)) {
// Not in current loop? It must be an exit block.
ExitingBlocks.push_back(*BI);
break;
}
}
/// getExitingBlock - If getExitingBlocks would return exactly one block,
/// return that block. Otherwise return null.
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
SmallVector<BlockT*, 8> ExitingBlocks;
getExitingBlocks(ExitingBlocks);
if (ExitingBlocks.size() == 1)
return ExitingBlocks[0];
return 0;
}
/// getExitBlocks - Return all of the successor blocks of this loop. These
/// are the blocks _outside of the current loop_ which are branched to.
///
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitBlocks(SmallVectorImpl<BlockT*> &ExitBlocks) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
std::sort(LoopBBs.begin(), LoopBBs.end());
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
// Not in current loop? It must be an exit block.
ExitBlocks.push_back(*I);
}
/// getExitBlock - If getExitBlocks would return exactly one block,
/// return that block. Otherwise return null.
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
SmallVector<BlockT*, 8> ExitBlocks;
getExitBlocks(ExitBlocks);
if (ExitBlocks.size() == 1)
return ExitBlocks[0];
return 0;
}
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
getExitEdges(SmallVectorImpl<Edge> &ExitEdges) const {
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
array_pod_sort(LoopBBs.begin(), LoopBBs.end());
typedef GraphTraits<BlockT*> BlockTraits;
for (block_iterator BI = block_begin(), BE = block_end(); BI != BE; ++BI)
for (typename BlockTraits::ChildIteratorType I =
BlockTraits::child_begin(*BI), E = BlockTraits::child_end(*BI);
I != E; ++I)
if (!std::binary_search(LoopBBs.begin(), LoopBBs.end(), *I))
// Not in current loop? It must be an exit block.
ExitEdges.push_back(Edge(*BI, *I));
}
/// getLoopPreheader - If there is a preheader for this loop, return it. A
/// loop has a preheader if there is only one edge to the header of the loop
/// from outside of the loop. If this is the case, the block branching to the
/// header of the loop is the preheader node.
///
/// This method returns null if there is no preheader for the loop.
///
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = getLoopPredecessor();
if (!Out) return 0;
// Make sure there is only one exit out of the preheader.
typedef GraphTraits<BlockT*> BlockTraits;
typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
++SI;
if (SI != BlockTraits::child_end(Out))
return 0; // Multiple exits from the block, must not be a preheader.
// The predecessor has exactly one successor, so it is a preheader.
return Out;
}
/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
/// This is less strict than the loop "preheader" concept, which requires
/// the predecessor to have exactly one successor.
///
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = 0;
// Loop over the predecessors of the header node...
BlockT *Header = getHeader();
typedef GraphTraits<BlockT*> BlockTraits;
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (!contains(N)) { // If the block is not in the loop...
if (Out && Out != N)
return 0; // Multiple predecessors outside the loop
Out = N;
}
}
// Make sure there is only one exit out of the preheader.
assert(Out && "Header of loop has no predecessors from outside loop?");
return Out;
}
/// getLoopLatch - If there is a single latch block for this loop, return it.
/// A latch block is a block that contains a branch back to the header.
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
BlockT *Header = getHeader();
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header);
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
BlockT *Latch = 0;
for (; PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (contains(N)) {
if (Latch) return 0;
Latch = N;
}
}
return Latch;
}
//===----------------------------------------------------------------------===//
// APIs for updating loop information after changing the CFG
//
/// addBasicBlockToLoop - This method is used by other analyses to update loop
/// information. NewBB is set to be a new member of the current loop.
/// Because of this, it is added as a member of all parent loops, and is added
/// to the specified LoopInfo object as being in the current basic block. It
/// is not valid to replace the loop header with this method.
///
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
assert((Blocks.empty() || LIB[getHeader()] == this) &&
"Incorrect LI specified for this loop!");
assert(NewBB && "Cannot add a null basic block to the loop!");
assert(LIB[NewBB] == 0 && "BasicBlock already in the loop!");
LoopT *L = static_cast<LoopT *>(this);
// Add the loop mapping to the LoopInfo object...
LIB.BBMap[NewBB] = L;
// Add the basic block to this loop and all parent loops...
while (L) {
L->Blocks.push_back(NewBB);
L = L->getParentLoop();
}
}
/// replaceChildLoopWith - This is used when splitting loops up. It replaces
/// the OldChild entry in our children list with NewChild, and updates the
/// parent pointer of OldChild to be null and the NewChild to be this loop.
/// This updates the loop depth of the new child.
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild) {
assert(OldChild->ParentLoop == this && "This loop is already broken!");
assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
typename std::vector<LoopT *>::iterator I =
std::find(SubLoops.begin(), SubLoops.end(), OldChild);
assert(I != SubLoops.end() && "OldChild not in loop!");
*I = NewChild;
OldChild->ParentLoop = 0;
NewChild->ParentLoop = static_cast<LoopT *>(this);
}
/// verifyLoop - Verify loop structure
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoop() const {
#ifndef NDEBUG
assert(!Blocks.empty() && "Loop header is missing");
// Setup for using a depth-first iterator to visit every block in the loop.
SmallVector<BlockT*, 8> ExitBBs;
getExitBlocks(ExitBBs);
llvm::SmallPtrSet<BlockT*, 8> VisitSet;
VisitSet.insert(ExitBBs.begin(), ExitBBs.end());
df_ext_iterator<BlockT*, llvm::SmallPtrSet<BlockT*, 8> >
BI = df_ext_begin(getHeader(), VisitSet),
BE = df_ext_end(getHeader(), VisitSet);
// Keep track of the number of BBs visited.
unsigned NumVisited = 0;
// Sort the blocks vector so that we can use binary search to do quick
// lookups.
SmallVector<BlockT*, 128> LoopBBs(block_begin(), block_end());
std::sort(LoopBBs.begin(), LoopBBs.end());
// Check the individual blocks.
for ( ; BI != BE; ++BI) {
BlockT *BB = *BI;
bool HasInsideLoopSuccs = false;
bool HasInsideLoopPreds = false;
SmallVector<BlockT *, 2> OutsideLoopPreds;
typedef GraphTraits<BlockT*> BlockTraits;
for (typename BlockTraits::ChildIteratorType SI =
BlockTraits::child_begin(BB), SE = BlockTraits::child_end(BB);
SI != SE; ++SI)
if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), *SI)) {
HasInsideLoopSuccs = true;
break;
}
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(BB), PE = InvBlockTraits::child_end(BB);
PI != PE; ++PI) {
BlockT *N = *PI;
if (std::binary_search(LoopBBs.begin(), LoopBBs.end(), N))
HasInsideLoopPreds = true;
else
OutsideLoopPreds.push_back(N);
}
if (BB == getHeader()) {
assert(!OutsideLoopPreds.empty() && "Loop is unreachable!");
} else if (!OutsideLoopPreds.empty()) {
// A non-header loop shouldn't be reachable from outside the loop,
// though it is permitted if the predecessor is not itself actually
// reachable.
BlockT *EntryBB = BB->getParent()->begin();
for (df_iterator<BlockT *> NI = df_begin(EntryBB),
NE = df_end(EntryBB); NI != NE; ++NI)
for (unsigned i = 0, e = OutsideLoopPreds.size(); i != e; ++i)
assert(*NI != OutsideLoopPreds[i] &&
"Loop has multiple entry points!");
}
assert(HasInsideLoopPreds && "Loop block has no in-loop predecessors!");
assert(HasInsideLoopSuccs && "Loop block has no in-loop successors!");
assert(BB != getHeader()->getParent()->begin() &&
"Loop contains function entry block!");
NumVisited++;
}
assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
// Check the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
// Each block in each subloop should be contained within this loop.
for (block_iterator BI = (*I)->block_begin(), BE = (*I)->block_end();
BI != BE; ++BI) {
assert(std::binary_search(LoopBBs.begin(), LoopBBs.end(), *BI) &&
"Loop does not contain all the blocks of a subloop!");
}
// Check the parent loop pointer.
if (ParentLoop) {
assert(std::find(ParentLoop->begin(), ParentLoop->end(), this) !=
ParentLoop->end() &&
"Loop is not a subloop of its parent!");
}
#endif
}
/// verifyLoopNest - Verify loop structure of this loop and all nested loops.
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::verifyLoopNest(
DenseSet<const LoopT*> *Loops) const {
Loops->insert(static_cast<const LoopT *>(this));
// Verify this loop.
verifyLoop();
// Verify the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)
(*I)->verifyLoopNest(Loops);
}
template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::print(raw_ostream &OS, unsigned Depth) const {
OS.indent(Depth*2) << "Loop at depth " << getLoopDepth()
<< " containing: ";
for (unsigned i = 0; i < getBlocks().size(); ++i) {
if (i) OS << ",";
BlockT *BB = getBlocks()[i];
WriteAsOperand(OS, BB, false);
if (BB == getHeader()) OS << "<header>";
if (BB == getLoopLatch()) OS << "<latch>";
if (isLoopExiting(BB)) OS << "<exiting>";
}
OS << "\n";
for (iterator I = begin(), E = end(); I != E; ++I)
(*I)->print(OS, Depth+2);
}
//===----------------------------------------------------------------------===//
/// Stable LoopInfo Analysis - Build a loop tree using stable iterators so the
/// result does not depend on use list (block predecessor) order.
///
/// Discover a subloop with the specified backedges such that: All blocks within
/// this loop are mapped to this loop or a subloop. And all subloops within this
/// loop have their parent loop set to this loop or a subloop.
template<class BlockT, class LoopT>
static void discoverAndMapSubloop(LoopT *L, ArrayRef<BlockT*> Backedges,
LoopInfoBase<BlockT, LoopT> *LI,
DominatorTreeBase<BlockT> &DomTree) {
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
unsigned NumBlocks = 0;
unsigned NumSubloops = 0;
// Perform a backward CFG traversal using a worklist.
std::vector<BlockT *> ReverseCFGWorklist(Backedges.begin(), Backedges.end());
while (!ReverseCFGWorklist.empty()) {
BlockT *PredBB = ReverseCFGWorklist.back();
ReverseCFGWorklist.pop_back();
LoopT *Subloop = LI->getLoopFor(PredBB);
if (!Subloop) {
if (!DomTree.isReachableFromEntry(PredBB))
continue;
// This is an undiscovered block. Map it to the current loop.
LI->changeLoopFor(PredBB, L);
++NumBlocks;
if (PredBB == L->getHeader())
continue;
// Push all block predecessors on the worklist.
ReverseCFGWorklist.insert(ReverseCFGWorklist.end(),
InvBlockTraits::child_begin(PredBB),
InvBlockTraits::child_end(PredBB));
}
else {
// This is a discovered block. Find its outermost discovered loop.
while (LoopT *Parent = Subloop->getParentLoop())
Subloop = Parent;
// If it is already discovered to be a subloop of this loop, continue.
if (Subloop == L)
continue;
// Discover a subloop of this loop.
Subloop->setParentLoop(L);
++NumSubloops;
NumBlocks += Subloop->getBlocks().capacity();
PredBB = Subloop->getHeader();
// Continue traversal along predecessors that are not loop-back edges from
// within this subloop tree itself. Note that a predecessor may directly
// reach another subloop that is not yet discovered to be a subloop of
// this loop, which we must traverse.
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(PredBB),
PE = InvBlockTraits::child_end(PredBB); PI != PE; ++PI) {
if (LI->getLoopFor(*PI) != Subloop)
ReverseCFGWorklist.push_back(*PI);
}
}
}
L->getSubLoopsVector().reserve(NumSubloops);
L->getBlocksVector().reserve(NumBlocks);
}
namespace {
/// Populate all loop data in a stable order during a single forward DFS.
template<class BlockT, class LoopT>
class PopulateLoopsDFS {
typedef GraphTraits<BlockT*> BlockTraits;
typedef typename BlockTraits::ChildIteratorType SuccIterTy;
LoopInfoBase<BlockT, LoopT> *LI;
DenseSet<const BlockT *> VisitedBlocks;
std::vector<std::pair<BlockT*, SuccIterTy> > DFSStack;
public:
PopulateLoopsDFS(LoopInfoBase<BlockT, LoopT> *li):
LI(li) {}
void traverse(BlockT *EntryBlock);
protected:
void insertIntoLoop(BlockT *Block);
BlockT *dfsSource() { return DFSStack.back().first; }
SuccIterTy &dfsSucc() { return DFSStack.back().second; }
SuccIterTy dfsSuccEnd() { return BlockTraits::child_end(dfsSource()); }
void pushBlock(BlockT *Block) {
DFSStack.push_back(std::make_pair(Block, BlockTraits::child_begin(Block)));
}
};
} // anonymous
/// Top-level driver for the forward DFS within the loop.
template<class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::traverse(BlockT *EntryBlock) {
pushBlock(EntryBlock);
VisitedBlocks.insert(EntryBlock);
while (!DFSStack.empty()) {
// Traverse the leftmost path as far as possible.
while (dfsSucc() != dfsSuccEnd()) {
BlockT *BB = *dfsSucc();
++dfsSucc();
if (!VisitedBlocks.insert(BB).second)
continue;
// Push the next DFS successor onto the stack.
pushBlock(BB);
}
// Visit the top of the stack in postorder and backtrack.
insertIntoLoop(dfsSource());
DFSStack.pop_back();
}
}
/// Add a single Block to its ancestor loops in PostOrder. If the block is a
/// subloop header, add the subloop to its parent in PostOrder, then reverse the
/// Block and Subloop vectors of the now complete subloop to achieve RPO.
template<class BlockT, class LoopT>
void PopulateLoopsDFS<BlockT, LoopT>::insertIntoLoop(BlockT *Block) {
LoopT *Subloop = LI->getLoopFor(Block);
if (Subloop && Block == Subloop->getHeader()) {
// We reach this point once per subloop after processing all the blocks in
// the subloop.
if (Subloop->getParentLoop())
Subloop->getParentLoop()->getSubLoopsVector().push_back(Subloop);
else
LI->addTopLevelLoop(Subloop);
// For convenience, Blocks and Subloops are inserted in postorder. Reverse
// the lists, except for the loop header, which is always at the beginning.
std::reverse(Subloop->getBlocksVector().begin()+1,
Subloop->getBlocksVector().end());
std::reverse(Subloop->getSubLoopsVector().begin(),
Subloop->getSubLoopsVector().end());
Subloop = Subloop->getParentLoop();
}
for (; Subloop; Subloop = Subloop->getParentLoop())
Subloop->getBlocksVector().push_back(Block);
}
/// Analyze - LoopInfo discovers loops during a postorder DominatorTree traversal
/// interleaved with backward CFG traversals within each subloop
/// (discoverAndMapSubloop). The backward traversal skips inner subloops, so
/// this part of the algorithm is linear in the number of CFG edges. Subloop and
/// Block vectors are then populated during a single forward CFG traversal
/// (PopulateLoopDFS).
///
/// During the two CFG traversals each block is seen three times:
/// 1) Discovered and mapped by a reverse CFG traversal.
/// 2) Visited during a forward DFS CFG traversal.
/// 3) Reverse-inserted in the loop in postorder following forward DFS.
///
/// The Block vectors are inclusive, so step 3 requires loop-depth number of
/// insertions per block.
template<class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::
Analyze(DominatorTreeBase<BlockT> &DomTree) {
// Postorder traversal of the dominator tree.
DomTreeNodeBase<BlockT>* DomRoot = DomTree.getRootNode();
for (po_iterator<DomTreeNodeBase<BlockT>*> DomIter = po_begin(DomRoot),
DomEnd = po_end(DomRoot); DomIter != DomEnd; ++DomIter) {
BlockT *Header = DomIter->getBlock();
SmallVector<BlockT *, 4> Backedges;
// Check each predecessor of the potential loop header.
typedef GraphTraits<Inverse<BlockT*> > InvBlockTraits;
for (typename InvBlockTraits::ChildIteratorType PI =
InvBlockTraits::child_begin(Header),
PE = InvBlockTraits::child_end(Header); PI != PE; ++PI) {
BlockT *Backedge = *PI;
// If Header dominates Backedge, this is a new loop. Collect the backedges.
if (DomTree.dominates(Header, Backedge)
&& DomTree.isReachableFromEntry(Backedge)) {
Backedges.push_back(Backedge);
}
}
// Perform a backward CFG traversal to discover and map blocks in this loop.
if (!Backedges.empty()) {
LoopT *L = new LoopT(Header);
discoverAndMapSubloop(L, ArrayRef<BlockT*>(Backedges), this, DomTree);
}
}
// Perform a single forward CFG traversal to populate block and subloop
// vectors for all loops.
PopulateLoopsDFS<BlockT, LoopT> DFS(this);
DFS.traverse(DomRoot->getBlock());
}
// Debugging
template<class BlockT, class LoopT>
void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
for (unsigned i = 0; i < TopLevelLoops.size(); ++i)
TopLevelLoops[i]->print(OS);
#if 0
for (DenseMap<BasicBlock*, LoopT*>::const_iterator I = BBMap.begin(),
E = BBMap.end(); I != E; ++I)
OS << "BB '" << I->first->getName() << "' level = "
<< I->second->getLoopDepth() << "\n";
#endif
}
} // End llvm namespace
#endif

View File

@ -109,6 +109,16 @@ class LoopBlocksDFS {
}
};
/// Specialize po_iterator_storage to record postorder numbers.
template<> class po_iterator_storage<LoopBlocksTraversal, true> {
LoopBlocksTraversal &LBT;
public:
po_iterator_storage(LoopBlocksTraversal &lbs) : LBT(lbs) {}
// These functions are defined below.
bool insertEdge(BasicBlock *From, BasicBlock *To);
void finishPostorder(BasicBlock *BB);
};
/// Traverse the blocks in a loop using a depth-first search.
class LoopBlocksTraversal {
public:
@ -155,31 +165,17 @@ class LoopBlocksTraversal {
DFS.PostBlocks.push_back(BB);
DFS.PostNumbers[BB] = DFS.PostBlocks.size();
}
//===----------------------------------------------------------------------
// Implement part of the std::set interface for the purpose of driving the
// generic po_iterator.
/// Return true if the block is outside the loop or has already been visited.
/// Sorry if this is counterintuitive.
bool count(BasicBlock *BB) const {
return !DFS.L->contains(LI->getLoopFor(BB)) || DFS.PostNumbers.count(BB);
}
/// If this block is contained in the loop and has not been visited, return
/// true and assign a preorder number. This is a proxy for visitPreorder
/// called by POIterator.
bool insert(BasicBlock *BB) {
return visitPreorder(BB);
}
};
/// Specialize DFSetTraits to record postorder numbers.
template<> struct DFSetTraits<LoopBlocksTraversal> {
static void finishPostorder(BasicBlock *BB, LoopBlocksTraversal& LBT) {
LBT.finishPostorder(BB);
}
};
inline bool po_iterator_storage<LoopBlocksTraversal, true>::
insertEdge(BasicBlock *From, BasicBlock *To) {
return LBT.visitPreorder(To);
}
inline void po_iterator_storage<LoopBlocksTraversal, true>::
finishPostorder(BasicBlock *BB) {
LBT.finishPostorder(BB);
}
} // End namespace llvm

View File

@ -15,6 +15,15 @@
#ifndef LLVM_ANALYSIS_MEMORYBUILTINS_H
#define LLVM_ANALYSIS_MEMORYBUILTINS_H
#include "llvm/IRBuilder.h"
#include "llvm/Operator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/InstVisitor.h"
#include "llvm/Support/TargetFolder.h"
#include "llvm/Support/ValueHandle.h"
namespace llvm {
class CallInst;
class PointerType;
@ -22,24 +31,44 @@ class TargetData;
class Type;
class Value;
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or
/// strdup-like).
bool isAllocationFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool isNoAliasFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool isMallocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool isCallocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup-like).
bool isAllocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc).
bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false);
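// A minimal usage sketch (not part of this header), built only on the
// predicates declared above; the helper name is hypothetical.
inline const char *classifyAllocation(const Value *V) {
  if (isMallocLikeFn(V))  return "malloc-like";
  if (isCallocLikeFn(V))  return "calloc-like";
  if (isReallocLikeFn(V)) return "realloc-like";
  if (isAllocationFn(V))  return "other allocation function";
  return "not an allocation";
}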
//===----------------------------------------------------------------------===//
// malloc Call Utility Functions.
//
/// isMalloc - Returns true if the value is either a malloc call or a bitcast of
/// the result of a malloc call
bool isMalloc(const Value *I);
/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst *extractMallocCall(const Value *I);
CallInst *extractMallocCall(Value *I);
/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the
/// instruction is a bitcast of the result of a malloc call.
const CallInst *extractMallocCallFromBitCast(const Value *I);
CallInst *extractMallocCallFromBitCast(Value *I);
static inline CallInst *extractMallocCall(Value *I) {
return const_cast<CallInst*>(extractMallocCall((const Value*)I));
}
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
@ -67,7 +96,20 @@ Type *getMallocAllocatedType(const CallInst *CI);
/// determined.
Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
bool LookThroughSExt = false);
//===----------------------------------------------------------------------===//
// calloc Call Utility Functions.
//
/// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call.
const CallInst *extractCallocCall(const Value *I);
static inline CallInst *extractCallocCall(Value *I) {
return const_cast<CallInst*>(extractCallocCall((const Value*)I));
}
//===----------------------------------------------------------------------===//
// free Call Utility Functions.
//
@ -79,6 +121,131 @@ static inline CallInst *isFreeCall(Value *I) {
return const_cast<CallInst*>(isFreeCall((const Value*)I));
}
//===----------------------------------------------------------------------===//
// Utility functions to compute size of objects.
//
/// \brief Compute the size of the object pointed to by Ptr. Returns true and
/// the object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of
/// allocas, byval arguments, and global variables.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
bool RoundToAlign = false);
typedef std::pair<APInt, APInt> SizeOffsetType;
/// \brief Evaluate the size and offset of an object pointed to by a Value*
/// statically. Fails if size or offset are not known at compile time.
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
const TargetData *TD;
bool RoundToAlign;
unsigned IntTyBits;
APInt Zero;
APInt align(APInt Size, uint64_t Align);
SizeOffsetType unknown() {
return std::make_pair(APInt(), APInt());
}
public:
ObjectSizeOffsetVisitor(const TargetData *TD, LLVMContext &Context,
bool RoundToAlign = false);
SizeOffsetType compute(Value *V);
bool knownSize(SizeOffsetType &SizeOffset) {
return SizeOffset.first.getBitWidth() > 1;
}
bool knownOffset(SizeOffsetType &SizeOffset) {
return SizeOffset.second.getBitWidth() > 1;
}
bool bothKnown(SizeOffsetType &SizeOffset) {
return knownSize(SizeOffset) && knownOffset(SizeOffset);
}
SizeOffsetType visitAllocaInst(AllocaInst &I);
SizeOffsetType visitArgument(Argument &A);
SizeOffsetType visitCallSite(CallSite CS);
SizeOffsetType visitConstantPointerNull(ConstantPointerNull&);
SizeOffsetType visitExtractElementInst(ExtractElementInst &I);
SizeOffsetType visitExtractValueInst(ExtractValueInst &I);
SizeOffsetType visitGEPOperator(GEPOperator &GEP);
SizeOffsetType visitGlobalVariable(GlobalVariable &GV);
SizeOffsetType visitIntToPtrInst(IntToPtrInst&);
SizeOffsetType visitLoadInst(LoadInst &I);
SizeOffsetType visitPHINode(PHINode&);
SizeOffsetType visitSelectInst(SelectInst &I);
SizeOffsetType visitUndefValue(UndefValue&);
SizeOffsetType visitInstruction(Instruction &I);
};
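// A usage sketch (not part of this header), assuming only the interface
// above: statically resolve the size and offset of the object behind Ptr,
// failing cleanly when either is unknown. The wrapper name is hypothetical.
inline bool computeStaticSizeOffset(Value *Ptr, const TargetData *TD,
                                    LLVMContext &Context,
                                    APInt &Size, APInt &Offset) {
  ObjectSizeOffsetVisitor Visitor(TD, Context, /*RoundToAlign=*/true);
  SizeOffsetType SizeOffset = Visitor.compute(Ptr);
  if (!Visitor.bothKnown(SizeOffset))
    return false;  // size or offset not known at compile time
  Size = SizeOffset.first;
  Offset = SizeOffset.second;
  return true;
}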
typedef std::pair<Value*, Value*> SizeOffsetEvalType;
/// \brief Evaluate the size and offset of an object pointed to by a Value*.
/// May create code to compute the result at run-time.
class ObjectSizeOffsetEvaluator
: public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
typedef IRBuilder<true, TargetFolder> BuilderTy;
typedef std::pair<WeakVH, WeakVH> WeakEvalType;
typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
typedef SmallPtrSet<const Value*, 8> PtrSetTy;
const TargetData *TD;
LLVMContext &Context;
BuilderTy Builder;
ObjectSizeOffsetVisitor Visitor;
IntegerType *IntTy;
Value *Zero;
CacheMapTy CacheMap;
PtrSetTy SeenVals;
SizeOffsetEvalType unknown() {
return std::make_pair((Value*)0, (Value*)0);
}
SizeOffsetEvalType compute_(Value *V);
public:
ObjectSizeOffsetEvaluator(const TargetData *TD, LLVMContext &Context);
SizeOffsetEvalType compute(Value *V);
bool knownSize(SizeOffsetEvalType SizeOffset) {
return SizeOffset.first;
}
bool knownOffset(SizeOffsetEvalType SizeOffset) {
return SizeOffset.second;
}
bool anyKnown(SizeOffsetEvalType SizeOffset) {
return knownSize(SizeOffset) || knownOffset(SizeOffset);
}
bool bothKnown(SizeOffsetEvalType SizeOffset) {
return knownSize(SizeOffset) && knownOffset(SizeOffset);
}
SizeOffsetEvalType visitAllocaInst(AllocaInst &I);
SizeOffsetEvalType visitCallSite(CallSite CS);
SizeOffsetEvalType visitExtractElementInst(ExtractElementInst &I);
SizeOffsetEvalType visitExtractValueInst(ExtractValueInst &I);
SizeOffsetEvalType visitGEPOperator(GEPOperator &GEP);
SizeOffsetEvalType visitIntToPtrInst(IntToPtrInst&);
SizeOffsetEvalType visitLoadInst(LoadInst &I);
SizeOffsetEvalType visitPHINode(PHINode &PHI);
SizeOffsetEvalType visitSelectInst(SelectInst &I);
SizeOffsetEvalType visitInstruction(Instruction &I);
};
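// The run-time counterpart as a sketch (not part of this header): when the
// static visitor fails, this variant can emit IR that computes the values.
// The wrapper name is hypothetical.
inline bool emitRuntimeSizeOffset(Value *Ptr, const TargetData *TD,
                                  LLVMContext &Context,
                                  Value *&Size, Value *&Offset) {
  ObjectSizeOffsetEvaluator Eval(TD, Context);
  SizeOffsetEvalType SizeOffset = Eval.compute(Ptr);
  if (!Eval.bothKnown(SizeOffset))
    return false;
  Size = SizeOffset.first;    // Value* that computes the size at run time
  Offset = SizeOffset.second; // Value* that computes the offset
  return true;
}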
} // End llvm namespace
#endif

View File

@ -124,11 +124,11 @@ namespace llvm {
}
/// isClobber - Return true if this MemDepResult represents a query that is
/// a instruction clobber dependency.
/// an instruction clobber dependency.
bool isClobber() const { return Value.getInt() == Clobber; }
/// isDef - Return true if this MemDepResult represents a query that is
/// a instruction definition dependency.
/// an instruction definition dependency.
bool isDef() const { return Value.getInt() == Def; }
/// isNonLocal - Return true if this MemDepResult represents a query that
@ -431,9 +431,6 @@ namespace llvm {
void RemoveCachedNonLocalPointerDependencies(ValueIsLoadPair P);
AliasAnalysis::ModRefResult
getModRefInfo(const Instruction *Inst, const AliasAnalysis::Location &Loc);
/// verifyRemoved - Verify that the specified instruction does not occur
/// in our internal data structures.
void verifyRemoved(Instruction *Inst) const;

View File

@ -28,19 +28,16 @@ class BasicBlock;
class ProfileInfoLoader {
const std::string &Filename;
Module &M;
std::vector<std::string> CommandLines;
std::vector<unsigned> FunctionCounts;
std::vector<unsigned> BlockCounts;
std::vector<unsigned> EdgeCounts;
std::vector<unsigned> OptimalEdgeCounts;
std::vector<unsigned> BBTrace;
bool Warned;
public:
// ProfileInfoLoader ctor - Read the specified profiling data file, exiting
// the program if the file is invalid or broken.
ProfileInfoLoader(const char *ToolName, const std::string &Filename,
Module &M);
ProfileInfoLoader(const char *ToolName, const std::string &Filename);
static const unsigned Uncounted;

View File

@ -473,25 +473,85 @@ class Region : public RegionNode {
const_iterator end() const { return children.end(); }
//@}
/// @name BasicBlock Iterators
/// @name BasicBlock Node Iterators
///
/// These iterators iterate over all BasicBlock RegionNodes that are
/// contained in this Region. The iterator also iterates over BasicBlocks
/// that are elements of a subregion of this Region. It is therefore called a
/// flat iterator.
/// contained in this Region. The iterator also iterates over BasicBlock
/// RegionNodes that are elements of a subregion of this Region. It is
/// therefore called a flat iterator.
//@{
typedef df_iterator<RegionNode*, SmallPtrSet<RegionNode*, 8>, false,
GraphTraits<FlatIt<RegionNode*> > > block_iterator;
GraphTraits<FlatIt<RegionNode*> > > block_node_iterator;
typedef df_iterator<const RegionNode*, SmallPtrSet<const RegionNode*, 8>,
false, GraphTraits<FlatIt<const RegionNode*> > >
const_block_iterator;
const_block_node_iterator;
block_iterator block_begin();
block_iterator block_end();
block_node_iterator block_node_begin();
block_node_iterator block_node_end();
const_block_iterator block_begin() const;
const_block_iterator block_end() const;
const_block_node_iterator block_node_begin() const;
const_block_node_iterator block_node_end() const;
//@}
/// @name BasicBlock Iterators
///
/// These iterators iterate over all BasicBlocks that are contained in this
/// Region. The iterator also iterates over BasicBlocks that are elements of
/// a subregion of this Region. It is therefore called a flat iterator.
//@{
template <bool IsConst>
class block_iterator_wrapper
: public df_iterator<typename conditional<IsConst,
const BasicBlock,
BasicBlock>::type*> {
typedef df_iterator<typename conditional<IsConst,
const BasicBlock,
BasicBlock>::type*>
super;
public:
typedef block_iterator_wrapper<IsConst> Self;
typedef typename super::pointer pointer;
// Construct the begin iterator.
block_iterator_wrapper(pointer Entry, pointer Exit) : super(df_begin(Entry))
{
// Mark the exit of the region as visited, so that the children of the
// exit and the exit itself, i.e. the blocks outside the region, will
// never be visited.
super::Visited.insert(Exit);
}
// Construct the end iterator.
block_iterator_wrapper() : super(df_end<pointer>((BasicBlock *)0)) {}
/*implicit*/ block_iterator_wrapper(super I) : super(I) {}
// FIXME: Even a const_iterator returns a non-const BasicBlock pointer.
// This was introduced for backwards compatibility, but should
// be removed as soon as all users are fixed.
BasicBlock *operator*() const {
return const_cast<BasicBlock*>(super::operator*());
}
};
typedef block_iterator_wrapper<false> block_iterator;
typedef block_iterator_wrapper<true> const_block_iterator;
block_iterator block_begin() {
return block_iterator(getEntry(), getExit());
}
block_iterator block_end() {
return block_iterator();
}
const_block_iterator block_begin() const {
return const_block_iterator(getEntry(), getExit());
}
const_block_iterator block_end() const {
return const_block_iterator();
}
//@}
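// A short usage sketch (assuming a Region &R; not part of this interface):
//   unsigned N = 0;
//   for (Region::block_iterator I = R.block_begin(), E = R.block_end();
//        I != E; ++I)
//     ++N;  // *I yields a BasicBlock*, even through const_block_iterator
// The walk is flat: blocks belonging to subregions are visited as well.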
/// @name Element Iterators

View File

@ -30,7 +30,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include <map>
namespace llvm {
@ -250,6 +250,9 @@ namespace llvm {
///
ValueExprMapType ValueExprMap;
/// Mark predicate values currently being processed by isImpliedCond.
DenseSet<Value*> PendingLoopPredicates;
/// ExitLimit - Information about the number of loop iterations for
/// which a loop exit's branch condition evaluates to the not-taken path.
/// This is a temporary pair of exact and max expressions that are
@ -834,7 +837,8 @@ namespace llvm {
///
bool SimplifyICmpOperands(ICmpInst::Predicate &Pred,
const SCEV *&LHS,
const SCEV *&RHS);
const SCEV *&RHS,
unsigned Depth = 0);
/// getLoopDisposition - Return the "disposition" of the given SCEV with
/// respect to the given loop.

View File

@ -14,9 +14,9 @@
#ifndef LLVM_ANALYSIS_SCALAREVOLUTION_EXPANDER_H
#define LLVM_ANALYSIS_SCALAREVOLUTION_EXPANDER_H
#include "llvm/IRBuilder.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ScalarEvolutionNormalization.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/TargetFolder.h"
#include "llvm/Support/ValueHandle.h"
#include <set>
@ -24,6 +24,10 @@
namespace llvm {
class TargetLowering;
/// Return true if the given expression is safe to expand in the sense that
/// all materialized values are safe to speculate.
bool isSafeToExpand(const SCEV *S);
/// SCEVExpander - This class uses information about analyzed scalars to
/// rewrite expressions in canonical form.
///

View File

@ -15,6 +15,7 @@
#define LLVM_ANALYSIS_SCALAREVOLUTION_EXPRESSIONS_H
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
@ -493,6 +494,74 @@ namespace llvm {
llvm_unreachable("Invalid use of SCEVCouldNotCompute!");
}
};
/// Visit all nodes in the expression tree using worklist traversal.
///
/// Visitor implements:
/// // return true to follow this node.
/// bool follow(const SCEV *S);
/// // return true to terminate the search.
/// bool isDone();
template<typename SV>
class SCEVTraversal {
SV &Visitor;
SmallVector<const SCEV *, 8> Worklist;
SmallPtrSet<const SCEV *, 8> Visited;
void push(const SCEV *S) {
if (Visited.insert(S) && Visitor.follow(S))
Worklist.push_back(S);
}
public:
SCEVTraversal(SV& V): Visitor(V) {}
void visitAll(const SCEV *Root) {
push(Root);
while (!Worklist.empty() && !Visitor.isDone()) {
const SCEV *S = Worklist.pop_back_val();
switch (S->getSCEVType()) {
case scConstant:
case scUnknown:
break;
case scTruncate:
case scZeroExtend:
case scSignExtend:
push(cast<SCEVCastExpr>(S)->getOperand());
break;
case scAddExpr:
case scMulExpr:
case scSMaxExpr:
case scUMaxExpr:
case scAddRecExpr: {
const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
for (SCEVNAryExpr::op_iterator I = NAry->op_begin(),
E = NAry->op_end(); I != E; ++I) {
push(*I);
}
break;
}
case scUDivExpr: {
const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
push(UDiv->getLHS());
push(UDiv->getRHS());
break;
}
case scCouldNotCompute:
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
default:
llvm_unreachable("Unknown SCEV kind!");
}
}
}
};
/// Use SCEVTraversal to visit all nodes in the given expression tree.
template<typename SV>
void visitAll(const SCEV *Root, SV& Visitor) {
SCEVTraversal<SV> T(Visitor);
T.visitAll(Root);
}
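// A worked example (not part of this header), built only on the interface
// above: a visitor that reports whether an expression contains an
// add-recurrence, cutting the walk short once one is found. The struct and
// helper names are hypothetical.
struct SCEVHasAddRec {
  bool Found;
  SCEVHasAddRec() : Found(false) {}
  bool follow(const SCEV *S) {
    if (isa<SCEVAddRecExpr>(S))
      Found = true;
    return !Found;  // stop descending once a match is seen
  }
  bool isDone() { return Found; }
};
inline bool containsAddRec(const SCEV *Root) {
  SCEVHasAddRec Finder;
  visitAll(Root, Finder);
  return Finder.Found;
}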
}
#endif

View File

@ -151,6 +151,14 @@ namespace llvm {
return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
}
/// GetUnderlyingObjects - This method is similar to GetUnderlyingObject
/// except that it can look through phi and select instructions and return
/// multiple objects.
void GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects,
const TargetData *TD = 0,
unsigned MaxLookup = 6);
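// A small usage sketch (not part of this header, and assuming SmallVector is
// in scope here): because the walk looks through phis and selects, a pointer
// may map to several objects, so clients must inspect the whole vector. The
// helper name is hypothetical.
inline bool hasSingleUnderlyingObject(Value *V, const TargetData *TD = 0) {
  SmallVector<Value *, 4> Objects;
  GetUnderlyingObjects(V, Objects, TD);  // may append multiple candidates
  return Objects.size() == 1;
}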
/// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
/// are lifetime markers.
bool onlyUsedByLifetimeMarkers(const Value *V);

View File

@ -16,6 +16,7 @@
#define LLVM_ATTRIBUTES_H
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/ArrayRef.h"
#include <cassert>
#include <string>
@ -45,14 +46,9 @@ class Attributes {
Attributes() : Bits(0) { }
explicit Attributes(uint64_t Val) : Bits(Val) { }
/*implicit*/ Attributes(Attribute::AttrConst Val) : Bits(Val.v) { }
Attributes(const Attributes &Attrs) : Bits(Attrs.Bits) { }
// This is a "safe bool() operator".
operator const void *() const { return Bits ? this : 0; }
bool isEmptyOrSingleton() const { return (Bits & (Bits - 1)) == 0; }
Attributes &operator = (const Attributes &Attrs) {
Bits = Attrs.Bits;
return *this;
}
bool operator == (const Attributes &Attrs) const {
return Bits == Attrs.Bits;
}
@ -138,6 +134,9 @@ DECLARE_LLVM_ATTRIBUTE(NonLazyBind,1U<<31) ///< Function is called early and/or
/// often, so lazy binding isn't
/// worthwhile.
DECLARE_LLVM_ATTRIBUTE(AddressSafety,1ULL<<32) ///< Address safety checking is on.
DECLARE_LLVM_ATTRIBUTE(IANSDialect,1ULL<<33) ///< Inline asm non-standard dialect.
/// When not set, ATT dialect assumed.
/// When set implies the Intel dialect.
#undef DECLARE_LLVM_ATTRIBUTE
@ -163,14 +162,16 @@ const AttrConst FunctionOnly = {NoReturn_i | NoUnwind_i | ReadNone_i |
ReadOnly_i | NoInline_i | AlwaysInline_i | OptimizeForSize_i |
StackProtect_i | StackProtectReq_i | NoRedZone_i | NoImplicitFloat_i |
Naked_i | InlineHint_i | StackAlignment_i |
UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i};
UWTable_i | NonLazyBind_i | ReturnsTwice_i | AddressSafety_i |
IANSDialect_i};
/// @brief Parameter attributes that do not apply to vararg call arguments.
const AttrConst VarArgsIncompatible = {StructRet_i};
/// @brief Attributes that are mutually incompatible.
const AttrConst MutuallyIncompatible[4] = {
{ByVal_i | InReg_i | Nest_i | StructRet_i},
const AttrConst MutuallyIncompatible[5] = {
{ByVal_i | Nest_i | StructRet_i},
{ByVal_i | Nest_i | InReg_i },
{ZExt_i | SExt_i},
{ReadNone_i | ReadOnly_i},
{NoInline_i | AlwaysInline_i}
@ -222,6 +223,50 @@ inline unsigned getStackAlignmentFromAttrs(Attributes A) {
return 1U << ((StackAlign.Raw() >> 26) - 1);
}
/// This returns an integer containing an encoding of all the
/// LLVM attributes found in the given attribute bitset. Any
/// change to this encoding is a breaking change to bitcode
/// compatibility.
inline uint64_t encodeLLVMAttributesForBitcode(Attributes Attrs) {
// FIXME: It doesn't make sense to store the alignment information as an
// expanded out value, we should store it as a log2 value. However, we can't
// just change that here without breaking bitcode compatibility. If this ever
// becomes a problem in practice, we should introduce new tag numbers in the
// bitcode file and have those tags use a more efficiently encoded alignment
// field.
// Store the alignment in the bitcode as a 16-bit raw value instead of a
// 5-bit log2 encoded value. Shift the bits above the alignment up by
// 11 bits.
uint64_t EncodedAttrs = Attrs.Raw() & 0xffff;
if (Attrs & Attribute::Alignment)
EncodedAttrs |= (1ull << 16) <<
(((Attrs & Attribute::Alignment).Raw()-1) >> 16);
EncodedAttrs |= (Attrs.Raw() & (0xfffull << 21)) << 11;
return EncodedAttrs;
}
/// This returns an attribute bitset containing the LLVM attributes
/// that have been decoded from the given integer. This function
/// must stay in sync with 'encodeLLVMAttributesForBitcode'.
inline Attributes decodeLLVMAttributesForBitcode(uint64_t EncodedAttrs) {
// The alignment is stored as a 16-bit raw value from bits 31--16.
// We shift the bits above 31 down by 11 bits.
unsigned Alignment = (EncodedAttrs & (0xffffull << 16)) >> 16;
assert((!Alignment || isPowerOf2_32(Alignment)) &&
"Alignment must be a power of two.");
Attributes Attrs(EncodedAttrs & 0xffff);
if (Alignment)
Attrs |= Attribute::constructAlignmentFromInt(Alignment);
Attrs |= Attributes((EncodedAttrs & (0xfffull << 32)) >> 11);
return Attrs;
}
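// A round-trip sketch (not part of this header): since both functions are
// part of the bitcode ABI, the decoder must invert the encoder exactly. The
// helper name is hypothetical.
inline bool attributesRoundTrip(Attributes Attrs) {
  uint64_t Encoded = encodeLLVMAttributesForBitcode(Attrs);
  // Decoding what we just encoded must reproduce the original bitset.
  return decodeLLVMAttributesForBitcode(Encoded) == Attrs;
}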
/// The set of attributes present in Attributes is converted to a
/// string of equivalent mnemonics. This is, presumably, for writing out
@ -268,16 +313,8 @@ class AttrListPtr {
// Attribute List Construction and Mutation
//===--------------------------------------------------------------------===//
/// get - Return an Attributes list with the specified parameter in it.
static AttrListPtr get(const AttributeWithIndex *Attr, unsigned NumAttrs);
/// get - Return an Attribute list with the parameters specified by the
/// consecutive random access iterator range.
template <typename Iter>
static AttrListPtr get(const Iter &I, const Iter &E) {
if (I == E) return AttrListPtr(); // Empty list.
return get(&*I, static_cast<unsigned>(E-I));
}
/// get - Return an Attributes list with the specified parameters in it.
static AttrListPtr get(ArrayRef<AttributeWithIndex> Attrs);
/// addAttr - Add the specified attribute at the specified index to this
/// attribute list. Since attribute lists are immutable, this

View File

@ -47,14 +47,13 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
/// characteristics of the member. The various "is" methods below provide
/// access to the flags. The flags are not user settable.
enum Flags {
CompressedFlag = 1, ///< Member is a normal compressed file
SVR4SymbolTableFlag = 2, ///< Member is a SVR4 symbol table
BSD4SymbolTableFlag = 4, ///< Member is a BSD4 symbol table
LLVMSymbolTableFlag = 8, ///< Member is an LLVM symbol table
BitcodeFlag = 16, ///< Member is bitcode
HasPathFlag = 64, ///< Member has a full or partial path
HasLongFilenameFlag = 128, ///< Member uses the long filename syntax
StringTableFlag = 256 ///< Member is an ar(1) format string table
SVR4SymbolTableFlag = 1, ///< Member is a SVR4 symbol table
BSD4SymbolTableFlag = 2, ///< Member is a BSD4 symbol table
LLVMSymbolTableFlag = 4, ///< Member is an LLVM symbol table
BitcodeFlag = 8, ///< Member is bitcode
HasPathFlag = 16, ///< Member has a full or partial path
HasLongFilenameFlag = 32, ///< Member uses the long filename syntax
StringTableFlag = 64 ///< Member is an ar(1) format string table
};
/// @}
@ -109,11 +108,6 @@ class ArchiveMember : public ilist_node<ArchiveMember> {
/// @brief Get the data content of the archive member
const char* getData() const { return data; }
/// This method determines if the member is a regular compressed file.
/// @returns true iff the archive member is a compressed regular file.
/// @brief Determine if the member is a compressed regular file.
bool isCompressed() const { return flags&CompressedFlag; }
/// @returns true iff the member is a SVR4 (non-LLVM) symbol table
/// @brief Determine if this member is a SVR4 symbol table.
bool isSVR4SymbolTable() const { return flags&SVR4SymbolTableFlag; }
@ -427,7 +421,6 @@ class Archive {
bool writeToDisk(
bool CreateSymbolTable=false, ///< Create Symbol table
bool TruncateNames=false, ///< Truncate the filename to 15 chars
bool Compress=false, ///< Compress files
std::string* ErrMessage=0 ///< If non-null, where error msg is set
);
@ -494,7 +487,6 @@ class Archive {
std::ofstream& ARFile, ///< The file to write member onto
bool CreateSymbolTable, ///< Should symbol table be created?
bool TruncateNames, ///< Should names be truncated to 11 chars?
bool ShouldCompress, ///< Should the member be compressed?
std::string* ErrMessage ///< If non-null, place where error msg is set
);

View File

@ -71,8 +71,8 @@ namespace llvm {
/// isBitcodeWrapper - Return true if the given bytes are the magic bytes
/// for an LLVM IR bitcode wrapper.
///
static inline bool isBitcodeWrapper(const unsigned char *BufPtr,
const unsigned char *BufEnd) {
inline bool isBitcodeWrapper(const unsigned char *BufPtr,
const unsigned char *BufEnd) {
// See if you can find the hidden message in the magic bytes :-).
// (Hint: it's a little-endian encoding.)
return BufPtr != BufEnd &&
@ -85,8 +85,8 @@ namespace llvm {
/// isRawBitcode - Return true if the given bytes are the magic bytes for
/// raw LLVM IR bitcode (without a wrapper).
///
static inline bool isRawBitcode(const unsigned char *BufPtr,
const unsigned char *BufEnd) {
inline bool isRawBitcode(const unsigned char *BufPtr,
const unsigned char *BufEnd) {
// These bytes sort of have a hidden message, but it's not in
// little-endian this time, and it's a little redundant.
return BufPtr != BufEnd &&
@ -99,8 +99,8 @@ namespace llvm {
/// isBitcode - Return true if the given bytes are the magic bytes for
/// LLVM IR bitcode, either with or without a wrapper.
///
static bool inline isBitcode(const unsigned char *BufPtr,
const unsigned char *BufEnd) {
inline bool isBitcode(const unsigned char *BufPtr,
const unsigned char *BufEnd) {
return isBitcodeWrapper(BufPtr, BufEnd) ||
isRawBitcode(BufPtr, BufEnd);
}
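// A tiny sketch (not part of this header): raw bitcode begins with the four
// magic bytes 'B', 'C', 0xc0, 0xde, so the predicates above can be exercised
// directly (hypothetical example):
//   static const unsigned char Magic[] = { 'B', 'C', 0xc0, 0xde };
//   bool IsBC = isRawBitcode(Magic, Magic + sizeof(Magic));  // true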
@ -121,9 +121,9 @@ namespace llvm {
/// BC file.
/// If 'VerifyBufferSize' is true, check that the buffer is large enough to
/// contain the whole bitcode file.
static inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
const unsigned char *&BufEnd,
bool VerifyBufferSize) {
inline bool SkipBitcodeWrapperHeader(const unsigned char *&BufPtr,
const unsigned char *&BufEnd,
bool VerifyBufferSize) {
enum {
KnownHeaderSize = 4*4, // Size of header we read.
OffsetField = 2*4, // Offset in bytes to Offset field.

View File

@ -354,6 +354,13 @@ namespace llvm {
void EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
unsigned Size) const;
/// EmitLabelReference - Emit something like ".long Label"
/// where the size in bytes of the directive is specified by Size and Label
/// specifies the label.
void EmitLabelReference(const MCSymbol *Label, unsigned Size) const {
EmitLabelPlusOffset(Label, 0, Size);
}
//===------------------------------------------------------------------===//
// Dwarf Emission Helper Routines
//===------------------------------------------------------------------===//

View File

@ -28,6 +28,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/ADT/DenseMap.h"
#include <map>
namespace llvm {
@ -36,7 +37,7 @@ class MachineInstr;
class MachineLoopInfo;
class MachineDominatorTree;
class InstrItineraryData;
class ScheduleDAGInstrs;
class DefaultVLIWScheduler;
class SUnit;
class DFAPacketizer {
@ -77,6 +78,8 @@ class DFAPacketizer {
// reserveResources - Reserve the resources occupied by a machine
// instruction and change the current state to reflect that change.
void reserveResources(llvm::MachineInstr *MI);
const InstrItineraryData *getInstrItins() const { return InstrItins; }
};
// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
@ -87,20 +90,21 @@ class DFAPacketizer {
// and machine resource is marked as taken. If any dependency is found, a target
// API call is made to prune the dependence.
class VLIWPacketizerList {
protected:
const TargetMachine &TM;
const MachineFunction &MF;
const TargetInstrInfo *TII;
// Encapsulate data types not exposed to the target interface.
ScheduleDAGInstrs *SchedulerImpl;
// The VLIW Scheduler.
DefaultVLIWScheduler *VLIWScheduler;
protected:
// Vector of instructions assigned to the current packet.
std::vector<MachineInstr*> CurrentPacketMIs;
// DFA resource tracker.
DFAPacketizer *ResourceTracker;
// Scheduling units.
std::vector<SUnit> SUnits;
// Generate MI -> SU map.
std::map<MachineInstr*, SUnit*> MIToSUnit;
public:
VLIWPacketizerList(
@ -118,17 +122,32 @@ class VLIWPacketizerList {
DFAPacketizer *getResourceTracker() {return ResourceTracker;}
// addToPacket - Add MI to the current packet.
void addToPacket(MachineInstr *MI);
virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
MachineBasicBlock::iterator MII = MI;
CurrentPacketMIs.push_back(MI);
ResourceTracker->reserveResources(MI);
return MII;
}
// endPacket - End the current packet.
void endPacket(MachineBasicBlock *MBB, MachineInstr *I);
void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
// initPacketizerState - perform initialization before packetizing
// an instruction. This function is supposed to be overridden by
// the target-dependent packetizer.
virtual void initPacketizerState(void) { return; }
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
bool ignorePseudoInstruction(MachineInstr *I, MachineBasicBlock *MBB);
virtual bool ignorePseudoInstruction(MachineInstr *I,
MachineBasicBlock *MBB) {
return false;
}
// isSoloInstruction - return true if instruction I must end previous
// packet.
bool isSoloInstruction(MachineInstr *I);
// isSoloInstruction - return true if instruction MI cannot be packetized
// with any other instruction, which means that MI itself is a packet.
virtual bool isSoloInstruction(MachineInstr *MI) {
return true;
}
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
@ -141,6 +160,7 @@ class VLIWPacketizerList {
virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
return false;
}
};
}

View File

@ -46,7 +46,7 @@ class EdgeBundles : public MachineFunctionPass {
unsigned getNumBundles() const { return EC.getNumClasses(); }
/// getBlocks - Return an array of blocks that are connected to Bundle.
ArrayRef<unsigned> getBlocks(unsigned Bundle) { return Blocks[Bundle]; }
ArrayRef<unsigned> getBlocks(unsigned Bundle) const { return Blocks[Bundle]; }
/// getMachineFunction - Return the last machine function computed.
const MachineFunction *getMachineFunction() const { return MF; }

View File

@ -34,6 +34,7 @@ class MachineFrameInfo;
class MachineRegisterInfo;
class TargetData;
class TargetInstrInfo;
class TargetLibraryInfo;
class TargetLowering;
class TargetMachine;
class TargetRegisterClass;
@ -57,6 +58,7 @@ class FastISel {
const TargetInstrInfo &TII;
const TargetLowering &TLI;
const TargetRegisterInfo &TRI;
const TargetLibraryInfo *LibInfo;
/// The position of the last instruction for materializing constants
/// for use in the current block. It resets to EmitStartPt when it
@ -144,7 +146,8 @@ class FastISel {
virtual ~FastISel();
protected:
explicit FastISel(FunctionLoweringInfo &funcInfo);
explicit FastISel(FunctionLoweringInfo &funcInfo,
const TargetLibraryInfo *libInfo);
/// TargetSelectInstruction - This method is called by target-independent
/// code when the normal FastISel process fails to select an instruction.
@ -299,6 +302,15 @@ class FastISel {
unsigned Op1, bool Op1IsKill,
uint64_t Imm);
/// FastEmitInst_rrii - Emit a MachineInstr with two register operands,
/// two immediate operands, and a result register in the given register
/// class.
unsigned FastEmitInst_rrii(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
uint64_t Imm1, uint64_t Imm2);
/// FastEmitInst_i - Emit a MachineInstr with a single immediate
/// operand, and a result register in the given register class.
unsigned FastEmitInst_i(unsigned MachineInstrOpcode,

View File

@ -48,18 +48,18 @@ namespace llvm {
/// PointKind - The type of a collector-safe point.
///
enum PointKind {
Loop, //< Instr is a loop (backwards branch).
Return, //< Instr is a return instruction.
PreCall, //< Instr is a call instruction.
PostCall //< Instr is the return address of a call.
Loop, ///< Instr is a loop (backwards branch).
Return, ///< Instr is a return instruction.
PreCall, ///< Instr is a call instruction.
PostCall ///< Instr is the return address of a call.
};
}
/// GCPoint - Metadata for a collector-safe point in machine code.
///
struct GCPoint {
GC::PointKind Kind; //< The kind of the safe point.
MCSymbol *Label; //< A label.
GC::PointKind Kind; ///< The kind of the safe point.
MCSymbol *Label; ///< A label.
DebugLoc Loc;
GCPoint(GC::PointKind K, MCSymbol *L, DebugLoc DL)
@ -69,9 +69,10 @@ namespace llvm {
/// GCRoot - Metadata for a pointer to an object managed by the garbage
/// collector.
struct GCRoot {
int Num; //< Usually a frame index.
int StackOffset; //< Offset from the stack pointer.
const Constant *Metadata;//< Metadata straight from the call to llvm.gcroot.
int Num; ///< Usually a frame index.
int StackOffset; ///< Offset from the stack pointer.
const Constant *Metadata; ///< Metadata straight from the call
///< to llvm.gcroot.
GCRoot(int N, const Constant *MD) : Num(N), StackOffset(-1), Metadata(MD) {}
};

View File

@ -65,14 +65,14 @@ namespace llvm {
list_type Functions;
protected:
unsigned NeededSafePoints; //< Bitmask of required safe points.
bool CustomReadBarriers; //< Default is to insert loads.
bool CustomWriteBarriers; //< Default is to insert stores.
bool CustomRoots; //< Default is to pass through to backend.
bool CustomSafePoints; //< Default is to use NeededSafePoints
// to find safe points.
bool InitRoots; //< If set, roots are nulled during lowering.
bool UsesMetadata; //< If set, backend must emit metadata tables.
unsigned NeededSafePoints; ///< Bitmask of required safe points.
bool CustomReadBarriers; ///< Default is to insert loads.
bool CustomWriteBarriers; ///< Default is to insert stores.
bool CustomRoots; ///< Default is to pass through to backend.
bool CustomSafePoints; ///< Default is to use NeededSafePoints
///< to find safe points.
bool InitRoots; ///< If set, roots are nulled during lowering.
bool UsesMetadata; ///< If set, backend must emit metadata tables.
public:
GCStrategy();

View File

@ -37,87 +37,87 @@ namespace ISD {
/// and getMachineOpcode() member functions of SDNode.
///
enum NodeType {
// DELETED_NODE - This is an illegal value that is used to catch
// errors. This opcode is not a legal opcode for any node.
/// DELETED_NODE - This is an illegal value that is used to catch
/// errors. This opcode is not a legal opcode for any node.
DELETED_NODE,
// EntryToken - This is the marker used to indicate the start of the region.
/// EntryToken - This is the marker used to indicate the start of a region.
EntryToken,
// TokenFactor - This node takes multiple tokens as input and produces a
// single token result. This is used to represent the fact that the operand
// operators are independent of each other.
/// TokenFactor - This node takes multiple tokens as input and produces a
/// single token result. This is used to represent the fact that the operand
/// operators are independent of each other.
TokenFactor,
// AssertSext, AssertZext - These nodes record if a register contains a
// value that has already been zero or sign extended from a narrower type.
// These nodes take two operands. The first is the node that has already
// been extended, and the second is a value type node indicating the width
// of the extension
/// AssertSext, AssertZext - These nodes record if a register contains a
/// value that has already been zero or sign extended from a narrower type.
/// These nodes take two operands. The first is the node that has already
/// been extended, and the second is a value type node indicating the width
/// of the extension
AssertSext, AssertZext,
// Various leaf nodes.
/// Various leaf nodes.
BasicBlock, VALUETYPE, CONDCODE, Register, RegisterMask,
Constant, ConstantFP,
GlobalAddress, GlobalTLSAddress, FrameIndex,
JumpTable, ConstantPool, ExternalSymbol, BlockAddress,
// The address of the GOT
/// The address of the GOT
GLOBAL_OFFSET_TABLE,
// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
// llvm.returnaddress on the DAG. These nodes take one operand, the index
// of the frame or return address to return. An index of zero corresponds
// to the current function's frame or return address, an index of one to the
// parent's frame or return address, and so on.
/// FRAMEADDR, RETURNADDR - These nodes represent llvm.frameaddress and
/// llvm.returnaddress on the DAG. These nodes take one operand, the index
/// of the frame or return address to return. An index of zero corresponds
/// to the current function's frame or return address, an index of one to
/// the parent's frame or return address, and so on.
FRAMEADDR, RETURNADDR,
// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
// first (possible) on-stack argument. This is needed for correct stack
// adjustment during unwind.
/// FRAME_TO_ARGS_OFFSET - This node represents offset from frame pointer to
/// first (possible) on-stack argument. This is needed for correct stack
/// adjustment during unwind.
FRAME_TO_ARGS_OFFSET,
// RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
// address of the exception block on entry to a landing pad block.
/// RESULT, OUTCHAIN = EXCEPTIONADDR(INCHAIN) - This node represents the
/// address of the exception block on entry to a landing pad block.
EXCEPTIONADDR,
// RESULT, OUTCHAIN = LSDAADDR(INCHAIN) - This node represents the
// address of the Language Specific Data Area for the enclosing function.
/// RESULT, OUTCHAIN = LSDAADDR(INCHAIN) - This node represents the
/// address of the Language Specific Data Area for the enclosing function.
LSDAADDR,
// RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node represents
// the selection index of the exception thrown.
/// RESULT, OUTCHAIN = EHSELECTION(INCHAIN, EXCEPTION) - This node
/// represents the selection index of the exception thrown.
EHSELECTION,
// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
// 'eh_return' gcc dwarf builtin, which is used to return from
// exception. The general meaning is: adjust stack by OFFSET and pass
// execution to HANDLER. Many platform-related details also :)
/// OUTCHAIN = EH_RETURN(INCHAIN, OFFSET, HANDLER) - This node represents
/// 'eh_return' gcc dwarf builtin, which is used to return from
/// exception. The general meaning is: adjust stack by OFFSET and pass
/// execution to HANDLER. Many platform-related details also :)
EH_RETURN,
// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
// This corresponds to the eh.sjlj.setjmp intrinsic.
// It takes an input chain and a pointer to the jump buffer as inputs
// and returns an outchain.
/// RESULT, OUTCHAIN = EH_SJLJ_SETJMP(INCHAIN, buffer)
/// This corresponds to the eh.sjlj.setjmp intrinsic.
/// It takes an input chain and a pointer to the jump buffer as inputs
/// and returns an outchain.
EH_SJLJ_SETJMP,
// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
// This corresponds to the eh.sjlj.longjmp intrinsic.
// It takes an input chain and a pointer to the jump buffer as inputs
// and returns an outchain.
/// OUTCHAIN = EH_SJLJ_LONGJMP(INCHAIN, buffer)
/// This corresponds to the eh.sjlj.longjmp intrinsic.
/// It takes an input chain and a pointer to the jump buffer as inputs
/// and returns an outchain.
EH_SJLJ_LONGJMP,
// TargetConstant* - Like Constant*, but the DAG does not do any folding,
// simplification, or lowering of the constant. They are used for constants
// which are known to fit in the immediate fields of their users, or for
// carrying magic numbers which are not values which need to be materialized
// in registers.
/// TargetConstant* - Like Constant*, but the DAG does not do any folding,
/// simplification, or lowering of the constant. They are used for constants
/// which are known to fit in the immediate fields of their users, or for
/// carrying magic numbers which are not values which need to be
/// materialized in registers.
TargetConstant,
TargetConstantFP,
// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
// anything else with this node, and this is valid in the target-specific
// dag, turning into a GlobalAddress operand.
/// TargetGlobalAddress - Like GlobalAddress, but the DAG does no folding or
/// anything else with this node, and this is valid in the target-specific
/// dag, turning into a GlobalAddress operand.
TargetGlobalAddress,
TargetGlobalTLSAddress,
TargetFrameIndex,
@ -126,6 +126,11 @@ namespace ISD {
TargetExternalSymbol,
TargetBlockAddress,
/// TargetIndex - Like a constant pool entry, but with completely
/// target-dependent semantics. Holds target flags, a 32-bit index, and a
/// 64-bit index. Targets can use this however they like.
TargetIndex,
/// RESULT = INTRINSIC_WO_CHAIN(INTRINSICID, arg1, arg2, ...)
/// This node represents a target intrinsic function with no side effects.
/// The first operand is the ID number of the intrinsic from the
@ -148,93 +153,94 @@ namespace ISD {
/// namespace. The operands to the intrinsic follow.
INTRINSIC_VOID,
// CopyToReg - This node has three operands: a chain, a register number to
// set to this value, and a value.
/// CopyToReg - This node has three operands: a chain, a register number to
/// set to this value, and a value.
CopyToReg,
// CopyFromReg - This node indicates that the input value is a virtual or
// physical register that is defined outside of the scope of this
// SelectionDAG. The register is available from the RegisterSDNode object.
/// CopyFromReg - This node indicates that the input value is a virtual or
/// physical register that is defined outside of the scope of this
/// SelectionDAG. The register is available from the RegisterSDNode object.
CopyFromReg,
// UNDEF - An undefined node
/// UNDEF - An undefined node.
UNDEF,
// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
// a Constant, which is required to be operand #1) half of the integer or
// float value specified as operand #0. This is only for use before
// legalization, for values that will be broken into multiple registers.
/// EXTRACT_ELEMENT - This is used to get the lower or upper (determined by
/// a Constant, which is required to be operand #1) half of the integer or
/// float value specified as operand #0. This is only for use before
/// legalization, for values that will be broken into multiple registers.
EXTRACT_ELEMENT,
// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways. Given
// two values of the same integer value type, this produces a value twice as
// big. Like EXTRACT_ELEMENT, this can only be used before legalization.
/// BUILD_PAIR - This is the opposite of EXTRACT_ELEMENT in some ways.
/// Given two values of the same integer value type, this produces a value
/// twice as big. Like EXTRACT_ELEMENT, this can only be used before
/// legalization.
BUILD_PAIR,
// MERGE_VALUES - This node takes multiple discrete operands and returns
// them all as its individual results. This node has exactly the same
// number of inputs and outputs. This node is useful for some pieces of the
// code generator that want to think about a single node with multiple
// results, not multiple nodes.
/// MERGE_VALUES - This node takes multiple discrete operands and returns
/// them all as its individual results. This node has exactly the same
/// number of inputs and outputs. This node is useful for some pieces of the
/// code generator that want to think about a single node with multiple
/// results, not multiple nodes.
MERGE_VALUES,
// Simple integer binary arithmetic operators.
/// Simple integer binary arithmetic operators.
ADD, SUB, MUL, SDIV, UDIV, SREM, UREM,
// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
// a signed/unsigned value of type i[2*N], and return the full value as
// two results, each of type iN.
/// SMUL_LOHI/UMUL_LOHI - Multiply two integers of type iN, producing
/// a signed/unsigned value of type i[2*N], and return the full value as
/// two results, each of type iN.
SMUL_LOHI, UMUL_LOHI,
// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
// remainder result.
/// SDIVREM/UDIVREM - Divide two integers and produce both a quotient and
/// remainder result.
SDIVREM, UDIVREM,
// CARRY_FALSE - This node is used when folding other nodes,
// like ADDC/SUBC, which indicate the carry result is always false.
/// CARRY_FALSE - This node is used when folding other nodes,
/// like ADDC/SUBC, which indicate the carry result is always false.
CARRY_FALSE,
// Carry-setting nodes for multiple precision addition and subtraction.
// These nodes take two operands of the same value type, and produce two
// results. The first result is the normal add or sub result, the second
// result is the carry flag result.
/// Carry-setting nodes for multiple precision addition and subtraction.
/// These nodes take two operands of the same value type, and produce two
/// results. The first result is the normal add or sub result, the second
/// result is the carry flag result.
ADDC, SUBC,
// Carry-using nodes for multiple precision addition and subtraction. These
// nodes take three operands: The first two are the normal lhs and rhs to
// the add or sub, and the third is the input carry flag. These nodes
// produce two results; the normal result of the add or sub, and the output
// carry flag. These nodes both read and write a carry flag to allow them
// to be chained together for add and sub of arbitrarily large
// values.
/// Carry-using nodes for multiple precision addition and subtraction. These
/// nodes take three operands: The first two are the normal lhs and rhs to
/// the add or sub, and the third is the input carry flag. These nodes
/// produce two results; the normal result of the add or sub, and the output
/// carry flag. These nodes both read and write a carry flag to allow them
/// to be chained together for add and sub of arbitrarily large
/// values.
ADDE, SUBE,
// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
// These nodes take two operands: the normal LHS and RHS to the add. They
// produce two results: the normal result of the add, and a boolean that
// indicates if an overflow occurred (*not* a flag, because it may be stored
// to memory, etc.). If the type of the boolean is not i1 then the high
// bits conform to getBooleanContents.
// These nodes are generated from the llvm.[su]add.with.overflow intrinsics.
/// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
/// These nodes take two operands: the normal LHS and RHS to the add. They
/// produce two results: the normal result of the add, and a boolean that
/// indicates if an overflow occurred (*not* a flag, because it may be stored
/// to memory, etc.). If the type of the boolean is not i1 then the high
/// bits conform to getBooleanContents.
/// These nodes are generated from llvm.[su]add.with.overflow intrinsics.
SADDO, UADDO,
// Same for subtraction
/// Same for subtraction.
SSUBO, USUBO,
// Same for multiplication
/// Same for multiplication.
SMULO, UMULO,
// Simple binary floating point operators.
/// Simple binary floating point operators.
FADD, FSUB, FMUL, FMA, FDIV, FREM,
// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
// DAG node does not require that X and Y have the same type, just that they
// are both floating point. X and the result must have the same type.
// FCOPYSIGN(f32, f64) is allowed.
/// FCOPYSIGN(X, Y) - Return the value of X with the sign of Y. NOTE: This
/// DAG node does not require that X and Y have the same type, just that they
/// are both floating point. X and the result must have the same type.
/// FCOPYSIGN(f32, f64) is allowed.
FCOPYSIGN,
// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
// value as an integer 0/1 value.
/// INT = FGETSIGN(FP) - Return the sign bit of the specified floating point
/// value as an integer 0/1 value.
FGETSIGN,
/// BUILD_VECTOR(ELT0, ELT1, ELT2, ELT3,...) - Return a vector with the
@ -292,13 +298,14 @@ namespace ISD {
/// than the vector element type, and is implicitly truncated to it.
SCALAR_TO_VECTOR,
// MULHU/MULHS - Multiply high - Multiply two integers of type iN, producing
// an unsigned/signed value of type i[2*N], then return the top part.
/// MULHU/MULHS - Multiply high - Multiply two integers of type iN,
/// producing an unsigned/signed value of type i[2*N], then return the top
/// part.
MULHU, MULHS,
/// Bitwise operators - logical and, logical or, logical xor.
AND, OR, XOR,
/// Shift and rotation operations. After legalization, the type of the
/// shift amount is known to be TLI.getShiftAmountTy(). Before legalization
/// the shift amount can be any type, but care must be taken to ensure it is
@ -306,7 +313,6 @@ namespace ISD {
/// legalization, types like i1024 can occur and i8 doesn't have enough bits
/// to represent the shift amount. By convention, DAGCombine and
/// SelectionDAGBuilder forces these shift amounts to i32 for simplicity.
///
SHL, SRA, SRL, ROTL, ROTR,
/// Byte Swap and Counting operators.
@ -315,67 +321,67 @@ namespace ISD {
/// Bit counting operators with an undefined result for zero inputs.
CTTZ_ZERO_UNDEF, CTLZ_ZERO_UNDEF,
// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
// i1 then the high bits must conform to getBooleanContents.
/// Select(COND, TRUEVAL, FALSEVAL). If the type of the boolean COND is not
/// i1 then the high bits must conform to getBooleanContents.
SELECT,
// Select with a vector condition (op #0) and two vector operands (ops #1
// and #2), returning a vector result. All vectors have the same length.
// Much like the scalar select and setcc, each bit in the condition selects
// whether the corresponding result element is taken from op #1 or op #2.
// At first, the VSELECT condition is of vXi1 type. Later, targets may change
// the condition type in order to match the VSELECT node using a pattern.
// The condition follows the BooleanContent format of the target.
/// Select with a vector condition (op #0) and two vector operands (ops #1
/// and #2), returning a vector result. All vectors have the same length.
/// Much like the scalar select and setcc, each bit in the condition selects
/// whether the corresponding result element is taken from op #1 or op #2.
/// At first, the VSELECT condition is of vXi1 type. Later, targets may
/// change the condition type in order to match the VSELECT node using a
/// pattern. The condition follows the BooleanContent format of the target.
VSELECT,
// Select with condition operator - This selects between a true value and
// a false value (ops #2 and #3) based on the boolean result of comparing
// the lhs and rhs (ops #0 and #1) of a conditional expression with the
// condition code in op #4, a CondCodeSDNode.
/// Select with condition operator - This selects between a true value and
/// a false value (ops #2 and #3) based on the boolean result of comparing
/// the lhs and rhs (ops #0 and #1) of a conditional expression with the
/// condition code in op #4, a CondCodeSDNode.
SELECT_CC,
// SetCC operator - This evaluates to a true value iff the condition is
// true. If the result value type is not i1 then the high bits conform
// to getBooleanContents. The operands to this are the left and right
// operands to compare (ops #0, and #1) and the condition code to compare
// them with (op #2) as a CondCodeSDNode. If the operands are vector types
// then the result type must also be a vector type.
/// SetCC operator - This evaluates to a true value iff the condition is
/// true. If the result value type is not i1 then the high bits conform
/// to getBooleanContents. The operands to this are the left and right
/// operands to compare (ops #0, and #1) and the condition code to compare
/// them with (op #2) as a CondCodeSDNode. If the operands are vector types
/// then the result type must also be a vector type.
SETCC,
// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
// integer shift operations, just like ADD/SUB_PARTS. The operation
// ordering is:
// [Lo,Hi] = op [LoLHS,HiLHS], Amt
/// SHL_PARTS/SRA_PARTS/SRL_PARTS - These operators are used for expanded
/// integer shift operations, just like ADD/SUB_PARTS. The operation
/// ordering is:
/// [Lo,Hi] = op [LoLHS,HiLHS], Amt
SHL_PARTS, SRA_PARTS, SRL_PARTS,
// Conversion operators. These are all single input single output
// operations. For all of these, the result type must be strictly
// wider or narrower (depending on the operation) than the source
// type.
/// Conversion operators. These are all single input single output
/// operations. For all of these, the result type must be strictly
/// wider or narrower (depending on the operation) than the source
/// type.
// SIGN_EXTEND - Used for integer types, replicating the sign bit
// into new bits.
/// SIGN_EXTEND - Used for integer types, replicating the sign bit
/// into new bits.
SIGN_EXTEND,
// ZERO_EXTEND - Used for integer types, zeroing the new bits.
/// ZERO_EXTEND - Used for integer types, zeroing the new bits.
ZERO_EXTEND,
// ANY_EXTEND - Used for integer types. The high bits are undefined.
/// ANY_EXTEND - Used for integer types. The high bits are undefined.
ANY_EXTEND,
// TRUNCATE - Completely drop the high bits.
/// TRUNCATE - Completely drop the high bits.
TRUNCATE,
// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
// depends on the first letter) to floating point.
/// [SU]INT_TO_FP - These operators convert integers (whose interpreted sign
/// depends on the first letter) to floating point.
SINT_TO_FP,
UINT_TO_FP,
// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
// sign extend a small value in a large integer register (e.g. sign
// extending the low 8 bits of a 32-bit register to fill the top 24 bits
// with the 7th bit). The size of the smaller type is indicated by the 1th
// operand, a ValueType node.
/// SIGN_EXTEND_INREG - This operator atomically performs a SHL/SRA pair to
/// sign extend a small value in a large integer register (e.g. sign
/// extending the low 8 bits of a 32-bit register to fill the top 24 bits
/// with the 7th bit). The size of the smaller type is indicated by
/// operand #1, a ValueType node.
SIGN_EXTEND_INREG,
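// Worked example (illustrative only): SIGN_EXTEND_INREG of the low 8 bits
// of an i32 computes the equivalent of the SHL/SRA pair
//
//   int32_t sext_inreg_i8(int32_t x) {
//     return (x << 24) >> 24;   // arithmetic shift replicates bit 7 upward
//   }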
/// FP_TO_[US]INT - Convert a floating point value to a signed or unsigned
@ -396,12 +402,12 @@ namespace ISD {
/// FP_EXTEND(FP_ROUND(X,0)) because the extra bits aren't removed.
FP_ROUND,
// FLT_ROUNDS_ - Returns current rounding mode:
// -1 Undefined
// 0 Round to 0
// 1 Round to nearest
// 2 Round to +inf
// 3 Round to -inf
/// FLT_ROUNDS_ - Returns current rounding mode:
/// -1 Undefined
/// 0 Round to 0
/// 1 Round to nearest
/// 2 Round to +inf
/// 3 Round to -inf
FLT_ROUNDS_,
/// X = FP_ROUND_INREG(Y, VT) - This operator takes an FP register, and
@ -414,208 +420,211 @@ namespace ISD {
/// X = FP_EXTEND(Y) - Extend a smaller FP type into a larger FP type.
FP_EXTEND,
// BITCAST - This operator converts between integer, vector and FP
// values, as if the value was stored to memory with one type and loaded
// from the same address with the other type (or equivalently for vector
// format conversions, etc). The source and result are required to have
// the same bit size (e.g. f32 <-> i32). This can also be used for
// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
// getNode().
/// BITCAST - This operator converts between integer, vector and FP
/// values, as if the value was stored to memory with one type and loaded
/// from the same address with the other type (or equivalently for vector
/// format conversions, etc). The source and result are required to have
/// the same bit size (e.g. f32 <-> i32). This can also be used for
/// int-to-int or fp-to-fp conversions, but that is a noop, deleted by
/// getNode().
BITCAST,
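// The store/load equivalence can be pictured as an ordinary same-size
// reinterpretation (a sketch, not part of the header):
//
//   uint32_t bitcast_f32_to_i32(float F) {  // f32 <-> i32, same bit width
//     uint32_t I;
//     std::memcpy(&I, &F, sizeof I);        // store one type, load the other
//     return I;
//   }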
// CONVERT_RNDSAT - This operator is used to support various conversions
// between various types (float, signed, unsigned and vectors of those
// types) with rounding and saturation. NOTE: Avoid using this operator as
// most targets don't support it and the operator might be removed in the
// future. It takes the following arguments:
// 0) value
// 1) dest type (type to convert to)
// 2) src type (type to convert from)
// 3) rounding imm
// 4) saturation imm
// 5) ISD::CvtCode indicating the type of conversion to do
/// CONVERT_RNDSAT - This operator is used to support various conversions
/// between various types (float, signed, unsigned and vectors of those
/// types) with rounding and saturation. NOTE: Avoid using this operator as
/// most targets don't support it and the operator might be removed in the
/// future. It takes the following arguments:
/// 0) value
/// 1) dest type (type to convert to)
/// 2) src type (type to convert from)
/// 3) rounding imm
/// 4) saturation imm
/// 5) ISD::CvtCode indicating the type of conversion to do
CONVERT_RNDSAT,
// FP16_TO_FP32, FP32_TO_FP16 - These operators are used to perform
// promotions and truncation for half-precision (16 bit) floating
// numbers. We need special nodes since FP16 is a storage-only type with
// special semantics of operations.
/// FP16_TO_FP32, FP32_TO_FP16 - These operators are used to perform
/// promotions and truncation for half-precision (16 bit) floating point
/// numbers. We need special nodes since FP16 is a storage-only type with
/// special semantics of operations.
FP16_TO_FP32, FP32_TO_FP16,
// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
// FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary floating
// point operations. These are inspired by libm.
/// FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
/// FLOG, FLOG2, FLOG10, FEXP, FEXP2,
/// FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR - Perform various unary
/// floating point operations. These are inspired by libm.
FNEG, FABS, FSQRT, FSIN, FCOS, FPOWI, FPOW,
FLOG, FLOG2, FLOG10, FEXP, FEXP2,
FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR,
// LOAD and STORE have token chains as their first operand, then the same
// operands as an LLVM load/store instruction, then an offset node that
// is added / subtracted from the base pointer to form the address (for
// indexed memory ops).
/// LOAD and STORE have token chains as their first operand, then the same
/// operands as an LLVM load/store instruction, then an offset node that
/// is added / subtracted from the base pointer to form the address (for
/// indexed memory ops).
LOAD, STORE,
// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
// to a specified boundary. This node always has two return values: a new
// stack pointer value and a chain. The first operand is the token chain,
// the second is the number of bytes to allocate, and the third is the
// alignment boundary. The size is guaranteed to be a multiple of the stack
// alignment, and the alignment is guaranteed to be bigger than the stack
// alignment (if required) or 0 to get standard stack alignment.
/// DYNAMIC_STACKALLOC - Allocate some number of bytes on the stack aligned
/// to a specified boundary. This node always has two return values: a new
/// stack pointer value and a chain. The first operand is the token chain,
/// the second is the number of bytes to allocate, and the third is the
/// alignment boundary. The size is guaranteed to be a multiple of the
/// stack alignment, and the alignment is guaranteed to be bigger than the
/// stack alignment (if required) or 0 to get standard stack alignment.
DYNAMIC_STACKALLOC,
// Control flow instructions. These all have token chains.
/// Control flow instructions. These all have token chains.
// BR - Unconditional branch. The first operand is the chain
// operand, the second is the MBB to branch to.
/// BR - Unconditional branch. The first operand is the chain
/// operand, the second is the MBB to branch to.
BR,
// BRIND - Indirect branch. The first operand is the chain, the second
// is the value to branch to, which must be of the same type as the target's
// pointer type.
/// BRIND - Indirect branch. The first operand is the chain, the second
/// is the value to branch to, which must be of the same type as the
/// target's pointer type.
BRIND,
// BR_JT - Jumptable branch. The first operand is the chain, the second
// is the jumptable index, the last one is the jumptable entry index.
/// BR_JT - Jumptable branch. The first operand is the chain, the second
/// is the jumptable index, the last one is the jumptable entry index.
BR_JT,
// BRCOND - Conditional branch. The first operand is the chain, the
// second is the condition, the third is the block to branch to if the
// condition is true. If the type of the condition is not i1, then the
// high bits must conform to getBooleanContents.
/// BRCOND - Conditional branch. The first operand is the chain, the
/// second is the condition, the third is the block to branch to if the
/// condition is true. If the type of the condition is not i1, then the
/// high bits must conform to getBooleanContents.
BRCOND,
// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
// that the condition is represented as condition code, and two nodes to
// compare, rather than as a combined SetCC node. The operands in order are
// chain, cc, lhs, rhs, block to branch to if condition is true.
/// BR_CC - Conditional branch. The behavior is like that of SELECT_CC, in
/// that the condition is represented as condition code, and two nodes to
/// compare, rather than as a combined SetCC node. The operands in order
/// are chain, cc, lhs, rhs, block to branch to if condition is true.
BR_CC,
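// A hedged sketch of emitting these during lowering; assumes Chain, Cond,
// LHS, RHS, a MachineBasicBlock *Dest, a SelectionDAG &DAG and a debug
// location DL are in scope (not part of this header):
//
//   SDValue DestBB = DAG.getBasicBlock(Dest);
//   // BRCOND: branch on an already-computed boolean.
//   SDValue Br = DAG.getNode(ISD::BRCOND, DL, MVT::Other, Chain, Cond,
//                            DestBB);
//   // BR_CC: carry the comparison in the branch node itself.
//   SDValue BrCC = DAG.getNode(ISD::BR_CC, DL, MVT::Other, Chain,
//                              DAG.getCondCode(ISD::SETEQ), LHS, RHS, DestBB);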
// INLINEASM - Represents an inline asm block. This node always has two
// return values: a chain and a flag result. The inputs are as follows:
// Operand #0 : Input chain.
// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
// Operand #3 : HasSideEffect, IsAlignStack bits.
// After this, it is followed by a list of operands with this format:
// ConstantSDNode: Flags that encode whether it is a mem or not, the
// number of operands that follow, etc. See InlineAsm.h.
// ... however many operands ...
// Operand #last: Optional, an incoming flag.
//
// The variable width operands are required to represent target addressing
// modes as a single "operand", even though they may have multiple
// SDOperands.
/// INLINEASM - Represents an inline asm block. This node always has two
/// return values: a chain and a flag result. The inputs are as follows:
/// Operand #0 : Input chain.
/// Operand #1 : a ExternalSymbolSDNode with a pointer to the asm string.
/// Operand #2 : a MDNodeSDNode with the !srcloc metadata.
/// Operand #3 : HasSideEffect, IsAlignStack bits.
/// After this, it is followed by a list of operands with this format:
/// ConstantSDNode: Flags that encode whether it is a mem or not, the
/// number of operands that follow, etc. See InlineAsm.h.
/// ... however many operands ...
/// Operand #last: Optional, an incoming flag.
///
/// The variable width operands are required to represent target addressing
/// modes as a single "operand", even though they may have multiple
/// SDOperands.
INLINEASM,
// EH_LABEL - Represents a label in mid basic block used to track
// locations needed for debug and exception handling tables. These nodes
// take a chain as input and return a chain.
/// EH_LABEL - Represents a label in mid basic block used to track
/// locations needed for debug and exception handling tables. These nodes
/// take a chain as input and return a chain.
EH_LABEL,
// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
// value, the same type as the pointer type for the system, and an output
// chain.
/// STACKSAVE - STACKSAVE has one operand, an input chain. It produces a
/// value, the same type as the pointer type for the system, and an output
/// chain.
STACKSAVE,
// STACKRESTORE has two operands, an input chain and a pointer to restore to
// it returns an output chain.
/// STACKRESTORE has two operands, an input chain and a pointer to restore
/// to; it returns an output chain.
STACKRESTORE,
// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end of
// a call sequence, and carry arbitrary information that target might want
// to know. The first operand is a chain, the rest are specified by the
// target and not touched by the DAG optimizers.
// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
/// CALLSEQ_START/CALLSEQ_END - These operators mark the beginning and end
/// of a call sequence, and carry arbitrary information that the target might
/// want to know. The first operand is a chain, the rest are specified by
/// the target and not touched by the DAG optimizers.
/// CALLSEQ_START..CALLSEQ_END pairs may not be nested.
CALLSEQ_START, // Beginning of a call sequence
CALLSEQ_END, // End of a call sequence
// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
// and the alignment. It returns a pair of values: the vaarg value and a
// new chain.
/// VAARG - VAARG has four operands: an input chain, a pointer, a SRCVALUE,
/// and the alignment. It returns a pair of values: the vaarg value and a
/// new chain.
VAARG,
// VACOPY - VACOPY has five operands: an input chain, a destination pointer,
// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
// source.
/// VACOPY - VACOPY has 5 operands: an input chain, a destination pointer,
/// a source pointer, a SRCVALUE for the destination, and a SRCVALUE for the
/// source.
VACOPY,
// VAEND, VASTART - VAEND and VASTART have three operands: an input chain, a
// pointer, and a SRCVALUE.
/// VAEND, VASTART - VAEND and VASTART have three operands: an input chain,
/// a pointer, and a SRCVALUE.
VAEND, VASTART,
// SRCVALUE - This is a node type that holds a Value* that is used to
// make reference to a value in the LLVM IR.
/// SRCVALUE - This is a node type that holds a Value* that is used to
/// make reference to a value in the LLVM IR.
SRCVALUE,
// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
// reference metadata in the IR.
/// MDNODE_SDNODE - This is a node that holds an MDNode*, which is used to
/// reference metadata in the IR.
MDNODE_SDNODE,
// PCMARKER - This corresponds to the pcmarker intrinsic.
/// PCMARKER - This corresponds to the pcmarker intrinsic.
PCMARKER,
// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
// The only operand is a chain and a value and a chain are produced. The
// value is the contents of the architecture specific cycle counter like
// register (or other high accuracy low latency clock source)
/// READCYCLECOUNTER - This corresponds to the readcyclecounter intrinsic.
/// Its only operand is a chain; it produces a value and a chain. The
/// value is the contents of the architecture-specific cycle counter-like
/// register (or other high-accuracy, low-latency clock source).
READCYCLECOUNTER,
// HANDLENODE node - Used as a handle for various purposes.
/// HANDLENODE node - Used as a handle for various purposes.
HANDLENODE,
// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
// takes as input a token chain, the pointer to the trampoline, the pointer
// to the nested function, the pointer to pass for the 'nest' parameter, a
// SRCVALUE for the trampoline and another for the nested function (allowing
// targets to access the original Function*). It produces a token chain as
// output.
/// INIT_TRAMPOLINE - This corresponds to the init_trampoline intrinsic. It
/// takes as input a token chain, the pointer to the trampoline, the pointer
/// to the nested function, the pointer to pass for the 'nest' parameter, a
/// SRCVALUE for the trampoline and another for the nested function
/// (allowing targets to access the original Function*).
/// It produces a token chain as output.
INIT_TRAMPOLINE,
// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
// It takes a pointer to the trampoline and produces a (possibly) new
// pointer to the same trampoline with platform-specific adjustments
// applied. The pointer it returns points to an executable block of code.
/// ADJUST_TRAMPOLINE - This corresponds to the adjust_trampoline intrinsic.
/// It takes a pointer to the trampoline and produces a (possibly) new
/// pointer to the same trampoline with platform-specific adjustments
/// applied. The pointer it returns points to an executable block of code.
ADJUST_TRAMPOLINE,
// TRAP - Trapping instruction
/// TRAP - Trapping instruction
TRAP,
// PREFETCH - This corresponds to a prefetch intrinsic. It takes a chain as
// its first operand. The other operands are the address to prefetch,
// read / write specifier, locality specifier and instruction / data cache
// specifier.
/// DEBUGTRAP - Trap intended to get the attention of a debugger.
DEBUGTRAP,
/// PREFETCH - This corresponds to a prefetch intrinsic. The first operand
/// is the chain. The other operands are the address to prefetch,
/// read / write specifier, locality specifier and instruction / data cache
/// specifier.
PREFETCH,
// OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
// store-store, device)
// This corresponds to the memory.barrier intrinsic.
// it takes an input chain, 4 operands to specify the type of barrier, an
// operand specifying if the barrier applies to device and uncached memory
// and produces an output chain.
/// OUTCHAIN = MEMBARRIER(INCHAIN, load-load, load-store, store-load,
/// store-store, device)
/// This corresponds to the memory.barrier intrinsic.
/// It takes an input chain, 4 operands to specify the type of barrier, an
/// operand specifying if the barrier applies to device and uncached memory
/// and produces an output chain.
MEMBARRIER,
// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
// This corresponds to the fence instruction. It takes an input chain, and
// two integer constants: an AtomicOrdering and a SynchronizationScope.
/// OUTCHAIN = ATOMIC_FENCE(INCHAIN, ordering, scope)
/// This corresponds to the fence instruction. It takes an input chain, and
/// two integer constants: an AtomicOrdering and a SynchronizationScope.
ATOMIC_FENCE,
// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
// This corresponds to "load atomic" instruction.
/// Val, OUTCHAIN = ATOMIC_LOAD(INCHAIN, ptr)
/// This corresponds to "load atomic" instruction.
ATOMIC_LOAD,
// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
// This corresponds to "store atomic" instruction.
/// OUTCHAIN = ATOMIC_STORE(INCHAIN, ptr, val)
/// This corresponds to "store atomic" instruction.
ATOMIC_STORE,
// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
// This corresponds to the cmpxchg instruction.
/// Val, OUTCHAIN = ATOMIC_CMP_SWAP(INCHAIN, ptr, cmp, swap)
/// This corresponds to the cmpxchg instruction.
ATOMIC_CMP_SWAP,
// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
// These correspond to the atomicrmw instruction.
/// Val, OUTCHAIN = ATOMIC_SWAP(INCHAIN, ptr, amt)
/// Val, OUTCHAIN = ATOMIC_LOAD_[OpName](INCHAIN, ptr, amt)
/// These correspond to the atomicrmw instruction.
ATOMIC_SWAP,
ATOMIC_LOAD_ADD,
ATOMIC_LOAD_SUB,
@ -790,16 +799,16 @@ namespace ISD {
/// CvtCode enum - This enum defines the various conversions CONVERT_RNDSAT
/// supports.
enum CvtCode {
CVT_FF, // Float from Float
CVT_FS, // Float from Signed
CVT_FU, // Float from Unsigned
CVT_SF, // Signed from Float
CVT_UF, // Unsigned from Float
CVT_SS, // Signed from Signed
CVT_SU, // Signed from Unsigned
CVT_US, // Unsigned from Signed
CVT_UU, // Unsigned from Unsigned
CVT_INVALID // Marker - Invalid opcode
CVT_FF, /// Float from Float
CVT_FS, /// Float from Signed
CVT_FU, /// Float from Unsigned
CVT_SF, /// Signed from Float
CVT_UF, /// Unsigned from Float
CVT_SS, /// Signed from Signed
CVT_SU, /// Signed from Unsigned
CVT_US, /// Unsigned from Signed
CVT_UU, /// Unsigned from Unsigned
CVT_INVALID /// Marker - Invalid opcode
};
} // end llvm::ISD namespace
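// A hedged example of creating a CONVERT_RNDSAT node with one of these
// codes; Val, DTy, STy, Rnd, Sat, FloatVT, DAG and DL are assumed to be
// in scope (and note the advice above against relying on this operator):
//
//   SDValue Cvt = DAG.getConvertRndSat(FloatVT, DL, Val, DTy, STy,
//                                      Rnd, Sat, ISD::CVT_SF);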

View File

@ -158,7 +158,10 @@ class LexicalScope {
public:
LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A)
: Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A),
LastInsn(0), FirstInsn(0), DFSIn(0), DFSOut(0), IndentLevel(0) {
LastInsn(0), FirstInsn(0), DFSIn(0), DFSOut(0) {
#ifndef NDEBUG
IndentLevel = 0;
#endif
if (Parent)
Parent->addChild(this);
}
@ -241,7 +244,9 @@ class LexicalScope {
const MachineInstr *FirstInsn; // First instruction of this scope.
unsigned DFSIn, DFSOut; // In & Out Depth use to determine
// scope nesting.
#ifndef NDEBUG
mutable unsigned IndentLevel; // Private state for dump()
#endif
};
} // end llvm namespace

View File

@ -40,15 +40,6 @@ namespace llvm {
/// definition and use points.
///
class VNInfo {
private:
enum {
HAS_PHI_KILL = 1,
IS_PHI_DEF = 1 << 1,
IS_UNUSED = 1 << 2
};
unsigned char flags;
public:
typedef BumpPtrAllocator Allocator;
@ -60,60 +51,30 @@ namespace llvm {
/// VNInfo constructor.
VNInfo(unsigned i, SlotIndex d)
: flags(0), id(i), def(d)
: id(i), def(d)
{ }
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
: flags(orig.flags), id(i), def(orig.def)
: id(i), def(orig.def)
{ }
/// Copy from the parameter into this VNInfo.
void copyFrom(VNInfo &src) {
flags = src.flags;
def = src.def;
}
/// Used for copying value number info.
unsigned getFlags() const { return flags; }
void setFlags(unsigned flags) { this->flags = flags; }
/// Merge flags from another VNInfo
void mergeFlags(const VNInfo *VNI) {
flags = (flags | VNI->flags) & ~IS_UNUSED;
}
/// Returns true if one or more kills are PHI nodes.
/// Obsolete, do not use!
bool hasPHIKill() const { return flags & HAS_PHI_KILL; }
/// Set the PHI kill flag on this value.
void setHasPHIKill(bool hasKill) {
if (hasKill)
flags |= HAS_PHI_KILL;
else
flags &= ~HAS_PHI_KILL;
}
/// Returns true if this value is defined by a PHI instruction (or was,
/// PHI instructions may have been eliminated).
bool isPHIDef() const { return flags & IS_PHI_DEF; }
/// Set the "phi def" flag on this value.
void setIsPHIDef(bool phiDef) {
if (phiDef)
flags |= IS_PHI_DEF;
else
flags &= ~IS_PHI_DEF;
}
/// PHI-defs begin at a block boundary, all other defs begin at register or
/// EC slots.
bool isPHIDef() const { return def.isBlock(); }
/// Returns true if this value is unused.
bool isUnused() const { return flags & IS_UNUSED; }
/// Set the "is unused" flag on this value.
void setIsUnused(bool unused) {
if (unused)
flags |= IS_UNUSED;
else
flags &= ~IS_UNUSED;
}
bool isUnused() const { return !def.isValid(); }
/// Mark this value as unused.
void markUnused() { def = SlotIndex(); }
};
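// A minimal sketch of querying the new flag-free representation (assumes
// a LiveInterval &LI; handleDef is hypothetical):
//
//   for (unsigned i = 0, e = LI.getNumValNums(); i != e; ++i) {
//     VNInfo *VNI = LI.getValNumInfo(i);
//     if (VNI->isUnused())             // def is the invalid SlotIndex
//       continue;
//     handleDef(VNI, VNI->isPHIDef()); // PHI-defs sit on block boundaries
//   }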
/// LiveRange structure - This represents a simple register range in the
@ -274,6 +235,11 @@ namespace llvm {
return VNI;
}
/// createDeadDef - Make sure the interval has a value defined at Def.
/// If one already exists, return it. Otherwise allocate a new value and
/// add liveness for a dead def.
VNInfo *createDeadDef(SlotIndex Def, VNInfo::Allocator &VNInfoAllocator);
/// Create a copy of the given value. The new value will be identical except
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig,
@ -288,17 +254,6 @@ namespace llvm {
/// unused values.
void RenumberValues(LiveIntervals &lis);
/// isOnlyLROfValNo - Return true if the specified live range is the only
/// one defined by its val#.
bool isOnlyLROfValNo(const LiveRange *LR) {
for (const_iterator I = begin(), E = end(); I != E; ++I) {
const LiveRange *Tmp = I;
if (Tmp != LR && Tmp->valno == LR->valno)
return false;
}
return true;
}
/// MergeValueNumberInto - This method is called when two value numbers
/// are found to be equivalent. This eliminates V1, replacing all
/// LiveRanges with the V1 value number with the V2 value number. This can
@ -377,14 +332,6 @@ namespace llvm {
return I == end() ? 0 : &*I;
}
const LiveRange *getLiveRangeBefore(SlotIndex Idx) const {
return getLiveRangeContaining(Idx.getPrevSlot());
}
LiveRange *getLiveRangeBefore(SlotIndex Idx) {
return getLiveRangeContaining(Idx.getPrevSlot());
}
/// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
VNInfo *getVNInfoAt(SlotIndex Idx) const {
const_iterator I = FindLiveRangeContaining(Idx);
@ -411,11 +358,6 @@ namespace llvm {
return I != end() && I->start <= Idx ? I : end();
}
/// findDefinedVNInfoForRegInt - Find the VNInfo defined by the specified
/// index (register interval).
VNInfo *findDefinedVNInfoForRegInt(SlotIndex Idx) const;
/// overlaps - Return true if the intersection of the two live intervals is
/// not empty.
bool overlaps(const LiveInterval& other) const {
@ -498,10 +440,6 @@ namespace llvm {
weight = HUGE_VALF;
}
/// ComputeJoinedWeight - Set the weight of a live interval after
/// Other has been merged into it.
void ComputeJoinedWeight(const LiveInterval &Other);
bool operator<(const LiveInterval& other) const {
const SlotIndex &thisIndex = beginIndex();
const SlotIndex &otherIndex = other.beginIndex();
@ -509,15 +447,27 @@ namespace llvm {
(thisIndex == otherIndex && reg < other.reg));
}
void print(raw_ostream &OS, const TargetRegisterInfo *TRI = 0) const;
void print(raw_ostream &OS) const;
void dump() const;
/// \brief Walk the interval and assert if any invariants fail to hold.
///
/// Note that this is a no-op when asserts are disabled.
#ifdef NDEBUG
void verify() const {}
#else
void verify() const;
#endif
private:
Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From);
void extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd);
Ranges::iterator extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStr);
void markValNoForDeletion(VNInfo *V);
void mergeIntervalRanges(const LiveInterval &RHS,
VNInfo *LHSValNo = 0,
const VNInfo *RHSValNo = 0);
LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT
@ -528,6 +478,91 @@ namespace llvm {
return OS;
}
/// LiveRangeQuery - Query information about a live range around a given
/// instruction. This class hides the implementation details of live ranges,
/// and it should be used as the primary interface for examining live ranges
/// around instructions.
///
class LiveRangeQuery {
VNInfo *EarlyVal;
VNInfo *LateVal;
SlotIndex EndPoint;
bool Kill;
public:
/// Create a LiveRangeQuery for the given live range and instruction index.
/// The sub-instruction slot of Idx doesn't matter, only the instruction it
/// refers to is considered.
LiveRangeQuery(const LiveInterval &LI, SlotIndex Idx)
: EarlyVal(0), LateVal(0), Kill(false) {
// Find the segment that enters the instruction.
LiveInterval::const_iterator I = LI.find(Idx.getBaseIndex());
LiveInterval::const_iterator E = LI.end();
if (I == E)
return;
// Is this an instruction live-in segment?
if (SlotIndex::isEarlierInstr(I->start, Idx)) {
EarlyVal = I->valno;
EndPoint = I->end;
// Move to the potentially live-out segment.
if (SlotIndex::isSameInstr(Idx, I->end)) {
Kill = true;
if (++I == E)
return;
}
}
// I now points to the segment that may be live-through, or defined by
// this instr. Ignore segments starting after the current instr.
if (SlotIndex::isEarlierInstr(Idx, I->start))
return;
LateVal = I->valno;
EndPoint = I->end;
}
/// Return the value that is live-in to the instruction. This is the value
/// that will be read by the instruction's use operands. Return NULL if no
/// value is live-in.
VNInfo *valueIn() const {
return EarlyVal;
}
/// Return true if the live-in value is killed by this instruction. This
/// means that either the live range ends at the instruction, or it changes
/// value.
bool isKill() const {
return Kill;
}
/// Return true if this instruction has a dead def.
bool isDeadDef() const {
return EndPoint.isDead();
}
/// Return the value leaving the instruction, if any. This can be a
/// live-through value, or a live def. A dead def returns NULL.
VNInfo *valueOut() const {
return isDeadDef() ? 0 : LateVal;
}
/// Return the value defined by this instruction, if any. This includes
/// dead defs, it is the value created by the instruction's def operands.
VNInfo *valueDefined() const {
return EarlyVal == LateVal ? 0 : LateVal;
}
/// Return the end point of the last live range segment to interact with
/// the instruction, if any.
///
/// The end point is an invalid SlotIndex only if the live range doesn't
/// intersect the instruction at all.
///
/// The end point may be at or past the end of the instruction's basic
/// block. That means the value was live out of the block.
SlotIndex endPoint() const {
return EndPoint;
}
};
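// Typical usage might look like this sketch (MI, Reg and a
// LiveIntervals &LIS assumed in scope):
//
//   SlotIndex Idx = LIS.getInstructionIndex(MI);
//   LiveRangeQuery LRQ(LIS.getInterval(Reg), Idx);
//   VNInfo *LiveIn  = LRQ.valueIn();   // value read by MI's uses, if any
//   bool    Kills   = LRQ.isKill();    // does MI end the live-in value?
//   VNInfo *LiveOut = LRQ.valueOut();  // value leaving MI; 0 for a dead def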
/// ConnectedVNInfoEqClasses - Helper class that can divide VNInfos in a
/// LiveInterval into equivalence classes of connected components. A
/// LiveInterval that has multiple connected components can be broken into

View File

@ -20,12 +20,13 @@
#ifndef LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVAL_ANALYSIS_H
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
@ -35,7 +36,9 @@
namespace llvm {
class AliasAnalysis;
class LiveRangeCalc;
class LiveVariables;
class MachineDominatorTree;
class MachineLoopInfo;
class TargetRegisterInfo;
class MachineRegisterInfo;
@ -44,27 +47,29 @@ namespace llvm {
class VirtRegMap;
class LiveIntervals : public MachineFunctionPass {
MachineFunction* mf_;
MachineRegisterInfo* mri_;
const TargetMachine* tm_;
const TargetRegisterInfo* tri_;
const TargetInstrInfo* tii_;
AliasAnalysis *aa_;
LiveVariables* lv_;
SlotIndexes* indexes_;
MachineFunction* MF;
MachineRegisterInfo* MRI;
const TargetMachine* TM;
const TargetRegisterInfo* TRI;
const TargetInstrInfo* TII;
AliasAnalysis *AA;
LiveVariables* LV;
SlotIndexes* Indexes;
MachineDominatorTree *DomTree;
LiveRangeCalc *LRCalc;
/// Special pool allocator for VNInfo's (LiveInterval val#).
///
VNInfo::Allocator VNInfoAllocator;
typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
Reg2IntervalMap r2iMap_;
/// Live interval pointers for all the virtual registers.
IndexedMap<LiveInterval*, VirtReg2IndexFunctor> VirtRegIntervals;
/// allocatableRegs_ - A bit vector of allocatable registers.
BitVector allocatableRegs_;
/// AllocatableRegs - A bit vector of allocatable registers.
BitVector AllocatableRegs;
/// reservedRegs_ - A bit vector of reserved registers.
BitVector reservedRegs_;
/// ReservedRegs - A bit vector of reserved registers.
BitVector ReservedRegs;
/// RegMaskSlots - Sorted list of instructions with register mask operands.
/// Always use the 'r' slot, RegMasks are normal clobbers, not early
@ -92,83 +97,59 @@ namespace llvm {
/// block.
SmallVector<std::pair<unsigned, unsigned>, 8> RegMaskBlocks;
/// RegUnitIntervals - Keep a live interval for each register unit as a way
/// of tracking fixed physreg interference.
SmallVector<LiveInterval*, 0> RegUnitIntervals;
public:
static char ID; // Pass identification, replacement for typeid
LiveIntervals() : MachineFunctionPass(ID) {
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
LiveIntervals();
virtual ~LiveIntervals();
// Calculate the spill weight to assign to a single instruction.
static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth);
typedef Reg2IntervalMap::iterator iterator;
typedef Reg2IntervalMap::const_iterator const_iterator;
const_iterator begin() const { return r2iMap_.begin(); }
const_iterator end() const { return r2iMap_.end(); }
iterator begin() { return r2iMap_.begin(); }
iterator end() { return r2iMap_.end(); }
unsigned getNumIntervals() const { return (unsigned)r2iMap_.size(); }
LiveInterval &getInterval(unsigned reg) {
Reg2IntervalMap::iterator I = r2iMap_.find(reg);
assert(I != r2iMap_.end() && "Interval does not exist for register");
return *I->second;
LiveInterval &getInterval(unsigned Reg) {
LiveInterval *LI = VirtRegIntervals[Reg];
assert(LI && "Interval does not exist for virtual register");
return *LI;
}
const LiveInterval &getInterval(unsigned reg) const {
Reg2IntervalMap::const_iterator I = r2iMap_.find(reg);
assert(I != r2iMap_.end() && "Interval does not exist for register");
return *I->second;
const LiveInterval &getInterval(unsigned Reg) const {
return const_cast<LiveIntervals*>(this)->getInterval(Reg);
}
bool hasInterval(unsigned reg) const {
return r2iMap_.count(reg);
bool hasInterval(unsigned Reg) const {
return VirtRegIntervals.inBounds(Reg) && VirtRegIntervals[Reg];
}
/// isAllocatable - is the physical register reg allocatable in the current
/// function?
bool isAllocatable(unsigned reg) const {
return allocatableRegs_.test(reg);
return AllocatableRegs.test(reg);
}
/// isReserved - is the physical register reg reserved in the current
/// function
bool isReserved(unsigned reg) const {
return reservedRegs_.test(reg);
return ReservedRegs.test(reg);
}
/// getScaledIntervalSize - get the size of an interval in "units,"
/// where every function is composed of one thousand units. This
/// measure scales properly with empty index slots in the function.
double getScaledIntervalSize(LiveInterval& I) {
return (1000.0 * I.getSize()) / indexes_->getIndexesLength();
// Interval creation.
LiveInterval &getOrCreateInterval(unsigned Reg) {
if (!hasInterval(Reg)) {
VirtRegIntervals.grow(Reg);
VirtRegIntervals[Reg] = createInterval(Reg);
}
return getInterval(Reg);
}
/// getFuncInstructionCount - Return the number of instructions in the
/// current function.
unsigned getFuncInstructionCount() {
return indexes_->getFunctionSize();
// Interval removal.
void removeInterval(unsigned Reg) {
delete VirtRegIntervals[Reg];
VirtRegIntervals[Reg] = 0;
}
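// Usage sketch for the array-based map (VirtReg assumed to be a virtual
// register number, LIS a LiveIntervals instance):
//
//   LiveInterval &LI = LIS.getOrCreateInterval(VirtReg); // grows on demand
//   // ... use LI ...
//   if (LIS.hasInterval(VirtReg))
//     LIS.removeInterval(VirtReg);   // delete the interval, null the slot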
/// getApproximateInstructionCount - computes an estimate of the number
/// of instructions in a given LiveInterval.
unsigned getApproximateInstructionCount(LiveInterval& I) {
double IntervalPercentage = getScaledIntervalSize(I) / 1000.0;
return (unsigned)(IntervalPercentage * indexes_->getFunctionSize());
}
// Interval creation
LiveInterval &getOrCreateInterval(unsigned reg) {
Reg2IntervalMap::iterator I = r2iMap_.find(reg);
if (I == r2iMap_.end())
I = r2iMap_.insert(std::make_pair(reg, createInterval(reg))).first;
return *I->second;
}
/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval *dupInterval(LiveInterval *li);
/// addLiveRangeToEndOfBlock - Given a register and an instruction,
/// adds a live range from that instruction to the end of its MBB.
LiveRange addLiveRangeToEndOfBlock(unsigned reg,
@ -184,42 +165,38 @@ namespace llvm {
bool shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead = 0);
// Interval removal
void removeInterval(unsigned Reg) {
DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.find(Reg);
delete I->second;
r2iMap_.erase(I);
SlotIndexes *getSlotIndexes() const {
return Indexes;
}
SlotIndexes *getSlotIndexes() const {
return indexes_;
AliasAnalysis *getAliasAnalysis() const {
return AA;
}
/// isNotInMIMap - returns true if the specified machine instr has been
/// removed or was never entered in the map.
bool isNotInMIMap(const MachineInstr* Instr) const {
return !indexes_->hasIndex(Instr);
return !Indexes->hasIndex(Instr);
}
/// Returns the base index of the given instruction.
SlotIndex getInstructionIndex(const MachineInstr *instr) const {
return indexes_->getInstructionIndex(instr);
return Indexes->getInstructionIndex(instr);
}
/// Returns the instruction associated with the given index.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
return indexes_->getInstructionFromIndex(index);
return Indexes->getInstructionFromIndex(index);
}
/// Return the first index in the given basic block.
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
return indexes_->getMBBStartIdx(mbb);
return Indexes->getMBBStartIdx(mbb);
}
/// Return the last index in the given basic block.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
return indexes_->getMBBEndIdx(mbb);
return Indexes->getMBBEndIdx(mbb);
}
bool isLiveInToMBB(const LiveInterval &li,
@ -233,24 +210,24 @@ namespace llvm {
}
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
return indexes_->getMBBFromIndex(index);
return Indexes->getMBBFromIndex(index);
}
SlotIndex InsertMachineInstrInMaps(MachineInstr *MI) {
return indexes_->insertMachineInstrInMaps(MI);
return Indexes->insertMachineInstrInMaps(MI);
}
void RemoveMachineInstrFromMaps(MachineInstr *MI) {
indexes_->removeMachineInstrFromMaps(MI);
Indexes->removeMachineInstrFromMaps(MI);
}
void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
indexes_->replaceMachineInstrInMaps(MI, NewMI);
Indexes->replaceMachineInstrInMaps(MI, NewMI);
}
bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
return indexes_->findLiveInMBBs(Start, End, MBBs);
return Indexes->findLiveInMBBs(Start, End, MBBs);
}
VNInfo::Allocator& getVNInfoAllocator() { return VNInfoAllocator; }
@ -264,18 +241,15 @@ namespace llvm {
/// print - Implement the dump method.
virtual void print(raw_ostream &O, const Module* = 0) const;
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable. Also returns true
/// by reference if all of the defs are load instructions.
bool isReMaterializable(const LiveInterval &li,
const SmallVectorImpl<LiveInterval*> *SpillIs,
bool &isLoad);
/// intervalIsInOneMBB - If LI is confined to a single basic block, return
/// a pointer to that block. If LI is live in to or out of any block,
/// return NULL.
MachineBasicBlock *intervalIsInOneMBB(const LiveInterval &LI) const;
/// Returns true if VNI is killed by any PHI-def values in LI.
/// This may conservatively return true to avoid expensive computations.
bool hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const;
/// addKillFlags - Add kill flags to any instruction that kills a virtual
/// register.
void addKillFlags();
@ -337,13 +311,47 @@ namespace llvm {
bool checkRegMaskInterference(LiveInterval &LI,
BitVector &UsableRegs);
// Register unit functions.
//
// Fixed interference occurs when MachineInstrs use physregs directly
// instead of virtual registers. This typically happens when passing
// arguments to a function call, or when instructions require operands in
// fixed registers.
//
// Each physreg has one or more register units, see MCRegisterInfo. We
// track liveness per register unit to handle aliasing registers more
// efficiently.
/// getRegUnit - Return the live range for Unit.
/// It will be computed if it doesn't exist.
LiveInterval &getRegUnit(unsigned Unit) {
LiveInterval *LI = RegUnitIntervals[Unit];
if (!LI) {
// Compute missing ranges on demand.
RegUnitIntervals[Unit] = LI = new LiveInterval(Unit, HUGE_VALF);
computeRegUnitInterval(LI);
}
return *LI;
}
/// getCachedRegUnit - Return the live range for Unit if it has already
/// been computed, or NULL if it hasn't been computed yet.
LiveInterval *getCachedRegUnit(unsigned Unit) {
return RegUnitIntervals[Unit];
}
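// Sketch: testing fixed interference against a physreg at an index
// (PhysReg, Idx and a const TargetRegisterInfo *TRI assumed in scope):
//
//   bool Interferes = false;
//   for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units)
//     if (getRegUnit(*Units).liveAt(Idx))
//       Interferes = true;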
private:
/// computeIntervals - Compute live intervals.
void computeIntervals();
/// Compute live intervals for all virtual registers.
void computeVirtRegs();
/// Compute RegMaskSlots and RegMaskBits.
void computeRegMasks();
/// handleRegisterDef - update intervals for a register def
/// (calls handlePhysicalRegisterDef and
/// handleVirtualRegisterDef)
/// (calls handleVirtualRegisterDef)
void handleRegisterDef(MachineBasicBlock *MBB,
MachineBasicBlock::iterator MI,
SlotIndex MIIdx,
@ -363,43 +371,15 @@ namespace llvm {
unsigned MOIdx,
LiveInterval& interval);
/// handlePhysicalRegisterDef - update intervals for a physical register
/// def.
void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
MachineBasicBlock::iterator mi,
SlotIndex MIIdx, MachineOperand& MO,
LiveInterval &interval);
/// handleLiveInRegister - Create interval for a livein register.
void handleLiveInRegister(MachineBasicBlock* mbb,
SlotIndex MIIdx,
LiveInterval &interval);
/// getReMatImplicitUse - If the remat definition MI has one (for now, we
/// only allow one) virtual register operand, then its uses are implicitly
/// using the register. Returns the virtual register.
unsigned getReMatImplicitUse(const LiveInterval &li,
MachineInstr *MI) const;
/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use
/// index.
bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
SlotIndex UseIdx) const;
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable. Also returns true
/// by reference if the def is a load.
bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
MachineInstr *MI,
const SmallVectorImpl<LiveInterval*> *SpillIs,
bool &isLoad);
static LiveInterval* createInterval(unsigned Reg);
void printInstrs(raw_ostream &O) const;
void dumpInstrs() const;
void computeLiveInRegUnits();
void computeRegUnitInterval(LiveInterval*);
void computeVirtRegInterval(LiveInterval*);
class HMEditor;
};
} // End llvm namespace

View File

@ -55,29 +55,29 @@ class LiveRangeEdit {
};
private:
LiveInterval &parent_;
SmallVectorImpl<LiveInterval*> &newRegs_;
LiveInterval *Parent;
SmallVectorImpl<LiveInterval*> &NewRegs;
MachineRegisterInfo &MRI;
LiveIntervals &LIS;
VirtRegMap *VRM;
const TargetInstrInfo &TII;
Delegate *const delegate_;
Delegate *const TheDelegate;
/// firstNew_ - Index of the first register added to newRegs_.
const unsigned firstNew_;
/// FirstNew - Index of the first register added to NewRegs.
const unsigned FirstNew;
/// scannedRemattable_ - true when remattable values have been identified.
bool scannedRemattable_;
/// ScannedRemattable - true when remattable values have been identified.
bool ScannedRemattable;
/// remattable_ - Values defined by remattable instructions as identified by
/// Remattable - Values defined by remattable instructions as identified by
/// tii.isTriviallyReMaterializable().
SmallPtrSet<const VNInfo*,4> remattable_;
SmallPtrSet<const VNInfo*,4> Remattable;
/// rematted_ - Values that were actually rematted, and so need to have their
/// Rematted - Values that were actually rematted, and so need to have their
/// live range trimmed or entirely removed.
SmallPtrSet<const VNInfo*,4> rematted_;
SmallPtrSet<const VNInfo*,4> Rematted;
/// scanRemattable - Identify the parent_ values that may rematerialize.
/// scanRemattable - Identify the Parent values that may rematerialize.
void scanRemattable(AliasAnalysis *aa);
/// allUsesAvailableAt - Return true if all registers used by OrigMI at
@ -99,32 +99,35 @@ class LiveRangeEdit {
/// @param vrm Map of virtual registers to physical registers for this
/// function. If NULL, no virtual register map updates will
/// be done. This could be the case if called before Regalloc.
LiveRangeEdit(LiveInterval &parent,
LiveRangeEdit(LiveInterval *parent,
SmallVectorImpl<LiveInterval*> &newRegs,
MachineFunction &MF,
LiveIntervals &lis,
VirtRegMap *vrm,
Delegate *delegate = 0)
: parent_(parent), newRegs_(newRegs),
: Parent(parent), NewRegs(newRegs),
MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
TII(*MF.getTarget().getInstrInfo()),
delegate_(delegate),
firstNew_(newRegs.size()),
scannedRemattable_(false) {}
TheDelegate(delegate),
FirstNew(newRegs.size()),
ScannedRemattable(false) {}
LiveInterval &getParent() const { return parent_; }
unsigned getReg() const { return parent_.reg; }
LiveInterval &getParent() const {
assert(Parent && "No parent LiveInterval");
return *Parent;
}
unsigned getReg() const { return getParent().reg; }
/// Iterator for accessing the new registers added by this edit.
typedef SmallVectorImpl<LiveInterval*>::const_iterator iterator;
iterator begin() const { return newRegs_.begin()+firstNew_; }
iterator end() const { return newRegs_.end(); }
unsigned size() const { return newRegs_.size()-firstNew_; }
iterator begin() const { return NewRegs.begin()+FirstNew; }
iterator end() const { return NewRegs.end(); }
unsigned size() const { return NewRegs.size()-FirstNew; }
bool empty() const { return size() == 0; }
LiveInterval *get(unsigned idx) const { return newRegs_[idx+firstNew_]; }
LiveInterval *get(unsigned idx) const { return NewRegs[idx+FirstNew]; }
ArrayRef<LiveInterval*> regs() const {
return makeArrayRef(newRegs_).slice(firstNew_);
return makeArrayRef(NewRegs).slice(FirstNew);
}
/// createFrom - Create a new virtual register based on OldReg.
@ -174,12 +177,12 @@ class LiveRangeEdit {
/// markRematerialized - explicitly mark a value as rematerialized after doing
/// it manually.
void markRematerialized(const VNInfo *ParentVNI) {
rematted_.insert(ParentVNI);
Rematted.insert(ParentVNI);
}
/// didRematerialize - Return true if ParentVNI was rematerialized anywhere.
bool didRematerialize(const VNInfo *ParentVNI) const {
return rematted_.count(ParentVNI);
return Rematted.count(ParentVNI);
}
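// Constructor usage under the new pointer-based parent (a sketch; Reg,
// MF, a LiveIntervals &LIS and a VirtRegMap *VRM assumed in scope):
//
//   SmallVector<LiveInterval*, 4> NewRegs;
//   LiveRangeEdit Edit(&LIS.getInterval(Reg), NewRegs, MF, LIS, VRM);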
/// eraseVirtReg - Notify the delegate that Reg is no longer in use, and try

View File

@ -143,10 +143,7 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
IterTy MII;
public:
bundle_iterator(IterTy mii) : MII(mii) {
assert(!MII->isInsideBundle() &&
"It's not legal to initialize bundle_iterator with a bundled MI");
}
bundle_iterator(IterTy mii) : MII(mii) {}
bundle_iterator(Ty &mi) : MII(mi) {
assert(!mi.isInsideBundle() &&
@ -156,7 +153,10 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
assert((!mi || !mi->isInsideBundle()) &&
"It's not legal to initialize bundle_iterator with a bundled MI");
}
bundle_iterator(const bundle_iterator &I) : MII(I.MII) {}
// Template allows conversion from const to nonconst.
template<class OtherTy, class OtherIterTy>
bundle_iterator(const bundle_iterator<OtherTy, OtherIterTy> &I)
: MII(I.getInstrIterator()) {}
bundle_iterator() : MII(0) {}
Ty &operator*() const { return *MII; }
@ -173,29 +173,24 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
// Increment and decrement operators...
bundle_iterator &operator--() { // predecrement - Back up
do {
--MII;
} while (MII->isInsideBundle());
do --MII;
while (MII->isInsideBundle());
return *this;
}
bundle_iterator &operator++() { // preincrement - Advance
do {
++MII;
} while (MII->isInsideBundle());
IterTy E = MII->getParent()->instr_end();
do ++MII;
while (MII != E && MII->isInsideBundle());
return *this;
}
bundle_iterator operator--(int) { // postdecrement operators...
bundle_iterator tmp = *this;
do {
--MII;
} while (MII->isInsideBundle());
--*this;
return tmp;
}
bundle_iterator operator++(int) { // postincrement operators...
bundle_iterator tmp = *this;
do {
++MII;
} while (MII->isInsideBundle());
++*this;
return tmp;
}
@ -235,42 +230,14 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
reverse_instr_iterator instr_rend () { return Insts.rend(); }
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
iterator begin() { return Insts.begin(); }
const_iterator begin() const { return Insts.begin(); }
iterator end() {
instr_iterator II = instr_end();
if (II != instr_begin()) {
while (II->isInsideBundle())
--II;
}
return II;
}
const_iterator end() const {
const_instr_iterator II = instr_end();
if (II != instr_begin()) {
while (II->isInsideBundle())
--II;
}
return II;
}
reverse_iterator rbegin() {
reverse_instr_iterator II = instr_rbegin();
if (II != instr_rend()) {
while (II->isInsideBundle())
++II;
}
return II;
}
const_reverse_iterator rbegin() const {
const_reverse_instr_iterator II = instr_rbegin();
if (II != instr_rend()) {
while (II->isInsideBundle())
++II;
}
return II;
}
reverse_iterator rend () { return Insts.rend(); }
const_reverse_iterator rend () const { return Insts.rend(); }
iterator begin() { return instr_begin(); }
const_iterator begin() const { return instr_begin(); }
iterator end () { return instr_end(); }
const_iterator end () const { return instr_end(); }
reverse_iterator rbegin() { return instr_rbegin(); }
const_reverse_iterator rbegin() const { return instr_rbegin(); }
reverse_iterator rend () { return instr_rend(); }
const_reverse_iterator rend () const { return instr_rend(); }
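// The practical difference between the two iterator families, as a
// sketch (visitMI is hypothetical, MBB a MachineBasicBlock*):
//
//   // Top-level view: one step per bundle header.
//   for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
//        I != E; ++I)
//     visitMI(*I);
//   // Full view: every instruction, including those inside bundles.
//   for (MachineBasicBlock::instr_iterator I = MBB->instr_begin(),
//          E = MBB->instr_end(); I != E; ++I)
//     visitMI(*I);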
// Machine-CFG iterators
@ -412,6 +379,10 @@ class MachineBasicBlock : public ilist_node<MachineBasicBlock> {
/// which refer to fromMBB to refer to this.
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *fromMBB);
/// isPredecessor - Return true if the specified MBB is a predecessor of this
/// block.
bool isPredecessor(const MachineBasicBlock *MBB) const;
/// isSuccessor - Return true if the specified MBB is a successor of this
/// block.
bool isSuccessor(const MachineBasicBlock *MBB) const;

View File

@ -359,7 +359,7 @@ class MachineFrameInfo {
assert(unsigned(ObjectIdx+NumFixedObjects) < Objects.size() &&
"Invalid Object Idx!");
Objects[ObjectIdx+NumFixedObjects].Alignment = Align;
MaxAlignment = std::max(MaxAlignment, Align);
ensureMaxAlignment(Align);
}
/// NeedsStackProtector - Returns true if the object may need stack
@ -416,9 +416,11 @@ class MachineFrameInfo {
///
unsigned getMaxAlignment() const { return MaxAlignment; }
/// setMaxAlignment - Set the preferred alignment.
///
void setMaxAlignment(unsigned Align) { MaxAlignment = Align; }
/// ensureMaxAlignment - Make sure the function is at least Align bytes
/// aligned.
void ensureMaxAlignment(unsigned Align) {
if (MaxAlignment < Align) MaxAlignment = Align;
}
/// AdjustsStack - Return true if this function adjusts the stack -- e.g.,
/// when calling another function. This is only valid during and after
@ -485,7 +487,7 @@ class MachineFrameInfo {
Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, MayNeedSP));
int Index = (int)Objects.size() - NumFixedObjects - 1;
assert(Index >= 0 && "Bad frame index!");
MaxAlignment = std::max(MaxAlignment, Alignment);
ensureMaxAlignment(Alignment);
return Index;
}
@ -496,7 +498,7 @@ class MachineFrameInfo {
int CreateSpillStackObject(uint64_t Size, unsigned Alignment) {
CreateStackObject(Size, Alignment, true, false);
int Index = (int)Objects.size() - NumFixedObjects - 1;
MaxAlignment = std::max(MaxAlignment, Alignment);
ensureMaxAlignment(Alignment);
return Index;
}
@ -515,7 +517,7 @@ class MachineFrameInfo {
int CreateVariableSizedObject(unsigned Alignment) {
HasVarSizedObjects = true;
Objects.push_back(StackObject(0, Alignment, 0, false, false, true));
MaxAlignment = std::max(MaxAlignment, Alignment);
ensureMaxAlignment(Alignment);
return (int)Objects.size()-NumFixedObjects-1;
}

View File

@ -189,8 +189,8 @@ class MachineFunction {
///
void setAlignment(unsigned A) { Alignment = A; }
/// EnsureAlignment - Make sure the function is at least 1 << A bytes aligned.
void EnsureAlignment(unsigned A) {
/// ensureAlignment - Make sure the function is at least 1 << A bytes aligned.
void ensureAlignment(unsigned A) {
if (Alignment < A) Alignment = A;
}

View File

@ -420,6 +420,12 @@ class MachineInstr : public ilist_node<MachineInstr> {
return hasProperty(MCID::Bitcast, Type);
}
/// isSelect - Return true if this instruction is a select instruction.
///
bool isSelect(QueryType Type = IgnoreBundle) const {
return hasProperty(MCID::Select, Type);
}
/// isNotDuplicable - Return true if this instruction cannot be safely
/// duplicated. For example, if the instruction has a unique labels attached
/// to it, duplicating it would cause multiple definition errors.
@ -635,6 +641,30 @@ class MachineInstr : public ilist_node<MachineInstr> {
getOperand(0).getSubReg() == getOperand(1).getSubReg();
}
/// isTransient - Return true if this is a transient instruction that is
/// either very likely to be eliminated during register allocation (such as
/// copy-like instructions), or if this instruction doesn't have an
/// execution-time cost.
bool isTransient() const {
switch(getOpcode()) {
default: return false;
// Copy-like instructions are usually eliminated during register allocation.
case TargetOpcode::PHI:
case TargetOpcode::COPY:
case TargetOpcode::INSERT_SUBREG:
case TargetOpcode::SUBREG_TO_REG:
case TargetOpcode::REG_SEQUENCE:
// Pseudo-instructions that don't produce any real output.
case TargetOpcode::IMPLICIT_DEF:
case TargetOpcode::KILL:
case TargetOpcode::PROLOG_LABEL:
case TargetOpcode::EH_LABEL:
case TargetOpcode::GC_LABEL:
case TargetOpcode::DBG_VALUE:
return true;
}
}
/// getBundleSize - Return the number of instructions inside the MI bundle.
unsigned getBundleSize() const;
@ -912,12 +942,12 @@ class MachineInstr : public ilist_node<MachineInstr> {
/// RemoveRegOperandsFromUseLists - Unlink all of the register operands in
/// this instruction from their respective use lists. This requires that the
/// operands already be on their use lists.
void RemoveRegOperandsFromUseLists();
void RemoveRegOperandsFromUseLists(MachineRegisterInfo&);
/// AddRegOperandsToUseLists - Add all of the register operands in
/// this instruction to their respective use lists. This requires that the
/// operands not be on their use lists yet.
void AddRegOperandsToUseLists(MachineRegisterInfo &RegInfo);
void AddRegOperandsToUseLists(MachineRegisterInfo&);
/// hasPropertyInBundle - Slow path for hasProperty when we're dealing with a
/// bundle.

View File

@ -34,6 +34,7 @@ namespace RegState {
Undef = 0x20,
EarlyClobber = 0x40,
Debug = 0x80,
InternalRead = 0x100,
DefineNoRead = Define | Undef,
ImplicitDefine = Implicit | Define,
ImplicitKill = Implicit | Kill
@ -67,7 +68,8 @@ class MachineInstrBuilder {
flags & RegState::Undef,
flags & RegState::EarlyClobber,
SubReg,
flags & RegState::Debug));
flags & RegState::Debug,
flags & RegState::InternalRead));
return *this;
}
@ -106,6 +108,12 @@ class MachineInstrBuilder {
return *this;
}
const MachineInstrBuilder &addTargetIndex(unsigned Idx, int64_t Offset = 0,
unsigned char TargetFlags = 0) const {
MI->addOperand(MachineOperand::CreateTargetIndex(Idx, Offset, TargetFlags));
return *this;
}
const MachineInstrBuilder &addJumpTableIndex(unsigned Idx,
unsigned char TargetFlags = 0) const {
MI->addOperand(MachineOperand::CreateJTI(Idx, TargetFlags));
@ -310,6 +318,9 @@ inline unsigned getDeadRegState(bool B) {
inline unsigned getUndefRegState(bool B) {
return B ? RegState::Undef : 0;
}
inline unsigned getInternalReadRegState(bool B) {
return B ? RegState::InternalRead : 0;
}
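// The new flag composes with the existing helpers when building operands
// (a sketch; MBB, MI, DL, TII, DstReg, SrcReg, IsKill and IsInternal are
// assumed to be in scope):
//
//   BuildMI(MBB, MI, DL, TII->get(TargetOpcode::COPY), DstReg)
//     .addReg(SrcReg, getKillRegState(IsKill) |
//                     getInternalReadRegState(IsInternal));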
} // End llvm namespace

View File

@ -43,14 +43,14 @@ bool finalizeBundles(MachineFunction &MF);
/// getBundleStart - Returns the first instruction in the bundle containing MI.
///
static inline MachineInstr *getBundleStart(MachineInstr *MI) {
inline MachineInstr *getBundleStart(MachineInstr *MI) {
MachineBasicBlock::instr_iterator I = MI;
while (I->isInsideBundle())
--I;
return I;
}
static inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
inline const MachineInstr *getBundleStart(const MachineInstr *MI) {
MachineBasicBlock::const_instr_iterator I = MI;
while (I->isInsideBundle())
--I;

View File

@ -10,9 +10,9 @@
// The MachineJumpTableInfo class keeps track of jump tables referenced by
// lowered switch instructions in the MachineFunction.
//
// Instructions reference the address of these jump tables through the use of
// MO_JumpTableIndex values. When emitting assembly or machine code, these
// virtual address references are converted to refer to the address of the
// Instructions reference the address of these jump tables through the use of
// MO_JumpTableIndex values. When emitting assembly or machine code, these
// virtual address references are converted to refer to the address of the
// function jump tables.
//
//===----------------------------------------------------------------------===//
@ -34,11 +34,11 @@ class raw_ostream;
struct MachineJumpTableEntry {
/// MBBs - The vector of basic blocks from which to create the jump table.
std::vector<MachineBasicBlock*> MBBs;
explicit MachineJumpTableEntry(const std::vector<MachineBasicBlock*> &M)
: MBBs(M) {}
};
class MachineJumpTableInfo {
public:
/// JTEntryKind - This enum indicates how each entry of the jump table is
@ -57,7 +57,7 @@ class MachineJumpTableInfo {
/// with a relocation as gp-relative, e.g.:
/// .gprel32 LBB123
EK_GPRel32BlockAddress,
/// EK_LabelDifference32 - Each entry is the address of the block minus
/// the address of the jump table. This is used for PIC jump tables where
/// gprel32 is not supported. e.g.:
@ -80,18 +80,18 @@ class MachineJumpTableInfo {
std::vector<MachineJumpTableEntry> JumpTables;
public:
explicit MachineJumpTableInfo(JTEntryKind Kind): EntryKind(Kind) {}
JTEntryKind getEntryKind() const { return EntryKind; }
/// getEntrySize - Return the size of each entry in the jump table.
unsigned getEntrySize(const TargetData &TD) const;
/// getEntryAlignment - Return the alignment of each entry in the jump table.
unsigned getEntryAlignment(const TargetData &TD) const;
/// createJumpTableIndex - Create a new jump table.
///
unsigned createJumpTableIndex(const std::vector<MachineBasicBlock*> &DestBBs);
/// isEmpty - Return true if there are no jump tables.
///
bool isEmpty() const { return JumpTables.empty(); }
@ -105,7 +105,7 @@ class MachineJumpTableInfo {
void RemoveJumpTable(unsigned Idx) {
JumpTables[Idx].MBBs.clear();
}
/// ReplaceMBBInJumpTables - If Old is the target of any jump tables, update
/// the jump tables to branch to New instead.
bool ReplaceMBBInJumpTables(MachineBasicBlock *Old, MachineBasicBlock *New);
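// Sketch: after retargeting a CFG edge, fix up any jump tables
// (MF, OldBB and NewBB assumed in scope):
//
//   if (MachineJumpTableInfo *JTI = MF.getJumpTableInfo())
//     JTI->ReplaceMBBInJumpTables(OldBB, NewBB);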

View File

@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
// This file defines the MachineLoopInfo class that is used to identify natural
// loops and determine the loop depth of various nodes of the CFG. Note that
// natural loops may actually be several loops that share the same header node.
//
@ -35,6 +35,12 @@
namespace llvm {
// Implementation in LoopInfoImpl.h
#ifdef __GNUC__
class MachineLoop;
__extension__ extern template class LoopBase<MachineBasicBlock, MachineLoop>;
#endif
class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
public:
MachineLoop();
@ -57,6 +63,12 @@ class MachineLoop : public LoopBase<MachineBasicBlock, MachineLoop> {
: LoopBase<MachineBasicBlock, MachineLoop>(MBB) {}
};
// Implementation in LoopInfoImpl.h
#ifdef __GNUC__
__extension__ extern template
class LoopInfoBase<MachineBasicBlock, MachineLoop>;
#endif
class MachineLoopInfo : public MachineFunctionPass {
LoopInfoBase<MachineBasicBlock, MachineLoop> LI;
friend class LoopBase<MachineBasicBlock, MachineLoop>;
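// A note on the extern templates above: under GCC-compatible compilers they
// promise an explicit instantiation in a single .cpp (presumably
// MachineLoopInfo.cpp), e.g.:
//
//   template class llvm::LoopBase<MachineBasicBlock, MachineLoop>;
//   template class llvm::LoopInfoBase<MachineBasicBlock, MachineLoop>;
//
// so including translation units do not re-instantiate them.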

View File

@ -14,6 +14,7 @@
#ifndef LLVM_CODEGEN_MACHINEOPERAND_H
#define LLVM_CODEGEN_MACHINEOPERAND_H
#include "llvm/ADT/Hashing.h"
#include "llvm/Support/DataTypes.h"
#include <cassert>
@ -44,6 +45,7 @@ class MachineOperand {
MO_MachineBasicBlock, ///< MachineBasicBlock reference
MO_FrameIndex, ///< Abstract Stack Frame Index
MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
MO_TargetIndex, ///< Target-dependent index+offset operand.
MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
MO_ExternalSymbol, ///< Name of external global symbol
MO_GlobalAddress, ///< Address of a global value
@ -148,7 +150,7 @@ class MachineOperand {
struct { // For MO_Register.
// Register number is in SmallContents.RegNo.
MachineOperand **Prev; // Access list for register.
MachineOperand *Prev; // Access list for register. See MRI.
MachineOperand *Next;
} Reg;
@ -214,6 +216,8 @@ class MachineOperand {
bool isFI() const { return OpKind == MO_FrameIndex; }
/// isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool isCPI() const { return OpKind == MO_ConstantPoolIndex; }
/// isTargetIndex - Tests if this is a MO_TargetIndex operand.
bool isTargetIndex() const { return OpKind == MO_TargetIndex; }
/// isJTI - Tests if this is a MO_JumpTableIndex operand.
bool isJTI() const { return OpKind == MO_JumpTableIndex; }
/// isGlobal - Tests if this is a MO_GlobalAddress operand.
@ -301,13 +305,6 @@ class MachineOperand {
return !isUndef() && !isInternalRead() && (isUse() || getSubReg());
}
/// getNextOperandForReg - Return the next MachineOperand in the function that
/// uses or defines this register.
MachineOperand *getNextOperandForReg() const {
assert(isReg() && "This is not a register operand!");
return Contents.Reg.Next;
}
//===--------------------------------------------------------------------===//
// Mutators for Register Operands
//===--------------------------------------------------------------------===//
@ -334,17 +331,9 @@ class MachineOperand {
///
void substPhysReg(unsigned Reg, const TargetRegisterInfo&);
void setIsUse(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
assert((Val || !isDebug()) && "Marking a debug operation as def");
IsDef = !Val;
}
void setIsUse(bool Val = true) { setIsDef(!Val); }
void setIsDef(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
assert((!Val || !isDebug()) && "Marking a debug operation as def");
IsDef = Val;
}
void setIsDef(bool Val = true);
void setImplicit(bool Val = true) {
assert(isReg() && "Wrong MachineOperand accessor");
@ -407,7 +396,7 @@ class MachineOperand {
}
int getIndex() const {
assert((isFI() || isCPI() || isJTI()) &&
assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
"Wrong MachineOperand accessor");
return Contents.OffsetedInfo.Val.Index;
}
@ -430,8 +419,8 @@ class MachineOperand {
/// getOffset - Return the offset from the symbol in this operand. This always
/// returns 0 for ExternalSymbol operands.
int64_t getOffset() const {
assert((isGlobal() || isSymbol() || isCPI() || isBlockAddress()) &&
"Wrong MachineOperand accessor");
assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
isBlockAddress()) && "Wrong MachineOperand accessor");
return (int64_t(Contents.OffsetedInfo.OffsetHi) << 32) |
SmallContents.OffsetLo;
}
@ -478,14 +467,14 @@ class MachineOperand {
}
void setOffset(int64_t Offset) {
assert((isGlobal() || isSymbol() || isCPI() || isBlockAddress()) &&
"Wrong MachineOperand accessor");
assert((isGlobal() || isSymbol() || isCPI() || isTargetIndex() ||
isBlockAddress()) && "Wrong MachineOperand accessor");
SmallContents.OffsetLo = unsigned(Offset);
Contents.OffsetedInfo.OffsetHi = int(Offset >> 32);
}
void setIndex(int Idx) {
assert((isFI() || isCPI() || isJTI()) &&
assert((isFI() || isCPI() || isTargetIndex() || isJTI()) &&
"Wrong MachineOperand accessor");
Contents.OffsetedInfo.Val.Index = Idx;
}
@ -503,6 +492,13 @@ class MachineOperand {
/// operand. Note: This method ignores isKill and isDead properties.
bool isIdenticalTo(const MachineOperand &Other) const;
/// \brief MachineOperand hash_value overload.
///
/// Note that this includes the same information in the hash that
/// isIdenticalTo uses for comparison. It is thus suited for use in hash
/// tables which use that function for equality comparisons only.
friend hash_code hash_value(const MachineOperand &MO);
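// Usage sketch: this hash is suitable for tables keyed on isIdenticalTo
// equality (both ignore the isKill/isDead properties):
//
//   hash_code H = hash_value(MO);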
/// ChangeToImmediate - Replace this operand with a new immediate operand of
/// the specified value. If an operand is known to be an immediate already,
/// the setImm method should be used.
@ -542,14 +538,15 @@ class MachineOperand {
bool isUndef = false,
bool isEarlyClobber = false,
unsigned SubReg = 0,
bool isDebug = false) {
bool isDebug = false,
bool isInternalRead = false) {
MachineOperand Op(MachineOperand::MO_Register);
Op.IsDef = isDef;
Op.IsImp = isImp;
Op.IsKill = isKill;
Op.IsDead = isDead;
Op.IsUndef = isUndef;
Op.IsInternalRead = false;
Op.IsInternalRead = isInternalRead;
Op.IsEarlyClobber = isEarlyClobber;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
@ -578,6 +575,14 @@ class MachineOperand {
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateTargetIndex(unsigned Idx, int64_t Offset,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_TargetIndex);
Op.setIndex(Idx);
Op.setOffset(Offset);
Op.setTargetFlags(TargetFlags);
return Op;
}
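// Creation sketch (values hypothetical): a target-dependent index+offset
// operand is attached like any other operand:
//
//   MI->addOperand(MachineOperand::CreateTargetIndex(/*Idx=*/0,
//                                                    /*Offset=*/16));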
static MachineOperand CreateJTI(unsigned Idx,
unsigned char TargetFlags = 0) {
MachineOperand Op(MachineOperand::MO_JumpTableIndex);
@ -653,15 +658,6 @@ class MachineOperand {
assert(isReg() && "Can only add reg operand to use lists");
return Contents.Reg.Prev != 0;
}
/// AddRegOperandToRegInfo - Add this register operand to the specified
/// MachineRegisterInfo. If it is null, then the next/prev fields should be
/// explicitly nulled out.
void AddRegOperandToRegInfo(MachineRegisterInfo *RegInfo);
/// RemoveRegOperandFromRegInfo - Remove this register operand from the
/// MachineRegisterInfo it is linked with.
void RemoveRegOperandFromRegInfo();
};
inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {

View File

@ -26,7 +26,7 @@ namespace llvm {
typedef void *(*MachinePassCtor)();
//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryListener - Listener for adds and removals of nodes in
/// the registration list.
@ -42,7 +42,7 @@ class MachinePassRegistryListener {
};
//===----------------------------------------------------------------------===//
///
/// MachinePassRegistryNode - Machine pass node stored in registration list.
///
@ -55,7 +55,7 @@ class MachinePassRegistryNode {
const char *Name; // Name of function pass.
const char *Description; // Description string.
MachinePassCtor Ctor; // Function pass creator.
public:
MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
@ -72,11 +72,11 @@ class MachinePassRegistryNode {
const char *getDescription() const { return Description; }
MachinePassCtor getCtor() const { return Ctor; }
void setNext(MachinePassRegistryNode *N) { Next = N; }
};
//===----------------------------------------------------------------------===//
///
/// MachinePassRegistry - Track the registration of machine passes.
///
@ -88,7 +88,7 @@ class MachinePassRegistry {
MachinePassRegistryNode *List; // List of registry nodes.
MachinePassCtor Default; // Default function pass creator.
MachinePassRegistryListener* Listener;// Listener for list adds and removes.
public:
// NO CONSTRUCTOR - we don't want static constructor ordering to mess
@ -99,6 +99,7 @@ class MachinePassRegistry {
MachinePassRegistryNode *getList() { return List; }
MachinePassCtor getDefault() { return Default; }
void setDefault(MachinePassCtor C) { Default = C; }
void setDefault(StringRef Name);
void setListener(MachinePassRegistryListener *L) { Listener = L; }
/// Add - Adds a function pass to the registration list.
@ -126,7 +127,7 @@ class RegisterPassParser : public MachinePassRegistryListener,
void initialize(cl::Option &O) {
cl::parser<typename RegistryClass::FunctionPassCtor>::initialize(O);
// Add existing passes to option.
for (RegistryClass *Node = RegistryClass::getList();
Node; Node = Node->getNext()) {
@ -134,7 +135,7 @@ class RegisterPassParser : public MachinePassRegistryListener,
(typename RegistryClass::FunctionPassCtor)Node->getCtor(),
Node->getDescription());
}
// Make sure we listen for list changes.
RegistryClass::setListener(this);
}

View File

@ -57,6 +57,26 @@ class MachineRegisterInfo {
/// physical registers.
MachineOperand **PhysRegUseDefLists;
/// getRegUseDefListHead - Return the head pointer for the register use/def
/// list for the specified virtual or physical register.
MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
/// Get the next element in the use-def chain.
static MachineOperand *getNextOperandForReg(const MachineOperand *MO) {
assert(MO && MO->isReg() && "This is not a register operand!");
return MO->Contents.Reg.Next;
}
/// UsedPhysRegs - This is a bit vector that is computed and set by the
/// register allocator, and must be kept up to date by passes that run after
/// register allocation (though most don't modify this). This is used
@ -129,12 +149,21 @@ class MachineRegisterInfo {
// Register Info
//===--------------------------------------------------------------------===//
// Strictly for use by MachineInstr.cpp.
void addRegOperandToUseList(MachineOperand *MO);
// Strictly for use by MachineInstr.cpp.
void removeRegOperandFromUseList(MachineOperand *MO);
/// reg_begin/reg_end - Provide iteration support to walk over all definitions
/// and uses of a register within the MachineFunction that corresponds to this
/// MachineRegisterInfo object.
template<bool Uses, bool Defs, bool SkipDebug>
class defusechain_iterator;
// Make it a friend so it can access getNextOperandForReg().
template<bool, bool, bool> friend class defusechain_iterator;
/// reg_iterator/reg_begin/reg_end - Walk all defs and uses of the specified
/// register.
typedef defusechain_iterator<true,true,false> reg_iterator;
@ -172,6 +201,15 @@ class MachineRegisterInfo {
/// specified register (it may be live-in).
bool def_empty(unsigned RegNo) const { return def_begin(RegNo) == def_end(); }
/// hasOneDef - Return true if there is exactly one instruction defining the
/// specified register.
bool hasOneDef(unsigned RegNo) const {
def_iterator DI = def_begin(RegNo);
if (DI == def_end())
return false;
return ++DI == def_end();
}
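// Usage sketch: pair with getUniqueVRegDef (declared below) to fold against
// a single reaching definition ('visit' is a hypothetical callback):
//
//   if (MRI.hasOneDef(Reg))
//     if (MachineInstr *DefMI = MRI.getUniqueVRegDef(Reg))
//       visit(DefMI);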
/// use_iterator/use_begin/use_end - Walk all uses of the specified register.
typedef defusechain_iterator<true,false,false> use_iterator;
use_iterator use_begin(unsigned RegNo) const {
@ -185,7 +223,12 @@ class MachineRegisterInfo {
/// hasOneUse - Return true if there is exactly one instruction using the
/// specified register.
bool hasOneUse(unsigned RegNo) const;
bool hasOneUse(unsigned RegNo) const {
use_iterator UI = use_begin(RegNo);
if (UI == use_end())
return false;
return ++UI == use_end();
}
/// use_nodbg_iterator/use_nodbg_begin/use_nodbg_end - Walk all uses of the
/// specified register, skipping those marked as Debug.
@ -218,25 +261,16 @@ class MachineRegisterInfo {
/// constraints.
void replaceRegWith(unsigned FromReg, unsigned ToReg);
/// getRegUseDefListHead - Return the head pointer for the register use/def
/// list for the specified virtual or physical register.
MachineOperand *&getRegUseDefListHead(unsigned RegNo) {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
MachineOperand *getRegUseDefListHead(unsigned RegNo) const {
if (TargetRegisterInfo::isVirtualRegister(RegNo))
return VRegInfo[RegNo].second;
return PhysRegUseDefLists[RegNo];
}
/// getVRegDef - Return the machine instr that defines the specified virtual
/// register or null if none is found. This assumes that the code is in SSA
/// form, so there should only be one definition.
MachineInstr *getVRegDef(unsigned Reg) const;
/// getUniqueVRegDef - Return the unique machine instr that defines the
/// specified virtual register or null if none is found. If there are
/// multiple definitions or no definition, return null.
MachineInstr *getUniqueVRegDef(unsigned Reg) const;
/// clearKillFlags - Iterate over all the uses of the given register and
/// clear the kill flag from the MachineOperand. This function is used by
/// optimization passes which extend register lifetimes and need only
@ -336,7 +370,7 @@ class MachineRegisterInfo {
bool isPhysRegOrOverlapUsed(unsigned Reg) const {
if (UsedPhysRegMask.test(Reg))
return true;
for (const uint16_t *AI = TRI->getOverlaps(Reg); *AI; ++AI)
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
if (UsedPhysRegs.test(*AI))
return true;
return false;
@ -434,10 +468,6 @@ class MachineRegisterInfo {
const TargetRegisterInfo &TRI,
const TargetInstrInfo &TII);
private:
void HandleVRegListReallocation();
public:
/// defusechain_iterator - This class provides iterator support for machine
/// operands in the function that use or define a specific register. If
/// ReturnUses is true it returns uses of registers, if ReturnDefs is true it
@ -481,13 +511,22 @@ class MachineRegisterInfo {
// Iterator traversal: forward iteration only
defusechain_iterator &operator++() { // Preincrement
assert(Op && "Cannot increment end iterator!");
Op = Op->getNextOperandForReg();
Op = getNextOperandForReg(Op);
// If this is an operand we don't care about, skip it.
while (Op && ((!ReturnUses && Op->isUse()) ||
(!ReturnDefs && Op->isDef()) ||
(SkipDebug && Op->isDebug())))
Op = Op->getNextOperandForReg();
// All defs come before the uses, so stop def_iterator early.
if (!ReturnUses) {
if (Op) {
if (Op->isUse())
Op = 0;
else
assert(!Op->isDebug() && "Can't have debug defs");
}
} else {
// If this is an operand we don't care about, skip it.
while (Op && ((!ReturnDefs && Op->isDef()) ||
(SkipDebug && Op->isDebug())))
Op = getNextOperandForReg(Op);
}
return *this;
}
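// Iteration sketch: walk all non-debug uses of 'Reg' with the iterators
// built on this class (assumes 'MRI' is the function's MachineRegisterInfo):
//
//   for (MachineRegisterInfo::use_nodbg_iterator
//          UI = MRI.use_nodbg_begin(Reg), UE = MRI.use_nodbg_end();
//        UI != UE; ++UI) {
//     MachineInstr *UseMI = &*UI;
//     // ... inspect UseMI ...
//   }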

View File

@ -19,7 +19,7 @@
// createCustomMachineSched);
//
// Inside <Target>PassConfig:
// enablePass(MachineSchedulerID);
// enablePass(&MachineSchedulerID);
// MachineSchedRegistry::setDefault(createCustomMachineSched);
//
//===----------------------------------------------------------------------===//
@ -35,6 +35,7 @@ class AliasAnalysis;
class LiveIntervals;
class MachineDominatorTree;
class MachineLoopInfo;
class RegisterClassInfo;
class ScheduleDAGInstrs;
/// MachineSchedContext provides enough context from the MachineScheduler pass
@ -47,7 +48,10 @@ struct MachineSchedContext {
AliasAnalysis *AA;
LiveIntervals *LIS;
MachineSchedContext(): MF(0), MLI(0), MDT(0), PassConfig(0), AA(0), LIS(0) {}
RegisterClassInfo *RegClassInfo;
MachineSchedContext();
virtual ~MachineSchedContext();
};
/// MachineSchedRegistry provides a selection of available machine instruction
@ -81,6 +85,9 @@ class MachineSchedRegistry : public MachinePassRegistryNode {
static void setDefault(ScheduleDAGCtor C) {
Registry.setDefault((MachinePassCtor)C);
}
static void setDefault(StringRef Name) {
Registry.setDefault(Name);
}
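// Usage sketch (names hypothetical): register a scheduler, then select it by
// its registry name via the new StringRef overload:
//
//   static MachineSchedRegistry R("my-sched", "My custom scheduler",
//                                 createMyCustomSched);
//   MachineSchedRegistry::setDefault("my-sched");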
static void setListener(MachinePassRegistryListener *L) {
Registry.setListener(L);
}

View File

@ -24,6 +24,7 @@ namespace llvm {
class FunctionPass;
class MachineFunctionPass;
class PassInfo;
class PassManagerBase;
class TargetLowering;
class TargetRegisterClass;
class raw_ostream;
@ -31,8 +32,6 @@ namespace llvm {
namespace llvm {
extern char &NoPassID; // Allow targets to choose not to run a pass.
class PassConfigImpl;
/// Target-Independent Code Generator Pass Configuration Options.
@ -54,9 +53,15 @@ class TargetPassConfig : public ImmutablePass {
/// optimization after regalloc.
static char PostRAMachineLICMID;
private:
PassManagerBase *PM;
AnalysisID StartAfter;
AnalysisID StopAfter;
bool Started;
bool Stopped;
protected:
TargetMachine *TM;
PassManagerBase *PM;
PassConfigImpl *Impl; // Internal data structures
bool Initialized; // Flagged after all passes are configured.
@ -91,6 +96,18 @@ class TargetPassConfig : public ImmutablePass {
CodeGenOpt::Level getOptLevel() const { return TM->getOptLevel(); }
/// setStartStopPasses - Set the StartAfter and StopAfter passes to allow
/// running only a portion of the normal code-gen pass sequence. If the
/// Start pass ID is zero, then compilation will begin at the normal point;
/// otherwise, clear the Started flag to indicate that passes should not be
/// added until the starting pass is seen. If the Stop pass ID is zero,
/// then compilation will continue to the end.
void setStartStopPasses(AnalysisID Start, AnalysisID Stop) {
StartAfter = Start;
StopAfter = Stop;
Started = (StartAfter == 0);
}
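// Call sketch: start the pipeline normally but stop after virtual-register
// rewriting (a null Stop ID would run to the end):
//
//   setStartStopPasses(/*Start=*/0, /*Stop=*/&VirtRegRewriterID);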
void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
bool getEnableTailMerge() const { return EnableTailMerge; }
@ -98,16 +115,19 @@ class TargetPassConfig : public ImmutablePass {
/// Allow the target to override a specific pass without overriding the pass
/// pipeline. When passes are added to the standard pipeline at the
/// point where StadardID is expected, add TargetID in its place.
void substitutePass(char &StandardID, char &TargetID);
/// point where StandardID is expected, add TargetID in its place.
void substitutePass(AnalysisID StandardID, AnalysisID TargetID);
/// Insert InsertedPassID pass after TargetPassID pass.
void insertPass(AnalysisID TargetPassID, AnalysisID InsertedPassID);
/// Allow the target to enable a specific standard pass by default.
void enablePass(char &ID) { substitutePass(ID, ID); }
void enablePass(AnalysisID PassID) { substitutePass(PassID, PassID); }
/// Allow the target to disable a specific standard pass by default.
void disablePass(char &ID) { substitutePass(ID, NoPassID); }
void disablePass(AnalysisID PassID) { substitutePass(PassID, 0); }
/// Return the pass ssubtituted for StandardID by the target.
/// Return the pass substituted for StandardID by the target.
/// If no substitution exists, return StandardID.
AnalysisID getPassSubstitution(AnalysisID StandardID) const;
@ -118,6 +138,9 @@ class TargetPassConfig : public ImmutablePass {
/// transforms following machine independent optimization.
virtual void addIRPasses();
/// Add passes to lower exception handling for the code generator.
void addPassesToHandleExceptions();
/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
virtual void addISelPrepare();
@ -172,6 +195,18 @@ class TargetPassConfig : public ImmutablePass {
/// LLVMTargetMachine provides standard regalloc passes for most targets.
virtual void addOptimizedRegAlloc(FunctionPass *RegAllocPass);
/// addPreRewrite - Add passes to the optimized register allocation pipeline
/// after register allocation is complete, but before virtual registers are
/// rewritten to physical registers.
///
/// These passes must preserve VirtRegMap and LiveIntervals, and when running
/// after RABasic or RAGreedy, they should take advantage of LiveRegMatrix.
/// When these passes run, VirtRegMap contains legal physreg assignments for
/// all virtual registers.
virtual bool addPreRewrite() {
return false;
}
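// Override sketch for a target (class and pass names hypothetical); passes
// added here must preserve VirtRegMap and LiveIntervals:
//
//   bool MyTargetPassConfig::addPreRewrite() {
//     addPass(&MyPostAllocOptID);
//     return true;
//   }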
/// addFinalizeRegAlloc - This method may be implemented by targets that want
/// to run passes within the regalloc pipeline, immediately after the register
/// allocation pass itself. These passes run as soon as virtual registers
@ -216,8 +251,12 @@ class TargetPassConfig : public ImmutablePass {
///
/// Add a CodeGen pass at this point in the pipeline after checking overrides.
/// Return the pass that was added, or NoPassID.
AnalysisID addPass(char &ID);
/// Return the pass that was added, or zero if no pass was added.
AnalysisID addPass(AnalysisID PassID);
/// Add a pass to the PassManager if that pass is supposed to be run, as
/// determined by the StartAfter and StopAfter options.
void addPass(Pass *P);
/// addMachinePasses helper to create the target-selected or overridden
/// regalloc pass.
@ -226,7 +265,7 @@ class TargetPassConfig : public ImmutablePass {
/// printAndVerify - Add a pass to dump then verify the machine function, if
/// those steps are enabled.
///
void printAndVerify(const char *Banner) const;
void printAndVerify(const char *Banner);
};
} // namespace llvm
@ -276,6 +315,10 @@ namespace llvm {
/// This pass is still in development
extern char &StrongPHIEliminationID;
/// LiveIntervals - This analysis keeps track of the live ranges of virtual
/// and physical registers.
extern char &LiveIntervalsID;
/// LiveStacks pass. An analysis keeping track of the liveness of stack slots.
extern char &LiveStacksID;
@ -297,6 +340,10 @@ namespace llvm {
/// basic blocks.
extern char &SpillPlacementID;
/// VirtRegRewriter pass. Rewrite virtual registers to physical registers as
/// assigned in VirtRegMap.
extern char &VirtRegRewriterID;
/// UnreachableMachineBlockElimination - This pass removes unreachable
/// machine basic blocks.
extern char &UnreachableMachineBlockElimID;
@ -342,10 +389,21 @@ namespace llvm {
/// branches.
extern char &BranchFolderPassID;
/// MachineFunctionPrinterPass - This pass prints out MachineInstr's.
extern char &MachineFunctionPrinterPassID;
/// TailDuplicate - Duplicate blocks with unconditional branches
/// into tails of their predecessors.
extern char &TailDuplicateID;
/// MachineTraceMetrics - This pass computes critical path and CPU resource
/// usage in an ensemble of traces.
extern char &MachineTraceMetricsID;
/// EarlyIfConverter - This pass performs if-conversion on SSA form by
/// inserting cmov instructions.
extern char &EarlyIfConverterID;
/// IfConverter - This pass performs machine code if conversion.
extern char &IfConverterID;

View File

@ -1,51 +0,0 @@
//===-------------- llvm/CodeGen/ProcessImplicitDefs.h ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
#define LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/ADT/SmallSet.h"
namespace llvm {
class MachineInstr;
class TargetInstrInfo;
class TargetRegisterInfo;
class MachineRegisterInfo;
class LiveVariables;
/// Process IMPLICIT_DEF instructions and make sure there is one implicit_def
/// for each use. Add isUndef marker to implicit_def defs and their uses.
class ProcessImplicitDefs : public MachineFunctionPass {
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineRegisterInfo *MRI;
LiveVariables *LV;
bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
unsigned OpIdx,
SmallSet<unsigned, 8> &ImpDefRegs);
public:
static char ID;
ProcessImplicitDefs() : MachineFunctionPass(ID) {
initializeProcessImplicitDefsPass(*PassRegistry::getPassRegistry());
}
virtual void getAnalysisUsage(AnalysisUsage &au) const;
virtual bool runOnMachineFunction(MachineFunction &fn);
};
}
#endif // LLVM_CODEGEN_PROCESSIMPLICITDEFS_H

View File

@ -0,0 +1,282 @@
//===-- RegisterPressure.h - Dynamic Register Pressure -*- C++ -*-------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RegisterPressure class which can be used to track
// MachineInstr level register pressure.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_REGISTERPRESSURE_H
#define LLVM_CODEGEN_REGISTERPRESSURE_H
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SparseSet.h"
namespace llvm {
class LiveIntervals;
class RegisterClassInfo;
class MachineInstr;
/// Base class for register pressure results.
struct RegisterPressure {
/// Map of max reg pressure indexed by pressure set ID, not class ID.
std::vector<unsigned> MaxSetPressure;
/// List of live in registers.
SmallVector<unsigned,8> LiveInRegs;
SmallVector<unsigned,8> LiveOutRegs;
/// Increase register pressure for each pressure set impacted by this register
/// class. Normally called by RegPressureTracker, but may be called manually
/// to account for live through (global liveness).
void increase(const TargetRegisterClass *RC, const TargetRegisterInfo *TRI);
/// Decrease register pressure for each pressure set impacted by this register
/// class. This is only useful to account for spilling or rematerialization.
void decrease(const TargetRegisterClass *RC, const TargetRegisterInfo *TRI);
void dump(const TargetRegisterInfo *TRI);
};
/// RegisterPressure computed within a region of instructions delimited by
/// TopIdx and BottomIdx. During pressure computation, the maximum pressure per
/// register pressure set is increased. Once pressure within a region is fully
/// computed, the live-in and live-out sets are recorded.
///
/// This is preferable to RegionPressure when LiveIntervals are available,
/// because delimiting regions by SlotIndex is more robust and convenient than
/// holding block iterators. The block contents can change without invalidating
/// the pressure result.
struct IntervalPressure : RegisterPressure {
/// Record the boundary of the region being tracked.
SlotIndex TopIdx;
SlotIndex BottomIdx;
void reset();
void openTop(SlotIndex NextTop);
void openBottom(SlotIndex PrevBottom);
};
/// RegisterPressure computed within a region of instructions delimited by
/// TopPos and BottomPos. This is a less precise version of IntervalPressure for
/// use when LiveIntervals are unavailable.
struct RegionPressure : RegisterPressure {
/// Record the boundary of the region being tracked.
MachineBasicBlock::const_iterator TopPos;
MachineBasicBlock::const_iterator BottomPos;
void reset();
void openTop(MachineBasicBlock::const_iterator PrevTop);
void openBottom(MachineBasicBlock::const_iterator PrevBottom);
};
/// An element of pressure difference that identifies the pressure set and
/// amount of increase or decrease in units of pressure.
struct PressureElement {
unsigned PSetID;
int UnitIncrease;
PressureElement(): PSetID(~0U), UnitIncrease(0) {}
PressureElement(unsigned id, int inc): PSetID(id), UnitIncrease(inc) {}
bool isValid() const { return PSetID != ~0U; }
};
/// Store the effects of a change in pressure on things that the MI scheduler
/// cares about.
///
/// Excess records the value of the largest difference in register units beyond
/// the target's pressure limits across the affected pressure sets, where
/// largest is defined as the absolute value of the difference. Negative
/// ExcessUnits indicates a reduction in pressure that had already exceeded the
/// target's limits.
///
/// CriticalMax records the largest increase in the tracker's max pressure that
/// exceeds the critical limit for some pressure set determined by the client.
///
/// CurrentMax records the largest increase in the tracker's max pressure that
/// exceeds the current limit for some pressure set determined by the client.
struct RegPressureDelta {
PressureElement Excess;
PressureElement CriticalMax;
PressureElement CurrentMax;
RegPressureDelta() {}
};
/// Track the current register pressure at some position in the instruction
/// stream, and remember the high water mark within the region traversed. This
/// does not automatically consider live-through ranges. The client may
/// independently adjust for global liveness.
///
/// Each RegPressureTracker only works within a MachineBasicBlock. Pressure can
/// be tracked across a larger region by storing a RegisterPressure result at
/// each block boundary and explicitly adjusting pressure to account for block
/// live-in and live-out register sets.
///
/// RegPressureTracker holds a reference to a RegisterPressure result that it
/// computes incrementally. During downward tracking, P.BottomIdx or P.BottomPos
/// is invalid until it reaches the end of the block or closeRegion() is
/// explicitly called. Similarly, P.TopIdx is invalid during upward
/// tracking. Changing direction has the side effect of closing region, and
/// traversing past TopIdx or BottomIdx reopens it.
class RegPressureTracker {
const MachineFunction *MF;
const TargetRegisterInfo *TRI;
const RegisterClassInfo *RCI;
const MachineRegisterInfo *MRI;
const LiveIntervals *LIS;
/// We currently only allow pressure tracking within a block.
const MachineBasicBlock *MBB;
/// Track the max pressure within the region traversed so far.
RegisterPressure &P;
/// Run in two modes depending on whether constructed with IntervalPressure
/// or RegionPressure. If RequireIntervals is false, LIS is ignored.
bool RequireIntervals;
/// Register pressure corresponds to liveness before this instruction
/// iterator. It may point to the end of the block rather than an instruction.
MachineBasicBlock::const_iterator CurrPos;
/// Pressure map indexed by pressure set ID, not class ID.
std::vector<unsigned> CurrSetPressure;
/// List of live registers.
SparseSet<unsigned> LivePhysRegs;
SparseSet<unsigned, VirtReg2IndexFunctor> LiveVirtRegs;
public:
RegPressureTracker(IntervalPressure &rp) :
MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(true) {}
RegPressureTracker(RegionPressure &rp) :
MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(false) {}
void init(const MachineFunction *mf, const RegisterClassInfo *rci,
const LiveIntervals *lis, const MachineBasicBlock *mbb,
MachineBasicBlock::const_iterator pos);
/// Force liveness of registers. Particularly useful to initialize the
/// livein/out state of the tracker before the first call to advance/recede.
void addLiveRegs(ArrayRef<unsigned> Regs);
/// Get the MI position corresponding to this register pressure.
MachineBasicBlock::const_iterator getPos() const { return CurrPos; }
// Reset the MI position corresponding to the register pressure. This allows
// schedulers to move instructions above the RegPressureTracker's
// CurrPos. Since the pressure is computed before CurrPos, the iterator
// position changes while pressure does not.
void setPos(MachineBasicBlock::const_iterator Pos) { CurrPos = Pos; }
/// Recede across the previous instruction.
bool recede();
/// Advance across the current instruction.
bool advance();
/// Finalize the region boundaries and record live-ins and live-outs.
void closeRegion();
/// Get the resulting register pressure over the traversed region.
/// This result is complete if either advance() or recede() has returned true,
/// or if closeRegion() was explicitly invoked.
RegisterPressure &getPressure() { return P; }
/// Get the register set pressure at the current position, which may be less
/// than the pressure across the traversed region.
std::vector<unsigned> &getRegSetPressureAtPos() { return CurrSetPressure; }
void discoverPhysLiveIn(unsigned Reg);
void discoverPhysLiveOut(unsigned Reg);
void discoverVirtLiveIn(unsigned Reg);
void discoverVirtLiveOut(unsigned Reg);
bool isTopClosed() const;
bool isBottomClosed() const;
void closeTop();
void closeBottom();
/// Consider the pressure increase caused by traversing this instruction
/// bottom-up. Find the pressure set with the most change beyond its pressure
/// limit based on the tracker's current pressure, and record the number of
/// excess register units of that pressure set introduced by this instruction.
void getMaxUpwardPressureDelta(const MachineInstr *MI,
RegPressureDelta &Delta,
ArrayRef<PressureElement> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit);
/// Consider the pressure increase caused by traversing this instruction
/// top-down. Find the pressure set with the most change beyond its pressure
/// limit based on the tracker's current pressure, and record the number of
/// excess register units of that pressure set introduced by this instruction.
void getMaxDownwardPressureDelta(const MachineInstr *MI,
RegPressureDelta &Delta,
ArrayRef<PressureElement> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit);
/// Find the pressure set with the most change beyond its pressure limit after
/// traversing this instruction either upward or downward depending on the
/// closed end of the current region.
void getMaxPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta,
ArrayRef<PressureElement> CriticalPSets,
ArrayRef<unsigned> MaxPressureLimit) {
if (isTopClosed())
return getMaxDownwardPressureDelta(MI, Delta, CriticalPSets,
MaxPressureLimit);
assert(isBottomClosed() && "Uninitialized pressure tracker");
return getMaxUpwardPressureDelta(MI, Delta, CriticalPSets,
MaxPressureLimit);
}
/// Get the pressure of each PSet after traversing this instruction bottom-up.
void getUpwardPressure(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult);
/// Get the pressure of each PSet after traversing this instruction top-down.
void getDownwardPressure(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult);
void getPressureAfterInst(const MachineInstr *MI,
std::vector<unsigned> &PressureResult,
std::vector<unsigned> &MaxPressureResult) {
if (isTopClosed())
return getUpwardPressure(MI, PressureResult, MaxPressureResult);
assert(isBottomClosed() && "Uninitialized pressure tracker");
return getDownwardPressure(MI, PressureResult, MaxPressureResult);
}
protected:
void increasePhysRegPressure(ArrayRef<unsigned> Regs);
void decreasePhysRegPressure(ArrayRef<unsigned> Regs);
void increaseVirtRegPressure(ArrayRef<unsigned> Regs);
void decreaseVirtRegPressure(ArrayRef<unsigned> Regs);
void bumpUpwardPressure(const MachineInstr *MI);
void bumpDownwardPressure(const MachineInstr *MI);
};
} // end namespace llvm
#endif
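// Usage sketch: compute bottom-up pressure for one block (assumes 'MF',
// 'RCI', and 'LIS' references plus a const MachineBasicBlock pointer 'MBB';
// IntervalPressure requires LiveIntervals):
//
//   IntervalPressure Pressure;
//   RegPressureTracker RPTracker(Pressure);
//   RPTracker.init(&MF, &RCI, &LIS, MBB, MBB->end());
//   while (RPTracker.getPos() != MBB->begin())
//     RPTracker.recede();
//   RPTracker.closeRegion(); // finalizes live-ins/outs and MaxSetPressure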

View File

@ -117,8 +117,9 @@ namespace llvm {
}
}
bool operator==(const SDep &Other) const {
if (Dep != Other.Dep || Latency != Other.Latency) return false;
/// Return true if the specified SDep is equivalent except for latency.
bool overlaps(const SDep &Other) const {
if (Dep != Other.Dep) return false;
switch (Dep.getInt()) {
case Data:
case Anti:
@ -133,6 +134,10 @@ namespace llvm {
llvm_unreachable("Invalid dependency kind!");
}
bool operator==(const SDep &Other) const {
return overlaps(Other) && Latency == Other.Latency;
}
bool operator!=(const SDep &Other) const {
return !operator==(Other);
}
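// Usage sketch: when re-adding an edge, overlaps() identifies the same
// dependence even if the latencies differ:
//
//   if (NewDep.overlaps(OldDep) && NewDep != OldDep)
//     ; // same edge, different latency: keep the larger of the two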
@ -272,6 +277,9 @@ namespace llvm {
unsigned Depth; // Node depth.
unsigned Height; // Node height.
public:
unsigned TopReadyCycle; // Cycle relative to start when node is ready.
unsigned BotReadyCycle; // Cycle relative to end when node is ready.
const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
const TargetRegisterClass *CopySrcRC;
@ -287,7 +295,7 @@ namespace llvm {
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
/// SUnit - Construct an SUnit for post-regalloc scheduling to represent
/// a MachineInstr.
@ -301,7 +309,7 @@ namespace llvm {
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
/// SUnit - Construct a placeholder SUnit.
SUnit()
@ -314,7 +322,7 @@ namespace llvm {
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
CopyDstRC(NULL), CopySrcRC(NULL) {}
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
/// setNode - Assign the representative SDNode for this SUnit.
/// This may be used during pre-regalloc scheduling.
@ -552,12 +560,6 @@ namespace llvm {
///
virtual void computeLatency(SUnit *SU) = 0;
/// ComputeOperandLatency - Override dependence edge latency using
/// operand use/def information
///
virtual void computeOperandLatency(SUnit *, SUnit *,
SDep&) const { }
/// ForceUnitLatencies - Return true if all scheduling edges should be given
/// a latency value of one. The default is to return false; schedulers may
/// override this as needed.

View File

@ -28,6 +28,7 @@ namespace llvm {
class MachineLoopInfo;
class MachineDominatorTree;
class LiveIntervals;
class RegPressureTracker;
/// LoopDependencies - This class analyzes loop-oriented register
/// dependencies, which are used to guide scheduling decisions.
@ -35,7 +36,6 @@ namespace llvm {
/// scheduled as soon as possible after the variable's last use.
///
class LoopDependencies {
const MachineLoopInfo &MLI;
const MachineDominatorTree &MDT;
public:
@ -43,9 +43,7 @@ namespace llvm {
LoopDeps;
LoopDeps Deps;
LoopDependencies(const MachineLoopInfo &mli,
const MachineDominatorTree &mdt) :
MLI(mli), MDT(mdt) {}
LoopDependencies(const MachineDominatorTree &mdt) : MDT(mdt) {}
/// VisitLoop - Clear out any previous state and analyze the given loop.
///
@ -105,7 +103,7 @@ namespace llvm {
VReg2SUnit(unsigned reg, SUnit *su): VirtReg(reg), SU(su) {}
unsigned getSparseSetKey() const {
unsigned getSparseSetIndex() const {
return TargetRegisterInfo::virtReg2Index(VirtReg);
}
};
@ -160,7 +158,7 @@ namespace llvm {
/// compares ValueT's, only unsigned keys. This allows the set to be cleared
/// between scheduling regions in constant time as long as ValueT does not
/// require a destructor.
typedef SparseSet<VReg2SUnit> VReg2SUnitMap;
typedef SparseSet<VReg2SUnit, VirtReg2IndexFunctor> VReg2SUnitMap;
/// ScheduleDAGInstrs - A ScheduleDAG subclass for scheduling lists of
/// MachineInstrs.
@ -229,7 +227,7 @@ namespace llvm {
///
LoopDependencies LoopRegs;
/// DbgValues - Remember instruction that preceeds DBG_VALUE.
/// DbgValues - Remember instruction that precedes DBG_VALUE.
/// These are generated by buildSchedGraph but persist so they can be
/// referenced when emitting the final schedule.
typedef std::vector<std::pair<MachineInstr *, MachineInstr *> >
@ -275,7 +273,7 @@ namespace llvm {
/// buildSchedGraph - Build SUnits from the MachineBasicBlock that we are
/// given as input.
void buildSchedGraph(AliasAnalysis *AA);
void buildSchedGraph(AliasAnalysis *AA, RegPressureTracker *RPTracker = 0);
/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to scheduling barrier. We want to
@ -290,11 +288,15 @@ namespace llvm {
///
virtual void computeLatency(SUnit *SU);
/// computeOperandLatency - Override dependence edge latency using
/// computeOperandLatency - Return dependence edge latency using
/// operand use/def information
///
virtual void computeOperandLatency(SUnit *Def, SUnit *Use,
SDep& dep) const;
/// FindMin may be set to get the minimum vs. expected latency. Minimum
/// latency is used for scheduling groups, while expected latency is for
/// instruction cost and critical path.
virtual unsigned computeOperandLatency(SUnit *Def, SUnit *Use,
const SDep& dep,
bool FindMin = false) const;
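// Call sketch from a subclass: minimum latency for scheduling groups versus
// expected latency for cost/critical-path estimates, per the comment above:
//
//   unsigned MinLat =
//       computeOperandLatency(DefSU, UseSU, Dep, /*FindMin=*/true);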
/// schedule - Order nodes according to selected style, filling
/// in the Sequence member.
@ -321,10 +323,6 @@ namespace llvm {
void addPhysRegDeps(SUnit *SU, unsigned OperIdx);
void addVRegDefDeps(SUnit *SU, unsigned OperIdx);
void addVRegUseDeps(SUnit *SU, unsigned OperIdx);
VReg2SUnitMap::iterator findVRegDef(unsigned VirtReg) {
return VRegDefs.find(TargetRegisterInfo::virtReg2Index(VirtReg));
}
};
/// newSUnit - Creates a new SUnit and returns a pointer to it.

View File

@ -46,6 +46,8 @@ class ScheduleHazardRecognizer {
/// atIssueLimit - Return true if no more instructions may be issued in this
/// cycle.
///
/// FIXME: remove this once MachineScheduler is the only client.
virtual bool atIssueLimit() const { return false; }
/// getHazardType - Return the hazard type of emitting this node. There are
@ -55,7 +57,7 @@ class ScheduleHazardRecognizer {
/// other instruction is available, issue it first.
/// * NoopHazard: issuing this instruction would break the program. If
/// some other instruction can be issued, do so, otherwise issue a noop.
virtual HazardType getHazardType(SUnit *m, int Stalls) {
virtual HazardType getHazardType(SUnit *m, int Stalls = 0) {
return NoHazard;
}

View File

@ -177,6 +177,44 @@ class SelectionDAG {
/// DbgInfo - Tracks dbg_value information through SDISel.
SDDbgInfo *DbgInfo;
public:
/// DAGUpdateListener - Clients of various APIs that cause global effects on
/// the DAG can optionally implement this interface. This allows the clients
/// to handle the various sorts of updates that happen.
///
/// A DAGUpdateListener automatically registers itself with DAG when it is
/// constructed, and removes itself when destroyed in RAII fashion.
struct DAGUpdateListener {
DAGUpdateListener *const Next;
SelectionDAG &DAG;
explicit DAGUpdateListener(SelectionDAG &D)
: Next(D.UpdateListeners), DAG(D) {
DAG.UpdateListeners = this;
}
virtual ~DAGUpdateListener() {
assert(DAG.UpdateListeners == this &&
"DAGUpdateListeners must be destroyed in LIFO order");
DAG.UpdateListeners = Next;
}
/// NodeDeleted - The node N that was deleted and, if E is not null, an
/// equivalent node E that replaced it.
virtual void NodeDeleted(SDNode *N, SDNode *E);
/// NodeUpdated - The node N that was updated.
virtual void NodeUpdated(SDNode *N);
};
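// Usage sketch (hypothetical subclass): registration and removal happen
// automatically for the lifetime of the object:
//
//   struct DeletedCounter : SelectionDAG::DAGUpdateListener {
//     unsigned Count;
//     explicit DeletedCounter(SelectionDAG &D)
//       : DAGUpdateListener(D), Count(0) {}
//     virtual void NodeDeleted(SDNode *, SDNode *) { ++Count; }
//   };
//   { DeletedCounter DC(DAG); DAG.RemoveDeadNodes(Dead); }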
private:
/// DAGUpdateListener is a friend so it can manipulate the listener stack.
friend struct DAGUpdateListener;
/// UpdateListeners - Linked list of registered DAGUpdateListener instances.
/// This stack is maintained by DAGUpdateListener RAII.
DAGUpdateListener *UpdateListeners;
/// setGraphColorHelper - Implementation of setSubgraphColor.
/// Return whether we had to truncate the search.
///
@ -384,6 +422,8 @@ class SelectionDAG {
int Offset = 0, unsigned char TargetFlags=0) {
return getConstantPool(C, VT, Align, Offset, true, TargetFlags);
}
SDValue getTargetIndex(int Index, EVT VT, int64_t Offset = 0,
unsigned char TargetFlags = 0);
// When generating a branch to a BB, we don't in general know enough
// to provide debug info for the BB at that time, so keep this one around.
SDValue getBasicBlock(MachineBasicBlock *MBB);
@ -817,30 +857,14 @@ class SelectionDAG {
SDDbgValue *getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
DebugLoc DL, unsigned O);
/// DAGUpdateListener - Clients of various APIs that cause global effects on
/// the DAG can optionally implement this interface. This allows the clients
/// to handle the various sorts of updates that happen.
class DAGUpdateListener {
public:
virtual ~DAGUpdateListener();
/// NodeDeleted - The node N that was deleted and, if E is not null, an
/// equivalent node E that replaced it.
virtual void NodeDeleted(SDNode *N, SDNode *E) = 0;
/// NodeUpdated - The node N that was updated.
virtual void NodeUpdated(SDNode *N) = 0;
};
/// RemoveDeadNode - Remove the specified node from the system. If any of its
/// operands then becomes dead, remove them as well. Inform UpdateListener
/// for each node deleted.
void RemoveDeadNode(SDNode *N, DAGUpdateListener *UpdateListener = 0);
void RemoveDeadNode(SDNode *N);
/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes,
DAGUpdateListener *UpdateListener = 0);
void RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes);
/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
/// This can cause recursive merging of nodes in the DAG. Use the first
@ -857,24 +881,19 @@ class SelectionDAG {
/// to be given new uses. These new uses of From are left in place, and
/// not automatically transferred to To.
///
void ReplaceAllUsesWith(SDValue From, SDValue Op,
DAGUpdateListener *UpdateListener = 0);
void ReplaceAllUsesWith(SDNode *From, SDNode *To,
DAGUpdateListener *UpdateListener = 0);
void ReplaceAllUsesWith(SDNode *From, const SDValue *To,
DAGUpdateListener *UpdateListener = 0);
void ReplaceAllUsesWith(SDValue From, SDValue Op);
void ReplaceAllUsesWith(SDNode *From, SDNode *To);
void ReplaceAllUsesWith(SDNode *From, const SDValue *To);
/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
/// uses of other values produced by From.Val alone.
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To,
DAGUpdateListener *UpdateListener = 0);
void ReplaceAllUsesOfValueWith(SDValue From, SDValue To);
/// ReplaceAllUsesOfValuesWith - Like ReplaceAllUsesOfValueWith, but
/// for multiple values at once. This correctly handles the case where
/// there is an overlap between the From values and the To values.
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
unsigned Num,
DAGUpdateListener *UpdateListener = 0);
unsigned Num);
/// AssignTopologicalOrder - Topological-sort the AllNodes list and
/// assign a unique node id for each node in the DAG based on their
@ -1031,7 +1050,7 @@ class SelectionDAG {
private:
bool RemoveNodeFromCSEMaps(SDNode *N);
void AddModifiedNodeToCSEMaps(SDNode *N, DAGUpdateListener *UpdateListener);
void AddModifiedNodeToCSEMaps(SDNode *N);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op, void *&InsertPos);
SDNode *FindModifiedNodeSlot(SDNode *N, SDValue Op1, SDValue Op2,
void *&InsertPos);

View File

@ -172,53 +172,22 @@ class SelectionDAGISel : public MachineFunctionPass {
///
unsigned DAGSize;
/// ISelPosition - Node iterator marking the current position of
/// instruction selection as it procedes through the topologically-sorted
/// node list.
SelectionDAG::allnodes_iterator ISelPosition;
/// ISelUpdater - helper class to handle updates of the
/// instruction selection graph.
class ISelUpdater : public SelectionDAG::DAGUpdateListener {
virtual void anchor();
SelectionDAG::allnodes_iterator &ISelPosition;
public:
explicit ISelUpdater(SelectionDAG::allnodes_iterator &isp)
: ISelPosition(isp) {}
/// NodeDeleted - Handle nodes deleted from the graph. If the
/// node being deleted is the current ISelPosition node, update
/// ISelPosition.
///
virtual void NodeDeleted(SDNode *N, SDNode *E) {
if (ISelPosition == SelectionDAG::allnodes_iterator(N))
++ISelPosition;
}
/// NodeUpdated - Ignore updates for now.
virtual void NodeUpdated(SDNode *N) {}
};
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDValue F, SDValue T) {
ISelUpdater ISU(ISelPosition);
CurDAG->ReplaceAllUsesOfValueWith(F, T, &ISU);
CurDAG->ReplaceAllUsesOfValueWith(F, T);
}
/// ReplaceUses - replace all uses of the old nodes F with the use
/// of the new nodes T.
void ReplaceUses(const SDValue *F, const SDValue *T, unsigned Num) {
ISelUpdater ISU(ISelPosition);
CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num, &ISU);
CurDAG->ReplaceAllUsesOfValuesWith(F, T, Num);
}
/// ReplaceUses - replace all uses of the old node F with the use
/// of the new node T.
void ReplaceUses(SDNode *F, SDNode *T) {
ISelUpdater ISU(ISelPosition);
CurDAG->ReplaceAllUsesWith(F, T, &ISU);
CurDAG->ReplaceAllUsesWith(F, T);
}

View File

@ -74,6 +74,10 @@ namespace ISD {
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool isScalarToVector(const SDNode *N);
/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool allOperandsUndef(const SDNode *N);
} // end llvm::ISD namespace
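// Usage sketch in a DAG combine (assumes 'N' and 'DAG' in scope):
//
//   if (ISD::allOperandsUndef(N))
//     return DAG.getUNDEF(N->getValueType(0));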
//===----------------------------------------------------------------------===//
@ -142,7 +146,8 @@ class SDValue {
inline bool isMachineOpcode() const;
inline unsigned getMachineOpcode() const;
inline const DebugLoc getDebugLoc() const;
inline void dump() const;
inline void dumpr() const;
/// reachesChainWithoutSideEffects - Return true if this operand (which must
/// be a chain) reaches the specified operand without crossing any
@ -802,7 +807,12 @@ inline bool SDValue::hasOneUse() const {
inline const DebugLoc SDValue::getDebugLoc() const {
return Node->getDebugLoc();
}
inline void SDValue::dump() const {
return Node->dump();
}
inline void SDValue::dumpr() const {
return Node->dumpr();
}
// Define inline functions from the SDUse class.
inline void SDUse::set(const SDValue &V) {
@ -1339,6 +1349,29 @@ class ConstantPoolSDNode : public SDNode {
}
};
/// Completely target-dependent object reference.
class TargetIndexSDNode : public SDNode {
unsigned char TargetFlags;
int Index;
int64_t Offset;
friend class SelectionDAG;
public:
TargetIndexSDNode(int Idx, EVT VT, int64_t Ofs, unsigned char TF)
: SDNode(ISD::TargetIndex, DebugLoc(), getSDVTList(VT)),
TargetFlags(TF), Index(Idx), Offset(Ofs) {}
public:
unsigned char getTargetFlags() const { return TargetFlags; }
int getIndex() const { return Index; }
int64_t getOffset() const { return Offset; }
static bool classof(const TargetIndexSDNode*) { return true; }
static bool classof(const SDNode *N) {
return N->getOpcode() == ISD::TargetIndex;
}
};
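// Creation sketch (values hypothetical; 'TLI' is the TargetLowering): these
// nodes are built with SelectionDAG::getTargetIndex and have meaning only to
// the target:
//
//   SDValue TI = DAG.getTargetIndex(/*Index=*/0, TLI.getPointerTy(),
//                                   /*Offset=*/8);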
class BasicBlockSDNode : public SDNode {
MachineBasicBlock *MBB;
friend class SelectionDAG;

View File

@ -76,7 +76,6 @@ namespace llvm {
/// SlotIndex - An opaque wrapper around machine indexes.
class SlotIndex {
friend class SlotIndexes;
friend struct DenseMapInfo<SlotIndex>;
enum Slot {
/// Basic block boundary. Used for live ranges entering and leaving a
@ -121,11 +120,6 @@ namespace llvm {
return static_cast<Slot>(lie.getInt());
}
static inline unsigned getHashValue(const SlotIndex &v) {
void *ptrVal = v.lie.getOpaqueValue();
return (unsigned((intptr_t)ptrVal)) ^ (unsigned((intptr_t)ptrVal) >> 9);
}
public:
enum {
/// The default distance between instructions as returned by distance().
@ -133,14 +127,6 @@ namespace llvm {
InstrDist = 4 * Slot_Count
};
static inline SlotIndex getEmptyKey() {
return SlotIndex(0, 1);
}
static inline SlotIndex getTombstoneKey() {
return SlotIndex(0, 2);
}
/// Construct an invalid index.
SlotIndex() : lie(0, 0) {}
@ -293,23 +279,6 @@ namespace llvm {
};
/// DenseMapInfo specialization for SlotIndex.
template <>
struct DenseMapInfo<SlotIndex> {
static inline SlotIndex getEmptyKey() {
return SlotIndex::getEmptyKey();
}
static inline SlotIndex getTombstoneKey() {
return SlotIndex::getTombstoneKey();
}
static inline unsigned getHashValue(const SlotIndex &v) {
return SlotIndex::getHashValue(v);
}
static inline bool isEqual(const SlotIndex &LHS, const SlotIndex &RHS) {
return (LHS == RHS);
}
};
template <> struct isPodLike<SlotIndex> { static const bool value = true; };
@ -344,7 +313,6 @@ namespace llvm {
IndexList indexList;
MachineFunction *mf;
unsigned functionSize;
typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
Mi2IndexMap mi2iMap;
@ -402,19 +370,6 @@ namespace llvm {
return SlotIndex(&indexList.back(), 0);
}
/// Returns the distance between the highest and lowest indexes allocated
/// so far.
unsigned getIndexesLength() const {
assert(indexList.front().getIndex() == 0 &&
"Initial index isn't zero?");
return indexList.back().getIndex();
}
/// Returns the number of instructions in the function.
unsigned getFunctionSize() const {
return functionSize;
}
/// Returns true if the given machine instr is mapped to an index,
/// otherwise returns false.
bool hasIndex(const MachineInstr *instr) const {
@ -444,7 +399,7 @@ namespace llvm {
}
/// getIndexBefore - Returns the index of the last indexed instruction
/// before MI, or the the start index of its basic block.
/// before MI, or the start index of its basic block.
/// MI is not required to have an index.
SlotIndex getIndexBefore(const MachineInstr *MI) const {
const MachineBasicBlock *MBB = MI->getParent();
@ -590,7 +545,7 @@ namespace llvm {
nextItr = getIndexAfter(mi).listEntry();
prevItr = prior(nextItr);
} else {
// Insert mi's index immediately after the preceeding instruction.
// Insert mi's index immediately after the preceding instruction.
prevItr = getIndexBefore(mi).listEntry();
nextItr = llvm::next(prevItr);
}

View File

@ -33,6 +33,8 @@ namespace llvm {
class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
bool UseInitArray;
public:
virtual ~TargetLoweringObjectFileELF() {}
@ -66,6 +68,7 @@ class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
getCFIPersonalitySymbol(const GlobalValue *GV, Mangler *Mang,
MachineModuleInfo *MMI) const;
void InitializeELF(bool UseInitArray_);
virtual const MCSection *
getStaticCtorSection(unsigned Priority = 65535) const;
virtual const MCSection *

View File

@ -68,34 +68,38 @@ namespace llvm {
v2i32 = 22, // 2 x i32
v4i32 = 23, // 4 x i32
v8i32 = 24, // 8 x i32
v1i64 = 25, // 1 x i64
v2i64 = 26, // 2 x i64
v4i64 = 27, // 4 x i64
v8i64 = 28, // 8 x i64
v16i32 = 25, // 16 x i32
v1i64 = 26, // 1 x i64
v2i64 = 27, // 2 x i64
v4i64 = 28, // 4 x i64
v8i64 = 29, // 8 x i64
v16i64 = 30, // 16 x i64
v2f16 = 29, // 2 x f16
v2f32 = 30, // 2 x f32
v4f32 = 31, // 4 x f32
v8f32 = 32, // 8 x f32
v2f64 = 33, // 2 x f64
v4f64 = 34, // 4 x f64
v2f16 = 31, // 2 x f16
v2f32 = 32, // 2 x f32
v4f32 = 33, // 4 x f32
v8f32 = 34, // 8 x f32
v2f64 = 35, // 2 x f64
v4f64 = 36, // 4 x f64
FIRST_VECTOR_VALUETYPE = v2i8,
LAST_VECTOR_VALUETYPE = v4f64,
FIRST_INTEGER_VECTOR_VALUETYPE = v2i8,
LAST_INTEGER_VECTOR_VALUETYPE = v16i64,
FIRST_FP_VECTOR_VALUETYPE = v2f16,
LAST_FP_VECTOR_VALUETYPE = v4f64,
x86mmx = 35, // This is an X86 MMX value
x86mmx = 37, // This is an X86 MMX value
Glue = 36, // This glues nodes together during pre-RA sched
Glue = 38, // This glues nodes together during pre-RA sched
isVoid = 37, // This has no value
isVoid = 39, // This has no value
Untyped = 38, // This value takes a register, but has
Untyped = 40, // This value takes a register, but has
// unspecified type. The register class
// will be determined by the opcode.
LAST_VALUETYPE = 39, // This always remains at the end of the list.
LAST_VALUETYPE = 41, // This always remains at the end of the list.
// This is the current maximum for LAST_VALUETYPE.
// MVT::MAX_ALLOWED_VALUETYPE is used for asserts and to size bit vectors
@ -153,15 +157,16 @@ namespace llvm {
bool isFloatingPoint() const {
return ((SimpleTy >= MVT::FIRST_FP_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_VALUETYPE) ||
(SimpleTy >= MVT::FIRST_FP_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_FP_VECTOR_VALUETYPE));
}
/// isInteger - Return true if this is an integer, or a vector integer type.
bool isInteger() const {
return ((SimpleTy >= MVT::FIRST_INTEGER_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VALUETYPE) ||
(SimpleTy >= MVT::v2i8 && SimpleTy <= MVT::v8i64));
(SimpleTy >= MVT::FIRST_INTEGER_VECTOR_VALUETYPE &&
SimpleTy <= MVT::LAST_INTEGER_VECTOR_VALUETYPE));
}
/// isVector - Return true if this is a vector value type.
@ -170,6 +175,37 @@ namespace llvm {
SimpleTy <= MVT::LAST_VECTOR_VALUETYPE);
}
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 ||
SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 ||
SimpleTy == MVT::v2f32);
}
/// is128BitVector - Return true if this is a 128-bit vector type.
bool is128BitVector() const {
return (SimpleTy == MVT::v16i8 || SimpleTy == MVT::v8i16 ||
SimpleTy == MVT::v4i32 || SimpleTy == MVT::v2i64 ||
SimpleTy == MVT::v4f32 || SimpleTy == MVT::v2f64);
}
/// is256BitVector - Return true if this is a 256-bit vector type.
bool is256BitVector() const {
return (SimpleTy == MVT::v8f32 || SimpleTy == MVT::v4f64 ||
SimpleTy == MVT::v32i8 || SimpleTy == MVT::v16i16 ||
SimpleTy == MVT::v8i32 || SimpleTy == MVT::v4i64);
}
/// is512BitVector - Return true if this is a 512-bit vector type.
bool is512BitVector() const {
return (SimpleTy == MVT::v8i64 || SimpleTy == MVT::v16i32);
}
/// is1024BitVector - Return true if this is a 1024-bit vector type.
bool is1024BitVector() const {
return (SimpleTy == MVT::v16i64);
}
/// isPow2VectorType - Returns true if the given vector is a power of 2.
bool isPow2VectorType() const {
unsigned NElts = getVectorNumElements();
@ -196,7 +232,7 @@ namespace llvm {
MVT getVectorElementType() const {
switch (SimpleTy) {
default:
return (MVT::SimpleValueType)(MVT::INVALID_SIMPLE_VALUE_TYPE);
llvm_unreachable("Not a vector MVT!");
case v2i8 :
case v4i8 :
case v8i8 :
@ -208,11 +244,13 @@ namespace llvm {
case v16i16: return i16;
case v2i32:
case v4i32:
case v8i32: return i32;
case v8i32:
case v16i32: return i32;
case v1i64:
case v2i64:
case v4i64:
case v8i64: return i64;
case v8i64:
case v16i64: return i64;
case v2f16: return f16;
case v2f32:
case v4f32:
@ -225,10 +263,12 @@ namespace llvm {
unsigned getVectorNumElements() const {
switch (SimpleTy) {
default:
return ~0U;
llvm_unreachable("Not a vector MVT!");
case v32i8: return 32;
case v16i8:
case v16i16: return 16;
case v16i16:
case v16i32:
case v16i64:return 16;
case v8i8 :
case v8i16:
case v8i32:
@ -295,7 +335,9 @@ namespace llvm {
case v4i64:
case v8f32:
case v4f64: return 256;
case v16i32:
case v8i64: return 512;
case v16i64:return 1024;
}
}
@ -368,12 +410,14 @@ namespace llvm {
if (NumElements == 2) return MVT::v2i32;
if (NumElements == 4) return MVT::v4i32;
if (NumElements == 8) return MVT::v8i32;
if (NumElements == 16) return MVT::v16i32;
break;
case MVT::i64:
if (NumElements == 1) return MVT::v1i64;
if (NumElements == 2) return MVT::v2i64;
if (NumElements == 4) return MVT::v4i64;
if (NumElements == 8) return MVT::v8i64;
if (NumElements == 16) return MVT::v16i64;
break;
case MVT::f16:
if (NumElements == 2) return MVT::v2f16;
@ -487,32 +531,27 @@ namespace llvm {
/// is64BitVector - Return true if this is a 64-bit vector type.
bool is64BitVector() const {
if (!isSimple())
return isExtended64BitVector();
return (V == MVT::v8i8 || V==MVT::v4i16 || V==MVT::v2i32 ||
V == MVT::v1i64 || V==MVT::v2f32);
return isSimple() ? V.is64BitVector() : isExtended64BitVector();
}
/// is128BitVector - Return true if this is a 128-bit vector type.
bool is128BitVector() const {
if (!isSimple())
return isExtended128BitVector();
return (V==MVT::v16i8 || V==MVT::v8i16 || V==MVT::v4i32 ||
V==MVT::v2i64 || V==MVT::v4f32 || V==MVT::v2f64);
return isSimple() ? V.is128BitVector() : isExtended128BitVector();
}
/// is256BitVector - Return true if this is a 256-bit vector type.
inline bool is256BitVector() const {
if (!isSimple())
return isExtended256BitVector();
return (V == MVT::v8f32 || V == MVT::v4f64 || V == MVT::v32i8 ||
V == MVT::v16i16 || V == MVT::v8i32 || V == MVT::v4i64);
bool is256BitVector() const {
return isSimple() ? V.is256BitVector() : isExtended256BitVector();
}
/// is512BitVector - Return true if this is a 512-bit vector type.
inline bool is512BitVector() const {
return isSimple() ? (V == MVT::v8i64) : isExtended512BitVector();
bool is512BitVector() const {
return isSimple() ? V.is512BitVector() : isExtended512BitVector();
}
/// is1024BitVector - Return true if this is a 1024-bit vector type.
bool is1024BitVector() const {
return isSimple() ? V.is1024BitVector() : isExtended1024BitVector();
}
/// isOverloaded - Return true if this is an overloaded type for TableGen.
@ -705,6 +744,7 @@ namespace llvm {
bool isExtended128BitVector() const;
bool isExtended256BitVector() const;
bool isExtended512BitVector() const;
bool isExtended1024BitVector() const;
EVT getExtendedVectorElementType() const;
unsigned getExtendedVectorNumElements() const;
unsigned getExtendedSizeInBits() const;
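
The hunk above renumbers the MVT enum to make room for the new 512-bit v16i32 and 1024-bit v16i64 types, and moves the per-width vector predicates onto MVT so that EVT simply forwards to them. A minimal sketch, assuming the LLVM 3.2-era ValueTypes.h shown here and linking against that snapshot's libraries, of how the new types and predicates behave:

#include "llvm/CodeGen/ValueTypes.h"
#include <cassert>

using namespace llvm;

int main() {
  // The new wide types round-trip through getVectorVT as expected.
  assert(MVT::getVectorVT(MVT::i32, 16) == MVT::v16i32);
  assert(MVT::getVectorVT(MVT::i64, 16) == MVT::v16i64);

  // EVT now forwards the size predicates to the MVT helpers added above.
  EVT VT = MVT::v16i64;
  assert(VT.is1024BitVector() && VT.getSizeInBits() == 1024);
  assert(EVT(MVT::v16i32).is512BitVector());
  return 0;
}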

View File

@ -45,22 +45,24 @@ def v16i16 : ValueType<256, 21>; // 16 x i16 vector value
def v2i32 : ValueType<64 , 22>; // 2 x i32 vector value
def v4i32 : ValueType<128, 23>; // 4 x i32 vector value
def v8i32 : ValueType<256, 24>; // 8 x i32 vector value
def v1i64 : ValueType<64 , 25>; // 1 x i64 vector value
def v2i64 : ValueType<128, 26>; // 2 x i64 vector value
def v4i64 : ValueType<256, 27>; // 4 x i64 vector value
def v8i64 : ValueType<512, 28>; // 8 x i64 vector value
def v16i32 : ValueType<512, 25>; // 16 x i32 vector value
def v1i64 : ValueType<64 , 26>; // 1 x i64 vector value
def v2i64 : ValueType<128, 27>; // 2 x i64 vector value
def v4i64 : ValueType<256, 28>; // 4 x i64 vector value
def v8i64 : ValueType<512, 29>; // 8 x i64 vector value
def v16i64 : ValueType<1024,30>; // 16 x i64 vector value
def v2f16 : ValueType<32 , 29>; // 2 x f16 vector value
def v2f32 : ValueType<64 , 30>; // 2 x f32 vector value
def v4f32 : ValueType<128, 31>; // 4 x f32 vector value
def v8f32 : ValueType<256, 32>; // 8 x f32 vector value
def v2f64 : ValueType<128, 33>; // 2 x f64 vector value
def v4f64 : ValueType<256, 34>; // 4 x f64 vector value
def v2f16 : ValueType<32 , 31>; // 2 x f16 vector value
def v2f32 : ValueType<64 , 32>; // 2 x f32 vector value
def v4f32 : ValueType<128, 33>; // 4 x f32 vector value
def v8f32 : ValueType<256, 34>; // 8 x f32 vector value
def v2f64 : ValueType<128, 35>; // 2 x f64 vector value
def v4f64 : ValueType<256, 36>; // 4 x f64 vector value
def x86mmx : ValueType<64 , 35>; // X86 MMX value
def FlagVT : ValueType<0 , 36>; // Pre-RA sched glue
def isVoid : ValueType<0 , 37>; // Produces no value
def untyped: ValueType<8 , 38>; // Produces an untyped value
def x86mmx : ValueType<64 , 37>; // X86 MMX value
def FlagVT : ValueType<0 , 38>; // Pre-RA sched glue
def isVoid : ValueType<0 , 39>; // Produces no value
def untyped: ValueType<8 , 40>; // Produces an untyped value
def MetadataVT: ValueType<0, 250>; // Metadata

View File

@ -137,8 +137,8 @@ class Constant : public User {
static Constant *getNullValue(Type* Ty);
/// @returns the value for an integer constant of the given type that has all
/// its bits set to true.
/// @returns the value for an integer or vector of integer constant of the
/// given type that has all its bits set to true.
/// @brief Get the all ones value
static Constant *getAllOnesValue(Type* Ty);
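
The reworded comment makes explicit that getAllOnesValue() also covers vectors of integers, not just scalar integer types. A small sketch under that reading, using the 3.2-era top-level headers; the helper name is illustrative:

#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

void allOnes(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  Constant *ScalarOnes = Constant::getAllOnesValue(I32);            // i32 -1
  Constant *VectorOnes = Constant::getAllOnesValue(VectorType::get(I32, 4));
  (void)ScalarOnes; (void)VectorOnes;      // <4 x i32> <-1, -1, -1, -1>
}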

View File

@ -917,6 +917,17 @@ class ConstantExpr : public Constant {
return getLShr(C1, C2, true);
}
/// getBinOpIdentity - Return the identity for the given binary operation,
/// i.e. a constant C such that X op C = X and C op X = X for every X. It
/// returns null if the operator doesn't have an identity.
static Constant *getBinOpIdentity(unsigned Opcode, Type *Ty);
/// getBinOpAbsorber - Return the absorbing element for the given binary
/// operation, i.e. a constant C such that X op C = C and C op X = C for
/// every X. For example, this returns zero for integer multiplication.
/// It returns null if the operator doesn't have an absorbing element.
static Constant *getBinOpAbsorber(unsigned Opcode, Type *Ty);
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
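
To make the two new helpers concrete, here is a minimal sketch of what the comments above imply they return for common opcodes: zero is the identity for add and the absorber for mul, and operators without a two-sided identity yield a null pointer.

#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
#include <cassert>

using namespace llvm;

void binOpConstants(LLVMContext &Ctx) {
  Type *I32 = Type::getInt32Ty(Ctx);
  // X + 0 == X and 0 + X == X for every X.
  Constant *AddIdentity = ConstantExpr::getBinOpIdentity(Instruction::Add, I32);
  assert(AddIdentity && AddIdentity->isNullValue());
  // X * 0 == 0 and 0 * X == 0 for every X.
  Constant *MulAbsorber = ConstantExpr::getBinOpAbsorber(Instruction::Mul, I32);
  assert(MulAbsorber && MulAbsorber->isNullValue());
  // Subtraction is not commutative, so it has no two-sided identity.
  assert(ConstantExpr::getBinOpIdentity(Instruction::Sub, I32) == 0);
}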

View File

@ -1,4 +1,4 @@
//===--- llvm/Analysis/DIBuilder.h - Debug Information Builder --*- C++ -*-===//
//===--- llvm/DIBuilder.h - Debug Information Builder -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -127,8 +127,8 @@ namespace llvm {
StringRef Name = StringRef());
/// createReferenceType - Create debugging information entry for a c++
/// style reference.
DIType createReferenceType(DIType RTy);
/// style reference or rvalue reference type.
DIType createReferenceType(unsigned Tag, DIType RTy);
/// createTypedef - Create debugging information entry for a typedef.
/// @param Ty Original type.
@ -177,7 +177,7 @@ namespace llvm {
/// @param OffsetInBits Member offset.
/// @param Flags Flags to encode member attribute, e.g. private
/// @param Ty Parent type.
/// @param PropertyName Name of the Objective C property assoicated with
/// @param PropertyName Name of the Objective C property associated with
/// this ivar.
/// @param GetterName Name of the Objective C property getter selector.
/// @param SetterName Name of the Objective C property setter selector.
@ -218,11 +218,11 @@ namespace llvm {
/// @param PropertyAttributes Objective C property attributes.
/// @param Ty Type.
DIObjCProperty createObjCProperty(StringRef Name,
DIFile File, unsigned LineNumber,
StringRef GetterName,
StringRef SetterName,
unsigned PropertyAttributes,
DIType Ty);
DIFile File, unsigned LineNumber,
StringRef GetterName,
StringRef SetterName,
unsigned PropertyAttributes,
DIType Ty);
/// createClassType - Create debugging information entry for a class.
/// @param Scope Scope in which this class is defined.
@ -329,10 +329,12 @@ namespace llvm {
/// @param SizeInBits Member size.
/// @param AlignInBits Member alignment.
/// @param Elements Enumeration elements.
/// @param Flags Flags (e.g. forward decl)
DIType createEnumerationType(DIDescriptor Scope, StringRef Name,
DIFile File, unsigned LineNumber,
uint64_t SizeInBits,
uint64_t AlignInBits, DIArray Elements);
uint64_t SizeInBits, uint64_t AlignInBits,
DIArray Elements, DIType ClassType,
unsigned Flags);
/// createSubroutineType - Create subroutine type.
/// @param File File in which this subroutine is defined.
@ -348,8 +350,8 @@ namespace llvm {
DIType createTemporaryType(DIFile F);
/// createForwardDecl - Create a temporary forward-declared type.
DIType createForwardDecl(unsigned Tag, StringRef Name, DIFile F,
unsigned Line, unsigned RuntimeLang = 0);
DIType createForwardDecl(unsigned Tag, StringRef Name, DIDescriptor Scope,
DIFile F, unsigned Line, unsigned RuntimeLang = 0);
/// retainType - Retain DIType in a module even if it is not referenced
/// through debug info anchors.
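
Several creator signatures change in this hunk; in particular createForwardDecl() now takes the enclosing scope. A hedged sketch of a call under the new signature: the file name, the producer string, and the choice of the DIFile itself as the scope are illustrative assumptions, not taken from this commit.

#include "llvm/DIBuilder.h"
#include "llvm/Module.h"
#include "llvm/Support/Dwarf.h"

using namespace llvm;

void forwardDeclareStruct(Module &M) {
  DIBuilder DIB(M);
  DIB.createCompileUnit(dwarf::DW_LANG_C99, "t.c", "/tmp", "producer",
                        /*isOptimized=*/false, /*Flags=*/"", /*RV=*/0);
  DIFile File = DIB.createFile("t.c", "/tmp");
  // The scope argument is new; here the file itself serves as the scope.
  DIType Fwd = DIB.createForwardDecl(dwarf::DW_TAG_structure_type, "Opaque",
                                     File, File, /*Line=*/1);
  (void)Fwd;
}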

View File

@ -46,8 +46,8 @@ namespace llvm {
class DIObjCProperty;
/// DIDescriptor - A thin wrapper around MDNode to access encoded debug info.
/// This should not be stored in a container, because underly MDNode may
/// change in certain situations.
/// This should not be stored in a container, because the underlying MDNode
/// may change in certain situations.
class DIDescriptor {
public:
enum {
@ -104,12 +104,6 @@ namespace llvm {
return getUnsignedField(0) & ~LLVMDebugVersionMask;
}
/// print - print descriptor.
void print(raw_ostream &OS) const;
/// dump - print descriptor to dbgs() with a newline.
void dump() const;
bool isDerivedType() const;
bool isCompositeType() const;
bool isBasicType() const;
@ -130,10 +124,18 @@ namespace llvm {
bool isTemplateTypeParameter() const;
bool isTemplateValueParameter() const;
bool isObjCProperty() const;
/// print - print descriptor.
void print(raw_ostream &OS) const;
/// dump - print descriptor to dbgs() with a newline.
void dump() const;
};
/// DISubrange - This is used to represent ranges, for array bounds.
class DISubrange : public DIDescriptor {
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DISubrange(const MDNode *N = 0) : DIDescriptor(N) {}
@ -155,10 +157,11 @@ namespace llvm {
/// DIScope - A base class for various scopes.
class DIScope : public DIDescriptor {
virtual void anchor();
protected:
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DIScope(const MDNode *N = 0) : DIDescriptor (N) {}
virtual ~DIScope() {}
StringRef getFilename() const;
StringRef getDirectory() const;
@ -166,7 +169,8 @@ namespace llvm {
/// DICompileUnit - A wrapper for a compile unit.
class DICompileUnit : public DIScope {
virtual void anchor();
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DICompileUnit(const MDNode *N = 0) : DIScope(N) {}
@ -196,17 +200,12 @@ namespace llvm {
/// Verify - Verify that a compile unit is well formed.
bool Verify() const;
/// print - print compile unit.
void print(raw_ostream &OS) const;
/// dump - print compile unit to dbgs() with a newline.
void dump() const;
};
/// DIFile - This is a wrapper for a file.
class DIFile : public DIScope {
virtual void anchor();
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const {} // FIXME: Output something?
public:
explicit DIFile(const MDNode *N = 0) : DIScope(N) {
if (DbgNode && !isFile())
@ -224,6 +223,8 @@ namespace llvm {
/// FIXME: it seems strange that this doesn't have either a reference to the
/// type/precision or a file/line pair for location info.
class DIEnumerator : public DIDescriptor {
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DIEnumerator(const MDNode *N = 0) : DIDescriptor(N) {}
@ -235,19 +236,17 @@ namespace llvm {
/// FIXME: Types should be factored much better so that CV qualifiers and
/// others do not require a huge and empty descriptor full of zeros.
class DIType : public DIScope {
virtual void anchor();
protected:
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
// This ctor is used when the Tag has already been validated by a derived
// ctor.
DIType(const MDNode *N, bool, bool) : DIScope(N) {}
public:
/// Verify - Verify that a type descriptor is well formed.
bool Verify() const;
explicit DIType(const MDNode *N);
explicit DIType() {}
virtual ~DIType() {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
StringRef getName() const { return getStringField(2); }
@ -314,17 +313,10 @@ namespace llvm {
/// this descriptor.
void replaceAllUsesWith(DIDescriptor &D);
void replaceAllUsesWith(MDNode *D);
/// print - print type.
void print(raw_ostream &OS) const;
/// dump - print type to dbgs() with a newline.
void dump() const;
};
/// DIBasicType - A basic type, like 'int' or 'float'.
class DIBasicType : public DIType {
virtual void anchor();
public:
explicit DIBasicType(const MDNode *N = 0) : DIType(N) {}
@ -332,18 +324,13 @@ namespace llvm {
/// Verify - Verify that a basic type descriptor is well formed.
bool Verify() const;
/// print - print basic type.
void print(raw_ostream &OS) const;
/// dump - print basic type to dbgs() with a newline.
void dump() const;
};
/// DIDerivedType - A simple derived type, like a const qualified type,
/// a typedef, a pointer or reference, etc.
class DIDerivedType : public DIType {
virtual void anchor();
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
protected:
explicit DIDerivedType(const MDNode *N, bool, bool)
: DIType(N, true, true) {}
@ -401,19 +388,14 @@ namespace llvm {
/// Verify - Verify that a derived type descriptor is well formed.
bool Verify() const;
/// print - print derived type.
void print(raw_ostream &OS) const;
/// dump - print derived type to dbgs() with a newline.
void dump() const;
};
/// DICompositeType - This descriptor holds a type that can refer to multiple
/// other types, like a function or struct.
/// FIXME: Why is this a DIDerivedType??
class DICompositeType : public DIDerivedType {
virtual void anchor();
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DICompositeType(const MDNode *N = 0)
: DIDerivedType(N, true, true) {
@ -430,12 +412,6 @@ namespace llvm {
/// Verify - Verify that a composite type descriptor is well formed.
bool Verify() const;
/// print - print composite type.
void print(raw_ostream &OS) const;
/// dump - print composite type to dbgs() with a newline.
void dump() const;
};
/// DITemplateTypeParameter - This is a wrapper for template type parameter.
@ -477,7 +453,8 @@ namespace llvm {
/// DISubprogram - This is a wrapper for a subprogram (e.g. a function).
class DISubprogram : public DIScope {
virtual void anchor();
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DISubprogram(const MDNode *N = 0) : DIScope(N) {}
@ -576,12 +553,6 @@ namespace llvm {
/// Verify - Verify that a subprogram descriptor is well formed.
bool Verify() const;
/// print - print subprogram.
void print(raw_ostream &OS) const;
/// dump - print subprogram to dbgs() with a newline.
void dump() const;
/// describes - Return true if this subprogram provides debugging
/// information for the function F.
bool describes(const Function *F);
@ -597,6 +568,8 @@ namespace llvm {
/// DIGlobalVariable - This is a wrapper for a global variable.
class DIGlobalVariable : public DIDescriptor {
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DIGlobalVariable(const MDNode *N = 0) : DIDescriptor(N) {}
@ -634,17 +607,13 @@ namespace llvm {
/// Verify - Verify that a global variable descriptor is well formed.
bool Verify() const;
/// print - print global variable.
void print(raw_ostream &OS) const;
/// dump - print global variable to dbgs() with a newline.
void dump() const;
};
/// DIVariable - This is a wrapper for a variable (e.g. parameter, local,
/// global etc).
class DIVariable : public DIDescriptor {
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DIVariable(const MDNode *N = 0)
: DIDescriptor(N) {}
@ -706,18 +675,11 @@ namespace llvm {
/// information for an inlined function's arguments.
bool isInlinedFnArgument(const Function *CurFn);
/// print - print variable.
void print(raw_ostream &OS) const;
void printExtendedName(raw_ostream &OS) const;
/// dump - print variable to dbgs() with a newline.
void dump() const;
};
/// DILexicalBlock - This is a wrapper for a lexical block.
class DILexicalBlock : public DIScope {
virtual void anchor();
public:
explicit DILexicalBlock(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
@ -736,7 +698,6 @@ namespace llvm {
/// DILexicalBlockFile - This is a wrapper for a lexical block with
/// a filename change.
class DILexicalBlockFile : public DIScope {
virtual void anchor();
public:
explicit DILexicalBlockFile(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getScope().getContext(); }
@ -756,7 +717,6 @@ namespace llvm {
/// DINameSpace - A wrapper for a C++ style name space.
class DINameSpace : public DIScope {
virtual void anchor();
public:
explicit DINameSpace(const MDNode *N = 0) : DIScope(N) {}
DIScope getContext() const { return getFieldAs<DIScope>(1); }
@ -794,6 +754,8 @@ namespace llvm {
};
class DIObjCProperty : public DIDescriptor {
friend class DIDescriptor;
void printInternal(raw_ostream &OS) const;
public:
explicit DIObjCProperty(const MDNode *N) : DIDescriptor(N) { }
@ -830,12 +792,6 @@ namespace llvm {
/// Verify - Verify that a derived type descriptor is well formed.
bool Verify() const;
/// print - print derived type.
void print(raw_ostream &OS) const;
/// dump - print derived type to dbgs() with a newline.
void dump() const;
};
/// getDISubprogram - Find subprogram that is enclosing this scope.
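
The net effect of this hunk is to centralize printing: the per-class print()/dump() pairs disappear, and DIDescriptor grows a single print()/dump() that, presumably via the new printInternal() hooks the subclasses befriend, renders the right detail for each descriptor kind. A minimal sketch of the caller-side simplification:

#include "llvm/DebugInfo.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void dumpAnyDescriptor(const MDNode *N) {
  // One entry point now serves every descriptor kind.
  DIDescriptor D(N);
  D.print(errs());
  errs() << '\n';
}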

View File

@ -15,9 +15,9 @@
#ifndef LLVM_DEBUGINFO_DICONTEXT_H
#define LLVM_DEBUGINFO_DICONTEXT_H
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DataTypes.h"
#include <cstring>
namespace llvm {
@ -25,27 +25,52 @@ class raw_ostream;
/// DILineInfo - a format-neutral container for source line information.
class DILineInfo {
const char *FileName;
SmallString<16> FileName;
SmallString<16> FunctionName;
uint32_t Line;
uint32_t Column;
public:
DILineInfo() : FileName("<invalid>"), Line(0), Column(0) {}
DILineInfo(const char *fileName, uint32_t line, uint32_t column)
: FileName(fileName), Line(line), Column(column) {}
DILineInfo()
: FileName("<invalid>"), FunctionName("<invalid>"),
Line(0), Column(0) {}
DILineInfo(const SmallString<16> &fileName,
const SmallString<16> &functionName,
uint32_t line, uint32_t column)
: FileName(fileName), FunctionName(functionName),
Line(line), Column(column) {}
const char *getFileName() const { return FileName; }
const char *getFileName() { return FileName.c_str(); }
const char *getFunctionName() { return FunctionName.c_str(); }
uint32_t getLine() const { return Line; }
uint32_t getColumn() const { return Column; }
bool operator==(const DILineInfo &RHS) const {
return Line == RHS.Line && Column == RHS.Column &&
std::strcmp(FileName, RHS.FileName) == 0;
FileName.equals(RHS.FileName) &&
FunctionName.equals(RHS.FunctionName);
}
bool operator!=(const DILineInfo &RHS) const {
return !(*this == RHS);
}
};
/// DILineInfoSpecifier - controls which fields of DILineInfo container
/// should be filled with data.
class DILineInfoSpecifier {
const uint32_t Flags; // Or'ed flags that set the info we want to fetch.
public:
enum Specification {
FileLineInfo = 1 << 0,
AbsoluteFilePath = 1 << 1,
FunctionName = 1 << 2
};
// Use file/line info by default.
DILineInfoSpecifier(uint32_t flags = FileLineInfo) : Flags(flags) {}
bool needs(Specification spec) const {
return (Flags & spec) > 0;
}
};
class DIContext {
public:
virtual ~DIContext();
@ -60,7 +85,8 @@ class DIContext {
virtual void dump(raw_ostream &OS) = 0;
virtual DILineInfo getLineInfoForAddress(uint64_t address) = 0;
virtual DILineInfo getLineInfoForAddress(uint64_t address,
DILineInfoSpecifier specifier = DILineInfoSpecifier()) = 0;
};
}
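
DILineInfo now owns its strings (so they outlive the parser's buffers) and gains a function name, and the new DILineInfoSpecifier lets callers say which fields should be filled in. A minimal usage sketch; the DIContext instance is assumed to have been built elsewhere, for example from an object file's DWARF sections:

#include "llvm/DebugInfo/DIContext.h"

using namespace llvm;

void reportAddress(DIContext &Ctx, uint64_t Address) {
  // Request the file/line pair plus the enclosing function's name.
  uint32_t Flags = DILineInfoSpecifier::FileLineInfo |
                   DILineInfoSpecifier::FunctionName;
  DILineInfo Info = Ctx.getLineInfoForAddress(Address,
                                              DILineInfoSpecifier(Flags));
  const char *File = Info.getFileName();   // backed by the SmallStrings
  const char *Func = Info.getFunctionName();
  (void)File; (void)Func;
}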

View File

@ -354,7 +354,7 @@ class ExecutionEngine {
/// variable, possibly emitting it to memory if needed. This is used by the
/// Emitter.
virtual void *getOrEmitGlobalVariable(const GlobalVariable *GV) {
return getPointerToGlobal((GlobalValue*)GV);
return getPointerToGlobal((const GlobalValue *)GV);
}
/// Registers a listener to be called back on various events within

View File

@ -23,7 +23,7 @@ extern "C" void LLVMLinkInInterpreter();
namespace {
struct ForceInterpreterLinking {
ForceInterpreterLinking() {
// We must reference the passes in such a way that compilers will not
// We must reference the interpreter in such a way that compilers will not
// delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough
// to know that getenv() never returns -1, this will do the job.

View File

@ -23,7 +23,7 @@ extern "C" void LLVMLinkInJIT();
namespace {
struct ForceJITLinking {
ForceJITLinking() {
// We must reference the passes in such a way that compilers will not
// We must reference JIT in such a way that compilers will not
// delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough
// to know that getenv() never returns -1, this will do the job.

View File

@ -23,7 +23,7 @@ extern "C" void LLVMLinkInMCJIT();
namespace {
struct ForceMCJITLinking {
ForceMCJITLinking() {
// We must reference the passes in such a way that compilers will not
// We must reference MCJIT in such a way that compilers will not
// delete it all as dead code, even with whole program optimization,
// yet is effectively a NO-OP. As the compiler isn't smart enough
// to know that getenv() never returns -1, this will do the job.
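
All three headers use the same trick the comments describe. A standalone sketch of the idiom, with hypothetical struct and object names, shows why it defeats dead-code elimination: the compiler cannot prove the getenv() test false, so the reference to the link-in function has to survive and the linker must pull the component in.

#include <cstdlib>

extern "C" void LLVMLinkInJIT();   // defined in the JIT library

namespace {
  struct ForceLinkingSketch {
    ForceLinkingSketch() {
      // getenv() never returns (char*)-1, so the call below is dead in
      // practice, but no compiler can prove it and drop the reference.
      if (std::getenv("bar") != (char*) -1)
        return;
      LLVMLinkInJIT();
    }
  } ForceLinkingSketchObj;         // hypothetical name for illustration
}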

View File

@ -65,12 +65,15 @@ class RuntimeDyld {
RuntimeDyld(RTDyldMemoryManager*);
~RuntimeDyld();
/// Load an in-memory object file into the dynamic linker.
bool loadObject(MemoryBuffer *InputBuffer);
// Get the address of our local copy of the symbol. This may or may not
// be the address used for relocation (clients can copy the data around
// and resolve relocations based on where they put it).
/// Get the address of our local copy of the symbol. This may or may not
/// be the address used for relocation (clients can copy the data around
/// and resolve relocations based on where they put it).
void *getSymbolAddress(StringRef Name);
// Resolve the relocations for all symbols we currently know about.
/// Resolve the relocations for all symbols we currently know about.
void resolveRelocations();
/// mapSectionAddress - map a section to its target address space value.

View File

@ -420,8 +420,8 @@ class Function : public GlobalValue,
void dropAllReferences();
/// hasAddressTaken - returns true if there are any uses of this function
/// other than direct calls or invokes to it. Optionally passes back the
/// offending user for diagnostic purposes.
/// other than direct calls or invokes to it, or blockaddress expressions.
/// Optionally passes back an offending user for diagnostic purposes.
///
bool hasAddressTaken(const User** = 0) const;

View File

@ -164,6 +164,12 @@ class GlobalValue : public Constant {
return Linkage == CommonLinkage;
}
/// isDiscardableIfUnused - Whether the definition of this global may be
/// discarded if it is not used in its compilation unit.
static bool isDiscardableIfUnused(LinkageTypes Linkage) {
return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage);
}
/// mayBeOverridden - Whether the definition of this global may be replaced
/// by something non-equivalent at link time. For example, if a function has
/// weak linkage then the code defining it may be replaced by different code.
@ -221,6 +227,10 @@ class GlobalValue : public Constant {
void setLinkage(LinkageTypes LT) { Linkage = LT; }
LinkageTypes getLinkage() const { return Linkage; }
bool isDiscardableIfUnused() const {
return isDiscardableIfUnused(Linkage);
}
bool mayBeOverridden() const { return mayBeOverridden(Linkage); }
bool isWeakForLinker() const { return isWeakForLinker(Linkage); }

Some files were not shown because too many files have changed in this diff.