Upgrade our copy of llvm/clang to r132879, from upstream's trunk.

Dimitry Andric 2011-06-12 18:01:31 +00:00
commit bd5abe1968
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=223017
678 changed files with 35455 additions and 19867 deletions

View File

@ -282,6 +282,8 @@ typedef enum {
LLVMRealPredicateTrue /**< Always true (always folded) */
} LLVMRealPredicate;
void LLVMInitializeCore(LLVMPassRegistryRef R);
/*===-- Error handling ----------------------------------------------------===*/
@ -1164,6 +1166,7 @@ namespace llvm {
for (LLVMValueRef *I = Vals, *E = Vals + Length; I != E; ++I)
cast<T>(*I);
#endif
(void)Length;
return reinterpret_cast<T**>(Vals);
}

View File

@ -7,16 +7,16 @@
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header provides public interface to a disassembler library. *|
|* This header provides a public interface to a disassembler library. *|
|* LLVM provides an implementation of this interface. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_DISASSEMBLER_H
#define LLVM_C_DISASSEMBLER_H 1
#define LLVM_C_DISASSEMBLER_H
#include <stddef.h>
#include "llvm/Support/DataTypes.h"
#include <stddef.h>
/**
* An opaque reference to a disassembler context.
@ -38,14 +38,11 @@ typedef void *LLVMDisasmContextRef;
* will be the instruction width. The information is returned in TagBuf and is
* Triple specific with its specific information defined by the value of
* TagType for that Triple. If symbolic information is returned the function
* returns 1 else it returns 0.
* returns 1, otherwise it returns 0.
*/
typedef int (*LLVMOpInfoCallback)(void *DisInfo,
uint64_t PC,
uint64_t Offset,
uint64_t Size,
int TagType,
void *TagBuf);
typedef int (*LLVMOpInfoCallback)(void *DisInfo, uint64_t PC,
uint64_t Offset, uint64_t Size,
int TagType, void *TagBuf);
/**
* The initial support in LLVM MC for the most general form of a relocatable
@ -68,10 +65,11 @@ typedef int (*LLVMOpInfoCallback)(void *DisInfo,
* operands like "_foo@GOT", ":lower16:_foo", etc.
*/
struct LLVMOpInfoSymbol1 {
uint64_t Present; /* 1 if this symbol is present */
char *Name; /* symbol name if not NULL */
uint64_t Value; /* symbol value if name is NULL */
uint64_t Present; /* 1 if this symbol is present */
char *Name; /* symbol name if not NULL */
uint64_t Value; /* symbol value if name is NULL */
};
struct LLVMOpInfo1 {
struct LLVMOpInfoSymbol1 AddSymbol;
struct LLVMOpInfoSymbol1 SubtractSymbol;
@ -92,11 +90,11 @@ struct LLVMOpInfo1 {
/**
* The type for the symbol lookup function. This may be called by the
* disassembler for such things like adding a comment for a PC plus a constant
* disassembler for things like adding a comment for a PC plus a constant
* offset load instruction to use a symbol name instead of a load address value.
* It is passed the block information saved when the disassembler context is
* created and a value of a symbol to look up. If no symbol is found NULL is
* to be returned.
* returned.
*/
typedef const char *(*LLVMSymbolLookupCallback)(void *DisInfo,
uint64_t SymbolValue);
@ -107,40 +105,33 @@ extern "C" {
/**
* Create a disassembler for the TripleName. Symbolic disassembly is supported
* by passing a block of information in the DisInfo parameter and specifing the
* TagType and call back functions as described above. These can all be passed
* as NULL. If successful this returns a disassembler context if not it
* by passing a block of information in the DisInfo parameter and specifying the
* TagType and callback functions as described above. These can all be passed
* as NULL. If successful, this returns a disassembler context. If not, it
* returns NULL.
*/
extern LLVMDisasmContextRef
LLVMCreateDisasm(const char *TripleName,
void *DisInfo,
int TagType,
LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp);
LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo,
int TagType, LLVMOpInfoCallback GetOpInfo,
LLVMSymbolLookupCallback SymbolLookUp);
/**
* Dispose of a disassembler context.
*/
extern void
LLVMDisasmDispose(LLVMDisasmContextRef DC);
void LLVMDisasmDispose(LLVMDisasmContextRef DC);
/**
* Disassmble a single instruction using the disassembler context specified in
* the parameter DC. The bytes of the instruction are specified in the parameter
* Bytes, and contains at least BytesSize number of bytes. The instruction is
* at the address specified by the PC parameter. If a valid instruction can be
* disassembled its string is returned indirectly in OutString which whos size
* is specified in the parameter OutStringSize. This function returns the
* number of bytes in the instruction or zero if there was no valid instruction.
* Disassemble a single instruction using the disassembler context specified in
* the parameter DC. The bytes of the instruction are specified in the
* parameter Bytes, and contains at least BytesSize number of bytes. The
* instruction is at the address specified by the PC parameter. If a valid
* instruction can be disassembled, its string is returned indirectly in
* OutString whose size is specified in the parameter OutStringSize. This
* function returns the number of bytes in the instruction or zero if there was
* no valid instruction.
*/
extern size_t
LLVMDisasmInstruction(LLVMDisasmContextRef DC,
uint8_t *Bytes,
uint64_t BytesSize,
uint64_t PC,
char *OutString,
size_t OutStringSize);
size_t LLVMDisasmInstruction(LLVMDisasmContextRef DC, uint8_t *Bytes,
uint64_t BytesSize, uint64_t PC,
char *OutString, size_t OutStringSize);
#ifdef __cplusplus
}
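
A minimal sketch of driving the two entry points above (assuming the required targets have already been initialized; the triple and byte values are illustrative, and all symbolic-info callbacks are left NULL, which the header permits):

#include "llvm-c/Disassembler.h"
#include <stdio.h>

int main(void) {
  /* push rbp; mov rbp, rsp -- illustrative x86-64 encoding */
  uint8_t Bytes[] = { 0x55, 0x48, 0x89, 0xe5 };
  LLVMDisasmContextRef DC =
      LLVMCreateDisasm("x86_64-unknown-linux-gnu", /*DisInfo=*/0,
                       /*TagType=*/0, /*GetOpInfo=*/0, /*SymbolLookUp=*/0);
  if (!DC)
    return 1;
  char Insn[64];
  for (uint64_t Pos = 0; Pos < sizeof(Bytes); ) {
    size_t Len = LLVMDisasmInstruction(DC, Bytes + Pos, sizeof(Bytes) - Pos,
                                       /*PC=*/Pos, Insn, sizeof(Insn));
    if (Len == 0)
      break;                        /* zero means no valid instruction */
    printf("0x%llx:%s\n", (unsigned long long)Pos, Insn);
    Pos += Len;
  }
  LLVMDisasmDispose(DC);
  return 0;
}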

View File

@ -671,17 +671,10 @@ class FastFoldingSetNode : public FoldingSetNode {
// Partial specializations of FoldingSetTrait.
template<typename T> struct FoldingSetTrait<T*> {
static inline void Profile(const T *X, FoldingSetNodeID &ID) {
static inline void Profile(T *X, FoldingSetNodeID &ID) {
ID.AddPointer(X);
}
};
template<typename T> struct FoldingSetTrait<const T*> {
static inline void Profile(const T *X, FoldingSetNodeID &ID) {
ID.AddPointer(X);
}
};
} // End of namespace llvm.
#endif

View File

@ -0,0 +1,158 @@
//===- llvm/ADT/PackedVector.h - Packed values vector -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the PackedVector class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_PACKEDVECTOR_H
#define LLVM_ADT_PACKEDVECTOR_H
#include "llvm/ADT/BitVector.h"
#include <limits>
namespace llvm {
template <typename T, unsigned BitNum, bool isSigned>
class PackedVectorBase;
// This won't be necessary if we can specialize members without specializing
// the parent template.
template <typename T, unsigned BitNum>
class PackedVectorBase<T, BitNum, false> {
protected:
static T getValue(const llvm::BitVector &Bits, unsigned Idx) {
T val = T();
for (unsigned i = 0; i != BitNum; ++i)
val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
return val;
}
static void setValue(llvm::BitVector &Bits, unsigned Idx, T val) {
assert((val >> BitNum) == 0 && "value is too big");
for (unsigned i = 0; i != BitNum; ++i)
Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
}
};
template <typename T, unsigned BitNum>
class PackedVectorBase<T, BitNum, true> {
protected:
static T getValue(const llvm::BitVector &Bits, unsigned Idx) {
T val = T();
for (unsigned i = 0; i != BitNum-1; ++i)
val = T(val | ((Bits[(Idx << (BitNum-1)) + i] ? 1UL : 0UL) << i));
if (Bits[(Idx << (BitNum-1)) + BitNum-1])
val = ~val;
return val;
}
static void setValue(llvm::BitVector &Bits, unsigned Idx, T val) {
if (val < 0) {
val = ~val;
Bits.set((Idx << (BitNum-1)) + BitNum-1);
}
assert((val >> (BitNum-1)) == 0 && "value is too big");
for (unsigned i = 0; i != BitNum-1; ++i)
Bits[(Idx << (BitNum-1)) + i] = val & (T(1) << i);
}
};
/// \brief Store a vector of values using a specific number of bits for each
/// value. Both signed and unsigned types can be used, e.g.
/// @code
/// PackedVector<signed, 2> vec;
/// @endcode
/// will create a vector accepting values -2, -1, 0, 1. Any other value will hit
/// an assertion.
template <typename T, unsigned BitNum>
class PackedVector : public PackedVectorBase<T, BitNum,
std::numeric_limits<T>::is_signed> {
llvm::BitVector Bits;
typedef PackedVectorBase<T, BitNum, std::numeric_limits<T>::is_signed> base;
public:
class reference {
PackedVector &Vec;
const unsigned Idx;
reference(); // Undefined
public:
reference(PackedVector &vec, unsigned idx) : Vec(vec), Idx(idx) { }
reference &operator=(T val) {
Vec.setValue(Vec.Bits, Idx, val);
return *this;
}
operator T() {
return Vec.getValue(Vec.Bits, Idx);
}
};
PackedVector() { }
explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) { }
bool empty() const { return Bits.empty(); }
unsigned size() const { return Bits.size() >> (BitNum-1); }
void clear() { Bits.clear(); }
void resize(unsigned N) { Bits.resize(N << (BitNum-1)); }
void reserve(unsigned N) { Bits.reserve(N << (BitNum-1)); }
PackedVector &reset() {
Bits.reset();
return *this;
}
void push_back(T val) {
resize(size()+1);
(*this)[size()-1] = val;
}
reference operator[](unsigned Idx) {
return reference(*this, Idx);
}
T operator[](unsigned Idx) const {
return base::getValue(Bits, Idx);
}
bool operator==(const PackedVector &RHS) const {
return Bits == RHS.Bits;
}
bool operator!=(const PackedVector &RHS) const {
return Bits != RHS.Bits;
}
const PackedVector &operator=(const PackedVector &RHS) {
Bits = RHS.Bits;
return *this;
}
PackedVector &operator|=(const PackedVector &RHS) {
Bits |= RHS.Bits;
return *this;
}
void swap(PackedVector &RHS) {
Bits.swap(RHS.Bits);
}
};
// Leave BitNum=0 undefined.
template <typename T>
class PackedVector<T, 0>;
} // end llvm namespace
#endif
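
A short usage sketch of the container above, using only what the header declares (a 2-bit unsigned vector stores values 0-3; anything larger trips the "value is too big" assertion in setValue):

#include "llvm/ADT/PackedVector.h"
#include <cassert>

void packedVectorDemo() {
  llvm::PackedVector<unsigned, 2> Vec; // 2 bits per element in a BitVector
  Vec.push_back(3);
  Vec.push_back(1);
  Vec[1] = 2;                          // proxy reference forwards to setValue()
  assert(Vec.size() == 2);
  assert(Vec[0] == 3 && Vec[1] == 2);  // reads go through getValue()
}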

View File

@ -46,7 +46,14 @@ namespace llvm {
// integer works around this bug.
static size_t min(size_t a, size_t b) { return a < b ? a : b; }
static size_t max(size_t a, size_t b) { return a > b ? a : b; }
// Workaround memcmp issue with null pointers (undefined behavior)
// by providing a specialized version
static int compareMemory(const char *Lhs, const char *Rhs, size_t Length) {
if (Length == 0) { return 0; }
return ::memcmp(Lhs,Rhs,Length);
}
public:
/// @name Constructors
/// @{
@ -56,11 +63,17 @@ namespace llvm {
/// Construct a string ref from a cstring.
/*implicit*/ StringRef(const char *Str)
: Data(Str), Length(::strlen(Str)) {}
: Data(Str) {
assert(Str && "StringRef cannot be built from a NULL argument");
Length = ::strlen(Str); // invoking strlen(NULL) is undefined behavior
}
/// Construct a string ref from a pointer and length.
/*implicit*/ StringRef(const char *data, size_t length)
: Data(data), Length(length) {}
: Data(data), Length(length) {
assert((data || length == 0) &&
"StringRef cannot be built from a NULL argument with non-null length");
}
/// Construct a string ref from an std::string.
/*implicit*/ StringRef(const std::string &Str)
@ -104,7 +117,7 @@ namespace llvm {
/// compare() when the relative ordering of inequal strings isn't needed.
bool equals(StringRef RHS) const {
return (Length == RHS.Length &&
memcmp(Data, RHS.Data, RHS.Length) == 0);
compareMemory(Data, RHS.Data, RHS.Length) == 0);
}
/// equals_lower - Check for string equality, ignoring case.
@ -116,7 +129,7 @@ namespace llvm {
/// is lexicographically less than, equal to, or greater than the \arg RHS.
int compare(StringRef RHS) const {
// Check the prefix for a mismatch.
if (int Res = memcmp(Data, RHS.Data, min(Length, RHS.Length)))
if (int Res = compareMemory(Data, RHS.Data, min(Length, RHS.Length)))
return Res < 0 ? -1 : 1;
// Otherwise the prefixes match, so we only need to check the lengths.
@ -183,13 +196,13 @@ namespace llvm {
/// startswith - Check if this string starts with the given \arg Prefix.
bool startswith(StringRef Prefix) const {
return Length >= Prefix.Length &&
memcmp(Data, Prefix.Data, Prefix.Length) == 0;
compareMemory(Data, Prefix.Data, Prefix.Length) == 0;
}
/// endswith - Check if this string ends with the given \arg Suffix.
bool endswith(StringRef Suffix) const {
return Length >= Suffix.Length &&
memcmp(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
compareMemory(end() - Suffix.Length, Suffix.Data, Suffix.Length) == 0;
}
/// @}
@ -447,6 +460,10 @@ namespace llvm {
return LHS.compare(RHS) != -1;
}
inline std::string &operator+=(std::string &buffer, llvm::StringRef string) {
return buffer.append(string.data(), string.size());
}
/// @}
// StringRefs can be treated like a POD type.
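
The net effect of the StringRef hunks above: NULL data is rejected up front (or tolerated when the length is zero) instead of reaching strlen/memcmp, and std::string gains an append operator. A small sketch:

#include "llvm/ADT/StringRef.h"
#include <string>

void stringRefDemo() {
  llvm::StringRef Empty(0, 0);   // legal: NULL data with zero length
  llvm::StringRef Name("alias"); // a NULL argument here would now assert
  std::string Buf("prefix-");
  Buf += Name;                   // new operator+=(std::string&, StringRef)
  // compare() now routes through compareMemory(), so the zero-length
  // comparison below never calls ::memcmp with a NULL pointer.
  (void)Empty.compare(Name);
}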

View File

@ -225,7 +225,7 @@ class Triple {
/// if the environment component is present).
StringRef getOSAndEnvironmentName() const;
/// getOSNumber - Parse the version number from the OS name component of the
/// getOSVersion - Parse the version number from the OS name component of the
/// triple, if present.
///
/// For example, "fooos1.2.3" would return (1, 2, 3).

View File

@ -38,6 +38,7 @@
#define LLVM_ANALYSIS_ALIAS_ANALYSIS_H
#include "llvm/Support/CallSite.h"
#include "llvm/ADT/DenseMap.h"
namespace llvm {
@ -488,6 +489,32 @@ class AliasAnalysis {
}
};
// Specialize DenseMapInfo for Location.
template<>
struct DenseMapInfo<AliasAnalysis::Location> {
static inline AliasAnalysis::Location getEmptyKey() {
return
AliasAnalysis::Location(DenseMapInfo<const Value *>::getEmptyKey(),
0, 0);
}
static inline AliasAnalysis::Location getTombstoneKey() {
return
AliasAnalysis::Location(DenseMapInfo<const Value *>::getTombstoneKey(),
0, 0);
}
static unsigned getHashValue(const AliasAnalysis::Location &Val) {
return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^
DenseMapInfo<uint64_t>::getHashValue(Val.Size) ^
DenseMapInfo<const MDNode *>::getHashValue(Val.TBAATag);
}
static bool isEqual(const AliasAnalysis::Location &LHS,
const AliasAnalysis::Location &RHS) {
return LHS.Ptr == RHS.Ptr &&
LHS.Size == RHS.Size &&
LHS.TBAATag == RHS.TBAATag;
}
};
/// isNoAliasCall - Return true if this pointer is returned by a noalias
/// function.
bool isNoAliasCall(const Value *V);
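
With the DenseMapInfo specialization above, an AliasAnalysis::Location can key a DenseMap directly. A hedged sketch of the kind of memoization this enables (the cache and helper names are invented for illustration):

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include <utility>

namespace {
typedef llvm::DenseMap<llvm::AliasAnalysis::Location, bool> LocationCache;

// Remember a per-location flag keyed on the full (Ptr, Size, TBAATag) triple.
bool &cachedFlagFor(LocationCache &Cache,
                    const llvm::AliasAnalysis::Location &Loc) {
  return Cache.insert(std::make_pair(Loc, false)).first->second;
}
}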

View File

@ -0,0 +1,78 @@
//===--- BranchProbabilityInfo.h - Branch Probability Analysis --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass is used to evaluate branch probabilities.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
#define LLVM_ANALYSIS_BRANCHPROBABILITYINFO_H
#include "llvm/InitializePasses.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Analysis/LoopInfo.h"
namespace llvm {
class raw_ostream;
class BranchProbabilityInfo : public FunctionPass {
// Default weight value. Used when we don't have information about the edge.
static const uint32_t DEFAULT_WEIGHT = 16;
typedef std::pair<BasicBlock *, BasicBlock *> Edge;
DenseMap<Edge, uint32_t> Weights;
// Get sum of the block successors' weights.
uint32_t getSumForBlock(BasicBlock *BB) const;
public:
static char ID;
BranchProbabilityInfo() : FunctionPass(ID) {
initializeBranchProbabilityInfoPass(*PassRegistry::getPassRegistry());
}
void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<LoopInfo>();
AU.setPreservesAll();
}
bool runOnFunction(Function &F);
// Returned value is between 1 and UINT32_MAX. Look at
// BranchProbabilityInfo.cpp for details.
uint32_t getEdgeWeight(BasicBlock *Src, BasicBlock *Dst) const;
// Look at BranchProbabilityInfo.cpp for details. Use it with caution!
void setEdgeWeight(BasicBlock *Src, BasicBlock *Dst, uint32_t Weight);
// A 'Hot' edge is an edge whose probability is >= 80%.
bool isEdgeHot(BasicBlock *Src, BasicBlock *Dst) const;
// Return a hot successor for the block BB or null if there isn't one.
BasicBlock *getHotSucc(BasicBlock *BB) const;
// Return a probability as a fraction between 0 (0% probability) and
// 1 (100% probability), however the value is never equal to 0, and can be 1
// only if the SRC block has only one successor.
BranchProbability getEdgeProbability(BasicBlock *Src, BasicBlock *Dst) const;
// Print value between 0 (0% probability) and 1 (100% probability),
// however the value is never equal to 0, and can be 1 only if the SRC
// block has only one successor.
raw_ostream &printEdgeProbability(raw_ostream &OS, BasicBlock *Src,
BasicBlock *Dst) const;
};
}
#endif
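
A hedged sketch of a consumer for the new analysis (the pass name is invented and registration boilerplate is omitted):

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Function.h"   // 2011-era header locations
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"

namespace {
struct HotEdgeDump : public llvm::FunctionPass {
  static char ID;
  HotEdgeDump() : llvm::FunctionPass(ID) {}

  virtual void getAnalysisUsage(llvm::AnalysisUsage &AU) const {
    AU.addRequired<llvm::BranchProbabilityInfo>();
    AU.setPreservesAll();
  }

  virtual bool runOnFunction(llvm::Function &F) {
    llvm::BranchProbabilityInfo &BPI =
        getAnalysis<llvm::BranchProbabilityInfo>();
    for (llvm::Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
      llvm::BasicBlock *BB = &*I;
      if (llvm::BasicBlock *Hot = BPI.getHotSucc(BB)) // >= 80% edge, if any
        BPI.printEdgeProbability(llvm::errs(), BB, Hot);
    }
    return false;
  }
};
char HotEdgeDump::ID = 0;
}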

View File

@ -259,6 +259,9 @@ class CallGraphNode {
/// addCalledFunction - Add a function to the list of functions called by this
/// one.
void addCalledFunction(CallSite CS, CallGraphNode *M) {
assert(!CS.getInstruction() ||
!CS.getCalledFunction() ||
!CS.getCalledFunction()->isIntrinsic());
CalledFunctions.push_back(std::make_pair(CS.getInstruction(), M));
M->AddRef();
}

View File

@ -117,8 +117,9 @@ namespace llvm {
/// @param Name Typedef name.
/// @param File File where this type is defined.
/// @param LineNo Line number.
/// @param Context The surrounding context for the typedef.
DIType createTypedef(DIType Ty, StringRef Name, DIFile File,
unsigned LineNo);
unsigned LineNo, DIDescriptor Context);
/// createFriend - Create debugging information entry for a 'friend'.
DIType createFriend(DIType Ty, DIType FriendTy);

View File

@ -49,15 +49,16 @@ namespace llvm {
class DIDescriptor {
public:
enum {
FlagPrivate = 1 << 0,
FlagProtected = 1 << 1,
FlagFwdDecl = 1 << 2,
FlagAppleBlock = 1 << 3,
FlagBlockByrefStruct = 1 << 4,
FlagVirtual = 1 << 5,
FlagArtificial = 1 << 6,
FlagExplicit = 1 << 7,
FlagPrototyped = 1 << 8
FlagPrivate = 1 << 0,
FlagProtected = 1 << 1,
FlagFwdDecl = 1 << 2,
FlagAppleBlock = 1 << 3,
FlagBlockByrefStruct = 1 << 4,
FlagVirtual = 1 << 5,
FlagArtificial = 1 << 6,
FlagExplicit = 1 << 7,
FlagPrototyped = 1 << 8,
FlagObjcClassComplete = 1 << 9
};
protected:
const MDNode *DbgNode;
@ -271,6 +272,9 @@ namespace llvm {
bool isArtificial() const {
return (getFlags() & FlagArtificial) != 0;
}
bool isObjcClassComplete() const {
return (getFlags() & FlagObjcClassComplete) != 0;
}
bool isValid() const {
return DbgNode && (isBasicType() || isDerivedType() || isCompositeType());
}

View File

@ -14,8 +14,8 @@
#ifndef LLVM_ANALYSIS_FINDUSEDTYPES_H
#define LLVM_ANALYSIS_FINDUSEDTYPES_H
#include "llvm/ADT/SetVector.h"
#include "llvm/Pass.h"
#include <set>
namespace llvm {
@ -23,7 +23,7 @@ class Type;
class Value;
class FindUsedTypes : public ModulePass {
std::set<const Type *> UsedTypes;
SetVector<const Type *> UsedTypes;
public:
static char ID; // Pass identification, replacement for typeid
FindUsedTypes() : ModulePass(ID) {
@ -33,7 +33,7 @@ class FindUsedTypes : public ModulePass {
/// getTypes - After the pass has been run, return the set containing all of
/// the types used in the module.
///
const std::set<const Type *> &getTypes() const { return UsedTypes; }
const SetVector<const Type *> &getTypes() const { return UsedTypes; }
/// Print the types found in the module. If the optional Module parameter is
/// passed in, then the types are printed symbolically if possible, using the

View File

@ -37,8 +37,8 @@ class TargetData;
class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
friend class IVUsers;
public:
IVStrideUse(IVUsers *P, Instruction* U, Value *O)
: CallbackVH(U), Parent(P), OperandValToReplace(O) {
IVStrideUse(IVUsers *P, Instruction* U, Value *O, Value *PN)
: CallbackVH(U), Parent(P), OperandValToReplace(O), Phi(PN) {
}
/// getUser - Return the user instruction for this use.
@ -51,6 +51,11 @@ class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
setValPtr(NewUser);
}
/// getPhi - Return the phi node that represents this IV.
PHINode *getPhi() const {
return cast<PHINode>(Phi);
}
/// getOperandValToReplace - Return the Value of the operand in the user
/// instruction that this IVStrideUse is representing.
Value *getOperandValToReplace() const {
@ -81,6 +86,9 @@ class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
/// that this IVStrideUse is representing.
WeakVH OperandValToReplace;
/// Phi - The loop header phi that represents this IV.
WeakVH Phi;
/// PostIncLoops - The set of loops for which Expr has been adjusted to
/// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
PostIncLoopSet PostIncLoops;
@ -143,9 +151,9 @@ class IVUsers : public LoopPass {
/// AddUsersIfInteresting - Inspect the specified Instruction. If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true. Otherwise, return false.
bool AddUsersIfInteresting(Instruction *I);
bool AddUsersIfInteresting(Instruction *I, PHINode *Phi);
IVStrideUse &AddUser(Instruction *User, Value *Operand);
IVStrideUse &AddUser(Instruction *User, Value *Operand, PHINode *Phi);
/// getReplacementExpr - Return a SCEV expression which computes the
/// value of the OperandValToReplace of the given IVStrideUse.

View File

@ -109,7 +109,7 @@ class RGPassManager : public FunctionPass, public PMDataManager {
/// @brief Print passes managed by this manager.
void dumpPassStructure(unsigned Offset);
/// @brief Print passes contained by this manager.
/// @brief Get passes contained by this manager.
Pass *getContainedPass(unsigned N) {
assert(N < PassVector.size() && "Pass number out of range!");
Pass *FP = static_cast<Pass *>(PassVector[N]);

View File

@ -270,30 +270,30 @@ namespace llvm {
/// BackedgeTakenCounts - Cache the backedge-taken count of the loops for
/// this function as they are computed.
std::map<const Loop*, BackedgeTakenInfo> BackedgeTakenCounts;
DenseMap<const Loop*, BackedgeTakenInfo> BackedgeTakenCounts;
/// ConstantEvolutionLoopExitValue - This map contains entries for all of
/// the PHI instructions that we attempt to compute constant evolutions for.
/// This allows us to avoid potentially expensive recomputation of these
/// properties. An instruction maps to null if we are unable to compute its
/// exit value.
std::map<PHINode*, Constant*> ConstantEvolutionLoopExitValue;
DenseMap<PHINode*, Constant*> ConstantEvolutionLoopExitValue;
/// ValuesAtScopes - This map contains entries for all the expressions
/// that we attempt to compute getSCEVAtScope information for, which can
/// be expensive in extreme cases.
std::map<const SCEV *,
DenseMap<const SCEV *,
std::map<const Loop *, const SCEV *> > ValuesAtScopes;
/// LoopDispositions - Memoized computeLoopDisposition results.
std::map<const SCEV *,
DenseMap<const SCEV *,
std::map<const Loop *, LoopDisposition> > LoopDispositions;
/// computeLoopDisposition - Compute a LoopDisposition value.
LoopDisposition computeLoopDisposition(const SCEV *S, const Loop *L);
/// BlockDispositions - Memoized computeBlockDisposition results.
std::map<const SCEV *,
DenseMap<const SCEV *,
std::map<const BasicBlock *, BlockDisposition> > BlockDispositions;
/// computeBlockDisposition - Compute a BlockDisposition value.

View File

@ -51,6 +51,9 @@ class Argument : public Value, public ilist_node<Argument> {
/// hasByValAttr - Return true if this argument has the byval attribute on it
/// in its containing function.
bool hasByValAttr() const;
/// getParamAlignment - If this is a byval argument, return its alignment.
unsigned getParamAlignment() const;
/// hasNestAttr - Return true if this argument has the nest attribute on
/// it in its containing function.

View File

@ -67,6 +67,20 @@ const Attributes StackAlignment = 7<<26; ///< Alignment of stack for
///alignstack(1))
const Attributes Hotpatch = 1<<29; ///< Function should have special
///'hotpatch' sequence in prologue
const Attributes UWTable = 1<<30; ///< Function must be in an unwind
///table
/// Note that uwtable is about the ABI or the user mandating an entry in the
/// unwind table. The nounwind attribute is about an exception passing by the
/// function.
/// In a theoretical system that uses tables for profiling and sjlj for
/// exceptions, they would be fully independent. In a normal system that
/// uses tables for both, the semantics are:
/// nil = Needs an entry because an exception might pass by.
/// nounwind = No need for an entry
/// uwtable = Needs an entry because the ABI says so and because
/// an exception might pass by.
/// uwtable + nounwind = Needs an entry because the ABI says so.
/// @brief Attributes that only apply to function parameters.
const Attributes ParameterOnly = ByVal | Nest | StructRet | NoCapture;
@ -76,7 +90,7 @@ const Attributes ParameterOnly = ByVal | Nest | StructRet | NoCapture;
const Attributes FunctionOnly = NoReturn | NoUnwind | ReadNone | ReadOnly |
NoInline | AlwaysInline | OptimizeForSize | StackProtect | StackProtectReq |
NoRedZone | NoImplicitFloat | Naked | InlineHint | StackAlignment |
Hotpatch;
Hotpatch | UWTable;
/// @brief Parameter attributes that do not apply to vararg call arguments.
const Attributes VarArgsIncompatible = StructRet;
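
The four-row table above collapses to a two-variable predicate; a minimal sketch (this mirrors the Function::needsUnwindTableEntry helper added elsewhere in this commit):

// An entry is needed when the ABI mandates one (uwtable), or when an
// exception might pass by the function (i.e. nounwind is absent).
static bool needsUnwindTableEntry(bool HasUWTable, bool HasNoUnwind) {
  return HasUWTable || !HasNoUnwind;
}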

View File

@ -185,7 +185,14 @@ namespace llvm {
void emitPrologLabel(const MachineInstr &MI);
bool needsCFIMoves();
enum CFIMoveType {
CFI_M_None,
CFI_M_EH,
CFI_M_Debug
};
CFIMoveType needsCFIMoves();
bool needsSEHMoves();
/// EmitConstantPool - Print to the current output stream assembly
/// representations of the constants in the constant pool MCP. This is
@ -381,10 +388,6 @@ namespace llvm {
/// operands.
virtual MachineLocation getDebugValueLocation(const MachineInstr *MI) const;
/// getDwarfRegOpSize - get size required to emit given machine location
/// using dwarf encoding.
virtual unsigned getDwarfRegOpSize(const MachineLocation &MLoc) const;
/// getISAEncoding - Get the value for DW_AT_APPLE_isa. Zero if no isa
/// encoding specified.
virtual unsigned getISAEncoding() { return 0; }
@ -396,12 +399,9 @@ namespace llvm {
// Dwarf Lowering Routines
//===------------------------------------------------------------------===//
/// EmitFrameMoves - Emit frame instructions to describe the layout of the
/// EmitCFIFrameMove - Emit frame instruction to describe the layout of the
/// frame.
void EmitFrameMoves(const std::vector<MachineMove> &Moves,
MCSymbol *BaseLabel, bool isEH) const;
void EmitCFIFrameMove(const MachineMove &Move) const;
void EmitCFIFrameMoves(const std::vector<MachineMove> &Moves) const;
//===------------------------------------------------------------------===//
// Inline Asm Support

View File

@ -16,6 +16,7 @@
#define LLVM_CODEGEN_CALLINGCONVLOWER_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/CallingConv.h"
@ -141,14 +142,19 @@ typedef bool CCCustomFn(unsigned &ValNo, MVT &ValVT,
MVT &LocVT, CCValAssign::LocInfo &LocInfo,
ISD::ArgFlagsTy &ArgFlags, CCState &State);
typedef enum { Invalid, Prologue, Call } ParmContext;
/// ParmContext - This enum tracks whether calling convention lowering is in
/// the context of prologue or call generation. Not all backends make use of
/// this information.
typedef enum { Unknown, Prologue, Call } ParmContext;
/// CCState - This class holds information needed while lowering arguments and
/// return values. It captures which registers are already assigned and which
/// stack slots are used. It provides accessors to allocate these values.
class CCState {
private:
CallingConv::ID CallingConv;
bool IsVarArg;
MachineFunction &MF;
const TargetMachine &TM;
const TargetRegisterInfo &TRI;
SmallVector<CCValAssign, 16> &Locs;
@ -158,10 +164,14 @@ class CCState {
SmallVector<uint32_t, 16> UsedRegs;
unsigned FirstByValReg;
bool FirstByValRegValid;
protected:
ParmContext CallOrPrologue;
public:
CCState(CallingConv::ID CC, bool isVarArg, const TargetMachine &TM,
SmallVector<CCValAssign, 16> &locs, LLVMContext &C);
CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
const TargetMachine &TM, SmallVector<CCValAssign, 16> &locs,
LLVMContext &C);
void addLoc(const CCValAssign &V) {
Locs.push_back(V);
@ -169,6 +179,7 @@ class CCState {
LLVMContext &getContext() const { return Context; }
const TargetMachine &getTarget() const { return TM; }
MachineFunction &getMachineFunction() const { return MF; }
CallingConv::ID getCallingConv() const { return CallingConv; }
bool isVarArg() const { return IsVarArg; }
@ -301,7 +312,6 @@ class CCState {
bool isFirstByValRegValid() { return FirstByValRegValid; }
ParmContext getCallOrPrologue() { return CallOrPrologue; }
void setCallOrPrologue(ParmContext pc) { CallOrPrologue = pc; }
private:
/// MarkAllocated - Mark a register and all of its aliases as allocated.

View File

@ -241,6 +241,15 @@ class FastISel {
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill);
/// FastEmitInst_rrr - Emit a MachineInstr with three register operands
/// and a result register in the given register class.
///
unsigned FastEmitInst_rrr(unsigned MachineInstOpcode,
const TargetRegisterClass *RC,
unsigned Op0, bool Op0IsKill,
unsigned Op1, bool Op1IsKill,
unsigned Op2, bool Op2IsKill);
/// FastEmitInst_ri - Emit a MachineInstr with a register operand,
/// an immediate, and a result register in the given register class.
///
@ -301,7 +310,7 @@ class FastISel {
/// the CFG.
void FastEmitBranch(MachineBasicBlock *MBB, DebugLoc DL);
unsigned UpdateValueMap(const Value* I, unsigned Reg);
void UpdateValueMap(const Value* I, unsigned Reg, unsigned NumRegs = 1);
unsigned createResultReg(const TargetRegisterClass *RC);
@ -334,6 +343,8 @@ class FastISel {
bool SelectCast(const User *I, unsigned Opcode);
bool SelectExtractValue(const User *I);
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI

View File

@ -107,11 +107,11 @@ namespace ISD {
// and returns an outchain.
EH_SJLJ_LONGJMP,
// OUTCHAIN = EH_SJLJ_DISPATCHSETUP(INCHAIN, context)
// OUTCHAIN = EH_SJLJ_DISPATCHSETUP(INCHAIN, setjmpval)
// This corresponds to the eh.sjlj.dispatchsetup intrinsic. It takes an
// input chain and a pointer to the sjlj function context as inputs and
// returns an outchain. By default, this does nothing. Targets can lower
// this to unwind setup code if needed.
// input chain and the value returning from setjmp as inputs and returns an
// outchain. By default, this does nothing. Targets can lower this to unwind
// setup code if needed.
EH_SJLJ_DISPATCHSETUP,
// TargetConstant* - Like Constant*, but the DAG does not do any folding,

View File

@ -492,9 +492,10 @@ namespace llvm {
/// Returns true if the live interval is zero length, i.e. no live ranges
/// span instructions. It doesn't pay to spill such an interval.
bool isZeroLength() const {
bool isZeroLength(SlotIndexes *Indexes) const {
for (const_iterator i = begin(), e = end(); i != e; ++i)
if (i->end.getPrevIndex() > i->start)
if (Indexes->getNextNonNullIndex(i->start).getBaseIndex() <
i->end.getBaseIndex())
return false;
return true;
}

View File

@ -229,6 +229,7 @@ class MachineInstr : public ilist_node<MachineInstr> {
enum MICheckType {
CheckDefs, // Check all operands for equality
CheckKillDead, // Check all operands including kill / dead markers
IgnoreDefs, // Ignore all definitions
IgnoreVRegDefs // Ignore virtual register definitions
};

View File

@ -88,7 +88,7 @@ class MachineInstrBuilder {
return *this;
}
const MachineInstrBuilder &addFrameIndex(unsigned Idx) const {
const MachineInstrBuilder &addFrameIndex(int Idx) const {
MI->addOperand(MachineOperand::CreateFI(Idx));
return *this;
}

View File

@ -52,27 +52,13 @@ namespace llvm {
class Constant;
class GlobalVariable;
class MDNode;
class MMIAddrLabelMap;
class MachineBasicBlock;
class MachineFunction;
class Module;
class PointerType;
class StructType;
/// MachineModuleInfoImpl - This class can be derived from and used by targets
/// to hold private target-specific information for each Module. Objects of
/// this type are accessed/created with MMI::getInfo and destroyed when the
/// MachineModuleInfo is destroyed.
class MachineModuleInfoImpl {
public:
typedef PointerIntPair<MCSymbol*, 1, bool> StubValueTy;
virtual ~MachineModuleInfoImpl();
typedef std::vector<std::pair<MCSymbol*, StubValueTy> > SymbolListTy;
protected:
static SymbolListTy GetSortedStubs(const DenseMap<MCSymbol*, StubValueTy>&);
};
//===----------------------------------------------------------------------===//
/// LandingPadInfo - This structure is used to retain landing pad info for
/// the current function.
@ -89,7 +75,20 @@ struct LandingPadInfo {
: LandingPadBlock(MBB), LandingPadLabel(0), Personality(0) {}
};
class MMIAddrLabelMap;
//===----------------------------------------------------------------------===//
/// MachineModuleInfoImpl - This class can be derived from and used by targets
/// to hold private target-specific information for each Module. Objects of
/// this type are accessed/created with MMI::getInfo and destroyed when the
/// MachineModuleInfo is destroyed.
///
class MachineModuleInfoImpl {
public:
typedef PointerIntPair<MCSymbol*, 1, bool> StubValueTy;
virtual ~MachineModuleInfoImpl();
typedef std::vector<std::pair<MCSymbol*, StubValueTy> > SymbolListTy;
protected:
static SymbolListTy GetSortedStubs(const DenseMap<MCSymbol*, StubValueTy>&);
};
//===----------------------------------------------------------------------===//
/// MachineModuleInfo - This class contains meta information specific to a

View File

@ -94,8 +94,8 @@ class MachineOperand {
/// not a real instruction. Such uses should be ignored during codegen.
bool IsDebug : 1;
/// SmallContents - Thisreally should be part of the Contents union, but lives
/// out here so we can get a better packed struct.
/// SmallContents - This really should be part of the Contents union, but
/// lives out here so we can get a better packed struct.
/// MO_Register: Register number.
/// OffsetedInfo: Low bits of offset.
union {
@ -473,7 +473,7 @@ class MachineOperand {
Op.setTargetFlags(TargetFlags);
return Op;
}
static MachineOperand CreateFI(unsigned Idx) {
static MachineOperand CreateFI(int Idx) {
MachineOperand Op(MachineOperand::MO_FrameIndex);
Op.setIndex(Idx);
return Op;

View File

@ -21,7 +21,7 @@ namespace llvm {
class raw_ostream;
/// PseudoSourceValue - Special value supplied for machine level alias
/// analysis. It indicates that the a memory access references the functions
/// analysis. It indicates that a memory access references the function's
/// stack frame (e.g., a spill slot), below the stack frame (e.g., argument
/// space), or constant pool.
class PseudoSourceValue : public Value {

View File

@ -94,7 +94,7 @@ namespace llvm {
typedef std::map<PBQP::Graph::ConstNodeItr, unsigned,
PBQP::NodeItrComparator> Node2VReg;
typedef DenseMap<unsigned, PBQP::Graph::NodeItr> VReg2Node;
typedef std::map<unsigned, AllowedSet> AllowedSetMap;
typedef DenseMap<unsigned, AllowedSet> AllowedSetMap;
PBQP::Graph graph;
Node2VReg node2VReg;

View File

@ -265,7 +265,6 @@ namespace llvm {
bool isCloned : 1; // True if this node has been cloned.
Sched::Preference SchedulingPref; // Scheduling preference.
SmallVector<MachineInstr*, 4> DbgInstrList; // dbg_values referencing this.
private:
bool isDepthCurrent : 1; // True if Depth is current.
bool isHeightCurrent : 1; // True if Height is current.

View File

@ -284,7 +284,7 @@ class SelectionDAG {
///
/// Note that this is an involved process that may invalidate pointers into
/// the graph.
void Legalize(CodeGenOpt::Level OptLevel);
void Legalize();
/// LegalizeVectors - This transforms the SelectionDAG into a SelectionDAG
/// that only uses vector math operations supported by the target. This is
@ -985,10 +985,6 @@ class SelectionDAG {
/// other positive zero.
bool isEqualTo(SDValue A, SDValue B) const;
/// isVerifiedDebugInfoDesc - Returns true if the specified SDValue has
/// been verified as a debug information descriptor.
bool isVerifiedDebugInfoDesc(SDValue Op) const;
/// UnrollVectorOp - Utility function used by legalize and lowering to
/// "unroll" a vector operation by splitting out the scalars and operating
/// on each element individually. If the ResNE is 0, fully unroll the vector

View File

@ -58,6 +58,10 @@ class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
virtual const MCSection *getEHFrameSection() const;
virtual const MCSection *getWin64EHFuncTableSection(StringRef) const {
return NULL;
}
virtual const MCSection *getWin64EHTableSection(StringRef) const{return NULL;}
virtual void emitPersonalityValue(MCStreamer &Streamer,
const TargetMachine &TM,
@ -133,6 +137,10 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
virtual const MCSection *getEHFrameSection() const;
virtual const MCSection *getWin64EHFuncTableSection(StringRef) const {
return NULL;
}
virtual const MCSection *getWin64EHTableSection(StringRef) const{return NULL;}
virtual const MCSection *
SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
@ -196,6 +204,8 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
const MCSection *DrectveSection;
const MCSection *PDataSection;
const MCSection *XDataSection;
public:
TargetLoweringObjectFileCOFF() {}
~TargetLoweringObjectFileCOFF() {}
@ -203,6 +213,8 @@ class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
virtual void Initialize(MCContext &Ctx, const TargetMachine &TM);
virtual const MCSection *getEHFrameSection() const;
virtual const MCSection *getWin64EHFuncTableSection(StringRef) const;
virtual const MCSection *getWin64EHTableSection(StringRef) const;
virtual const MCSection *getDrectveSection() const { return DrectveSection; }

View File

@ -56,8 +56,11 @@ def forward_not_split;
def case;
// Boolean constants.
def true;
def false;
class Bool<bit val> {
bit Value = val;
}
def true : Bool<1>;
def false : Bool<0>;
// Boolean operators.
def and;

View File

@ -0,0 +1,167 @@
//===- llvm/DefaultPasses.h - Default Pass Support code --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// This file defines the infrastructure for registering the standard pass list.
// This defines sets of standard optimizations that plugins can modify and
// front ends can use.
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEFAULT_PASS_SUPPORT_H
#define LLVM_DEFAULT_PASS_SUPPORT_H
namespace llvm {
class PassManagerBase;
/// Unique identifiers for the default standard passes. The addresses of
/// these symbols are used to uniquely identify passes from the default list.
namespace DefaultStandardPasses {
extern unsigned char AggressiveDCEID;
extern unsigned char ArgumentPromotionID;
extern unsigned char BasicAliasAnalysisID;
extern unsigned char CFGSimplificationID;
extern unsigned char ConstantMergeID;
extern unsigned char CorrelatedValuePropagationID;
extern unsigned char DeadArgEliminationID;
extern unsigned char DeadStoreEliminationID;
extern unsigned char DeadTypeEliminationID;
extern unsigned char EarlyCSEID;
extern unsigned char FunctionAttrsID;
extern unsigned char FunctionInliningID;
extern unsigned char GVNID;
extern unsigned char GlobalDCEID;
extern unsigned char GlobalOptimizerID;
extern unsigned char GlobalsModRefID;
extern unsigned char IPSCCPID;
extern unsigned char IndVarSimplifyID;
extern unsigned char InlinerPlaceholderID;
extern unsigned char InstructionCombiningID;
extern unsigned char JumpThreadingID;
extern unsigned char LICMID;
extern unsigned char LoopDeletionID;
extern unsigned char LoopIdiomID;
extern unsigned char LoopRotateID;
extern unsigned char LoopUnrollID;
extern unsigned char LoopUnswitchID;
extern unsigned char MemCpyOptID;
extern unsigned char PruneEHID;
extern unsigned char ReassociateID;
extern unsigned char SCCPID;
extern unsigned char ScalarReplAggregatesID;
extern unsigned char SimplifyLibCallsID;
extern unsigned char StripDeadPrototypesID;
extern unsigned char TailCallEliminationID;
extern unsigned char TypeBasedAliasAnalysisID;
}
/// StandardPass - The class responsible for maintaining the lists of standard
/// passes.
class StandardPass {
friend class RegisterStandardPassLists;
public:
/// Predefined standard sets of passes
enum StandardSet {
AliasAnalysis,
Function,
Module,
LTO
};
/// Flags to specify whether a pass should be enabled. Passes registered
/// with the standard sets may specify a minimum optimization level and one
/// or more flags that must be set when constructing the set for the pass to
/// be used.
enum OptimizationFlags {
/// Optimize for size was requested.
OptimizeSize = 1<<0,
/// Allow passes which may make global module changes.
UnitAtATime = 1<<1,
/// UnrollLoops - Allow loop unrolling.
UnrollLoops = 1<<2,
/// Allow library calls to be simplified.
SimplifyLibCalls = 1<<3,
/// Whether the module may have code using exceptions.
HaveExceptions = 1<<4,
// Run an inliner pass as part of this set.
RunInliner = 1<<5
};
enum OptimizationFlagComponents {
/// The low bits are used to store the optimization level. When requesting
/// passes, this should store the requested optimisation level. When
/// setting passes, this should set the minimum optimization level at which
/// the pass will run.
OptimizationLevelMask=0xf,
/// The maximum optimisation level at which the pass is run.
MaxOptimizationLevelMask=0xf0,
// Flags that must be set
RequiredFlagMask=0xff00,
// Flags that may not be set.
DisallowedFlagMask=0xff0000,
MaxOptimizationLevelShift=4,
RequiredFlagShift=8,
DisallowedFlagShift=16
};
/// Returns the optimisation level from a set of flags.
static unsigned OptimizationLevel(unsigned flags) {
return flags & OptimizationLevelMask;
}
/// Returns the maximum optimization level for this set of flags
static unsigned MaxOptimizationLevel(unsigned flags) {
return (flags & MaxOptimizationLevelMask) >> 4;
}
/// Constructs a set of flags from the specified minimum and maximum
/// optimisation level
static unsigned OptimzationFlags(unsigned minLevel=0, unsigned maxLevel=0xf,
unsigned requiredFlags=0, unsigned disallowedFlags=0) {
return ((minLevel & OptimizationLevelMask) |
((maxLevel<<MaxOptimizationLevelShift) & MaxOptimizationLevelMask)
| ((requiredFlags<<RequiredFlagShift) & RequiredFlagMask)
| ((disallowedFlags<<DisallowedFlagShift) & DisallowedFlagMask));
}
/// Returns the flags that must be set for this to match
static unsigned RequiredFlags(unsigned flags) {
return (flags & RequiredFlagMask) >> RequiredFlagShift;
}
/// Returns the flags that must not be set for this to match
static unsigned DisallowedFlags(unsigned flags) {
return (flags & DisallowedFlagMask) >> DisallowedFlagShift;
}
/// Register a standard pass in the specified set. If flags is non-zero,
/// then the pass will only be returned when the specified flags are set.
template<typename passName>
class RegisterStandardPass {
public:
RegisterStandardPass(StandardSet set, unsigned char *runBefore=0,
unsigned flags=0, unsigned char *ID=0) {
// Use the pass's ID if one is not specified
RegisterDefaultPass(PassInfo::NormalCtor_t(callDefaultCtor<passName>),
ID ? ID : (unsigned char*)&passName::ID, runBefore, set, flags);
}
};
/// Adds the passes from the specified set to the provided pass manager
static void AddPassesFromSet(PassManagerBase *PM,
StandardSet set,
unsigned flags=0,
bool VerifyEach=false,
Pass *inliner=0);
private:
/// Registers the default passes. This is set by RegisterStandardPassLists
/// and is called lazily.
static void (*RegisterDefaultPasses)(void);
/// Creates the verifier pass that is inserted when a VerifyEach is passed to
/// AddPassesFromSet()
static Pass* (*CreateVerifierPass)(void);
/// Registers the pass
static void RegisterDefaultPass(PassInfo::NormalCtor_t constructor,
unsigned char *newPass,
unsigned char *oldPass,
StandardSet set,
unsigned flags=0);
};
} // namespace llvm
#endif
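
A worked example of the flag encoding above (values are illustrative; note that the OptimzationFlags spelling follows the header):

#include "llvm/DefaultPasses.h"
#include <cassert>

void flagEncodingDemo() {
  // min level 2, max level 3, inliner required, size-opt disallowed:
  //   (2 & 0xf) | ((3 << 4) & 0xf0)
  // | ((RunInliner   << 8)  & 0xff00)    -> 0x02000
  // | ((OptimizeSize << 16) & 0xff0000)  -> 0x10000
  // = 0x12032
  unsigned flags = llvm::StandardPass::OptimzationFlags(
      2, 3, llvm::StandardPass::RunInliner, llvm::StandardPass::OptimizeSize);
  assert(llvm::StandardPass::OptimizationLevel(flags) == 2);
  assert(llvm::StandardPass::MaxOptimizationLevel(flags) == 3);
  assert(llvm::StandardPass::RequiredFlags(flags) ==
         llvm::StandardPass::RunInliner);
  assert(llvm::StandardPass::DisallowedFlags(flags) ==
         llvm::StandardPass::OptimizeSize);
}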

View File

@ -135,20 +135,14 @@ class ExecutionEngine {
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel,
bool GVsWithCode,
CodeModel::Model CMM,
StringRef MArch,
StringRef MCPU,
const SmallVectorImpl<std::string>& MAttrs);
TargetMachine *TM);
static ExecutionEngine *(*MCJITCtor)(
Module *M,
std::string *ErrorStr,
JITMemoryManager *JMM,
CodeGenOpt::Level OptLevel,
bool GVsWithCode,
CodeModel::Model CMM,
StringRef MArch,
StringRef MCPU,
const SmallVectorImpl<std::string>& MAttrs);
TargetMachine *TM);
static ExecutionEngine *(*InterpCtor)(Module *M,
std::string *ErrorStr);
@ -569,6 +563,14 @@ class EngineBuilder {
return *this;
}
/// selectTarget - Pick a target either via -march or by guessing the native
/// arch. Add any CPU features specified via -mcpu or -mattr.
static TargetMachine *selectTarget(Module *M,
StringRef MArch,
StringRef MCPU,
const SmallVectorImpl<std::string>& MAttrs,
std::string *Err);
ExecutionEngine *create();
};

View File

@ -253,6 +253,23 @@ class Function : public GlobalValue,
else removeFnAttr(Attribute::NoUnwind);
}
/// @brief True if the ABI mandates (or the user requested) that this
/// function be in an unwind table.
bool hasUWTable() const {
return hasFnAttr(Attribute::UWTable);
}
void setHasUWTable(bool HasUWTable = true) {
if (HasUWTable)
addFnAttr(Attribute::UWTable);
else
removeFnAttr(Attribute::UWTable);
}
/// @brief True if this function needs an unwind table.
bool needsUnwindTableEntry() const {
return hasUWTable() || !doesNotThrow();
}
/// @brief Determine if the function returns a structure through first
/// pointer argument.
bool hasStructRetAttr() const {
@ -414,6 +431,10 @@ class Function : public GlobalValue,
///
bool hasAddressTaken(const User** = 0) const;
/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or other function that gcc recognizes as "returning twice".
bool callsFunctionThatReturnsTwice() const;
private:
// Shadow Value::setValueSubclassData with a private forwarding method so that
// subclasses cannot accidentally use it.
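
A brief usage sketch for the accessors added above (the wrapper function is illustrative):

#include "llvm/Function.h"   // 2011-era path

void requestUnwindTable(llvm::Function &F) {
  F.setHasUWTable(true);             // the ABI (or user) mandates an entry
  if (F.needsUnwindTableEntry()) {
    // ... arrange for CFI/.eh_frame emission for F ...
  }
}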

View File

@ -66,6 +66,7 @@ void initializeBasicAliasAnalysisPass(PassRegistry&);
void initializeBasicCallGraphPass(PassRegistry&);
void initializeBlockExtractorPassPass(PassRegistry&);
void initializeBlockPlacementPass(PassRegistry&);
void initializeBranchProbabilityInfoPass(PassRegistry&);
void initializeBreakCriticalEdgesPass(PassRegistry&);
void initializeCFGOnlyPrinterPass(PassRegistry&);
void initializeCFGOnlyViewerPass(PassRegistry&);

View File

@ -139,7 +139,7 @@ namespace llvm {
return !getVolatileCst()->isZero();
}
unsigned getAddressSpace() const {
unsigned getDestAddressSpace() const {
return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
}
@ -227,6 +227,10 @@ namespace llvm {
/// value is guaranteed to be a pointer.
Value *getSource() const { return getRawSource()->stripPointerCasts(); }
unsigned getSourceAddressSpace() const {
return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
}
void setSource(Value *Ptr) {
assert(getRawSource()->getType() == Ptr->getType() &&
"setSource called with pointer of wrong type!");

View File

@ -47,6 +47,9 @@ def IntrReadWriteArgMem : IntrinsicProperty;
// Commutative - This intrinsic is commutative: X op Y == Y op X.
def Commutative : IntrinsicProperty;
// Throws - This intrinsic can throw.
def Throws : IntrinsicProperty;
// NoCapture - The specified argument pointer is not captured by the intrinsic.
class NoCapture<int argNo> : IntrinsicProperty {
int ArgNo = argNo;
@ -292,6 +295,7 @@ let Properties = [IntrNoMem] in {
def int_eh_exception : Intrinsic<[llvm_ptr_ty], [], [IntrReadMem]>;
def int_eh_selector : Intrinsic<[llvm_i32_ty],
[llvm_ptr_ty, llvm_ptr_ty, llvm_vararg_ty]>;
def int_eh_resume : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [Throws]>;
def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
@ -307,7 +311,7 @@ let Properties = [IntrNoMem] in {
def int_eh_sjlj_lsda : Intrinsic<[llvm_ptr_ty]>;
def int_eh_sjlj_callsite: Intrinsic<[], [llvm_i32_ty]>;
}
def int_eh_sjlj_dispatch_setup : Intrinsic<[], []>;
def int_eh_sjlj_dispatch_setup : Intrinsic<[], [llvm_i32_ty], [IntrReadMem]>;
def int_eh_sjlj_setjmp : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
def int_eh_sjlj_longjmp : Intrinsic<[], [llvm_ptr_ty]>;

View File

@ -35,6 +35,16 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
// Load and Store exclusive doubleword
let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
def int_arm_strexd : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty,
llvm_ptr_ty], [IntrReadWriteArgMem]>;
def int_arm_ldrexd : Intrinsic<[llvm_i32_ty, llvm_i32_ty], [llvm_ptr_ty],
[IntrReadArgMem]>;
}
//===----------------------------------------------------------------------===//
// VFP
@ -49,6 +59,43 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
[IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
// Coprocessor
let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
// Move to coprocessor
def int_arm_mcr : GCCBuiltin<"__builtin_arm_mcr">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_mcr2 : GCCBuiltin<"__builtin_arm_mcr2">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
// Move from coprocessor
def int_arm_mrc : GCCBuiltin<"__builtin_arm_mrc">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_mrc2 : GCCBuiltin<"__builtin_arm_mrc2">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty], []>;
// Coprocessor data processing
def int_arm_cdp : GCCBuiltin<"__builtin_arm_cdp">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_cdp2 : GCCBuiltin<"__builtin_arm_cdp2">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []>;
// Move from two registers to coprocessor
def int_arm_mcrr : GCCBuiltin<"__builtin_arm_mcrr">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_mcrr2 : GCCBuiltin<"__builtin_arm_mcrr2">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty], []>;
}
//===----------------------------------------------------------------------===//
// Advanced SIMD (NEON)

View File

@ -224,9 +224,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Cacheability support ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse_movnt_ps : GCCBuiltin<"__builtin_ia32_movntps">,
Intrinsic<[], [llvm_ptr_ty,
llvm_v4f32_ty], []>;
def int_x86_sse_sfence : GCCBuiltin<"__builtin_ia32_sfence">,
Intrinsic<[], [], []>;
}
@ -536,19 +533,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
llvm_v4i32_ty], []>;
}
// Cacheability support ops
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_movnt_dq : GCCBuiltin<"__builtin_ia32_movntdq">,
Intrinsic<[], [llvm_ptr_ty,
llvm_v2i64_ty], []>;
def int_x86_sse2_movnt_pd : GCCBuiltin<"__builtin_ia32_movntpd">,
Intrinsic<[], [llvm_ptr_ty,
llvm_v2f64_ty], []>;
def int_x86_sse2_movnt_i : GCCBuiltin<"__builtin_ia32_movnti">,
Intrinsic<[], [llvm_ptr_ty,
llvm_i32_ty], []>;
}
// Misc.
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse2_packsswb_128 : GCCBuiltin<"__builtin_ia32_packsswb128">,
@ -964,19 +948,19 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
// Miscellaneous
// CRC Instruction
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_sse42_crc32_8 : GCCBuiltin<"__builtin_ia32_crc32qi">,
def int_x86_sse42_crc32_32_8 : GCCBuiltin<"__builtin_ia32_crc32qi">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_sse42_crc32_16 : GCCBuiltin<"__builtin_ia32_crc32hi">,
def int_x86_sse42_crc32_32_16 : GCCBuiltin<"__builtin_ia32_crc32hi">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i16_ty],
[IntrNoMem]>;
def int_x86_sse42_crc32_32 : GCCBuiltin<"__builtin_ia32_crc32si">,
def int_x86_sse42_crc32_32_32 : GCCBuiltin<"__builtin_ia32_crc32si">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_x86_sse42_crc64_8 :
def int_x86_sse42_crc32_64_8 :
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_sse42_crc64_64 : GCCBuiltin<"__builtin_ia32_crc32di">,
def int_x86_sse42_crc32_64_64 : GCCBuiltin<"__builtin_ia32_crc32di">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty],
[IntrNoMem]>;
}

View File

@ -11,6 +11,12 @@
let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
// Miscellaneous instructions.
def int_xcore_bitrev : Intrinsic<[llvm_i32_ty],[llvm_i32_ty],[IntrNoMem]>;
def int_xcore_crc8 : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
[llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
[IntrNoMem]>;
def int_xcore_crc32 : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
[IntrNoMem]>;
def int_xcore_getid : Intrinsic<[llvm_i32_ty],[],[IntrNoMem]>;
def int_xcore_getps : Intrinsic<[llvm_i32_ty],[llvm_i32_ty]>;
def int_xcore_setps : Intrinsic<[],[llvm_i32_ty, llvm_i32_ty]>;

View File

@ -70,7 +70,7 @@ namespace {
(void) llvm::createEdgeProfilerPass();
(void) llvm::createOptimalEdgeProfilerPass();
(void) llvm::createPathProfilerPass();
(void) llvm::createGCOVProfilerPass(true, true);
(void) llvm::createGCOVProfilerPass(true, true, false);
(void) llvm::createFunctionInliningPass();
(void) llvm::createAlwaysInlinerPass();
(void) llvm::createGlobalDCEPass();

View File

@ -26,12 +26,12 @@ namespace llvm {
class MCSymbol;
class MCContext;
/// MCAsmInfo - This class is intended to be used as a base class for asm
/// properties and features specific to the target.
namespace ExceptionHandling {
enum ExceptionsType { None, DwarfTable, DwarfCFI, SjLj, ARM };
enum ExceptionsType { None, DwarfCFI, SjLj, ARM, Win64 };
}
/// MCAsmInfo - This class is intended to be used as a base class for asm
/// properties and features specific to the target.
class MCAsmInfo {
protected:
//===------------------------------------------------------------------===//
@ -269,9 +269,6 @@ namespace llvm {
/// SupportsExceptionHandling - True if target supports exception handling.
ExceptionHandling::ExceptionsType ExceptionsType; // Defaults to None
/// RequiresFrameSection - true if the Dwarf2 output needs a frame section
bool DwarfRequiresFrameSection; // Defaults to true.
/// DwarfUsesInlineInfoSection - True if DwarfDebugInlineSection is used to
/// encode inline subroutine information.
bool DwarfUsesInlineInfoSection; // Defaults to false.
@ -279,9 +276,9 @@ namespace llvm {
/// DwarfSectionOffsetDirective - Special section offset directive.
const char* DwarfSectionOffsetDirective; // Defaults to NULL
/// DwarfUsesAbsoluteLabelForStmtList - True if DW_AT_stmt_list needs
/// absolute label instead of offset.
bool DwarfUsesAbsoluteLabelForStmtList; // Defaults to true;
/// DwarfRequiresRelocationForSectionOffset - True if we need to produce a
/// relocation when we want a section offset in dwarf.
bool DwarfRequiresRelocationForSectionOffset; // Defaults to true;
// DwarfUsesLabelOffsetDifference - True if Dwarf2 output can
// use EmitLabelOffsetDifference.
@ -462,13 +459,9 @@ namespace llvm {
}
bool isExceptionHandlingDwarf() const {
return
(ExceptionsType == ExceptionHandling::DwarfTable ||
ExceptionsType == ExceptionHandling::DwarfCFI ||
ExceptionsType == ExceptionHandling::ARM);
}
bool doesDwarfRequireFrameSection() const {
return DwarfRequiresFrameSection;
(ExceptionsType == ExceptionHandling::DwarfCFI ||
ExceptionsType == ExceptionHandling::ARM ||
ExceptionsType == ExceptionHandling::Win64);
}
bool doesDwarfUsesInlineInfoSection() const {
return DwarfUsesInlineInfoSection;
@ -476,8 +469,8 @@ namespace llvm {
const char *getDwarfSectionOffsetDirective() const {
return DwarfSectionOffsetDirective;
}
bool doesDwarfUsesAbsoluteLabelForStmtList() const {
return DwarfUsesAbsoluteLabelForStmtList;
bool doesDwarfRequireRelocationForSectionOffset() const {
return DwarfRequiresRelocationForSectionOffset;
}
bool doesDwarfUsesLabelOffsetForRanges() const {
return DwarfUsesLabelOffsetForRanges;

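To illustrate the reshaped enum (DwarfTable removed, Win64 added), a hypothetical target-side switch over the new value set could look like the following; the helper and its strings are assumptions, not part of the commit:

// Hypothetical helper, only to show the post-change value set.
static const char *ehModelName(llvm::ExceptionHandling::ExceptionsType T) {
  switch (T) {
  case llvm::ExceptionHandling::None:     return "none";
  case llvm::ExceptionHandling::DwarfCFI: return "dwarf-cfi";
  case llvm::ExceptionHandling::SjLj:     return "sjlj";
  case llvm::ExceptionHandling::ARM:      return "arm-ehabi";
  case llvm::ExceptionHandling::Win64:    return "win64";
  }
  return "unknown";
}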

@ -281,11 +281,10 @@ namespace llvm {
//
// This emits the frame info section.
//
static void Emit(MCStreamer &streamer, bool usingCFI);
static void EmitDarwin(MCStreamer &streamer, bool usingCFI);
static void Emit(MCStreamer &streamer, bool usingCFI,
bool isEH);
static void EmitAdvanceLoc(MCStreamer &Streamer, uint64_t AddrDelta);
static void EncodeAdvanceLoc(uint64_t AddrDelta, raw_ostream &OS,
const TargetAsmInfo &AsmInfo);
static void EncodeAdvanceLoc(uint64_t AddrDelta, raw_ostream &OS);
};
} // end namespace llvm


@ -49,7 +49,8 @@ namespace llvm {
ELF_STV_Hidden = (ELF::STV_HIDDEN << ELF_STV_Shift),
ELF_STV_Protected = (ELF::STV_PROTECTED << ELF_STV_Shift),
ELF_Other_Weakref = (1 << ELF_Other_Shift)
ELF_Other_Weakref = (1 << ELF_Other_Shift),
ELF_Other_ThumbFunc = (2 << ELF_Other_Shift)
};
} // end namespace llvm


@ -171,8 +171,10 @@ class MCSymbolRefExpr : public MCExpr {
VK_ARM_GOTTPOFF,
VK_PPC_TOC,
VK_PPC_HA16, // ha16(symbol)
VK_PPC_LO16 // lo16(symbol)
VK_PPC_DARWIN_HA16, // ha16(symbol)
VK_PPC_DARWIN_LO16, // lo16(symbol)
VK_PPC_GAS_HA16, // symbol@ha
VK_PPC_GAS_LO16 // symbol@l
};
private:


@ -45,8 +45,8 @@ class MCInstPrinter {
/// "MOV32ri") or empty if we can't resolve it.
virtual StringRef getOpcodeName(unsigned Opcode) const;
/// getRegName - Return the assembler register name.
virtual StringRef getRegName(unsigned RegNo) const;
/// printRegName - Print the assembler register name.
virtual void printRegName(raw_ostream &OS, unsigned RegNo) const;
unsigned getAvailableFeatures() const { return AvailableFeatures; }
void setAvailableFeatures(unsigned Value) { AvailableFeatures = Value; }


@ -44,6 +44,7 @@ class AsmToken {
Colon,
Plus, Minus, Tilde,
Slash, // '/'
BackSlash, // '\'
LParen, RParen, LBrac, RBrac, LCurly, RCurly,
Star, Dot, Comma, Dollar, Equal, EqualEqual,


@ -71,7 +71,9 @@ class MCAsmParser {
/// Warning - Emit a warning at the location \arg L, with the message \arg
/// Msg.
virtual void Warning(SMLoc L, const Twine &Msg) = 0;
///
/// \return True if warnings are fatal.
virtual bool Warning(SMLoc L, const Twine &Msg) = 0;
/// Error - Emit an error at the location \arg L, with the message \arg
/// Msg.


@ -56,7 +56,7 @@ class MCAsmParserExtension {
MCAsmParser &getParser() { return *Parser; }
SourceMgr &getSourceManager() { return getParser().getSourceManager(); }
MCStreamer &getStreamer() { return getParser().getStreamer(); }
void Warning(SMLoc L, const Twine &Msg) {
bool Warning(SMLoc L, const Twine &Msg) {
return getParser().Warning(L, Msg);
}
bool Error(SMLoc L, const Twine &Msg) {


@ -18,6 +18,7 @@
#include "llvm/Support/DataTypes.h"
#include "llvm/MC/MCDirectives.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCWin64EH.h"
namespace llvm {
class MCAsmInfo;
@ -50,10 +51,18 @@ namespace llvm {
MCStreamer(const MCStreamer&); // DO NOT IMPLEMENT
MCStreamer &operator=(const MCStreamer&); // DO NOT IMPLEMENT
bool EmitEHFrame;
bool EmitDebugFrame;
std::vector<MCDwarfFrameInfo> FrameInfos;
MCDwarfFrameInfo *getCurrentFrameInfo();
void EnsureValidFrame();
std::vector<MCWin64EHUnwindInfo *> W64UnwindInfos;
MCWin64EHUnwindInfo *CurrentW64UnwindInfo;
void setCurrentW64UnwindInfo(MCWin64EHUnwindInfo *Frame);
void EnsureValidW64UnwindInfo();
const MCSymbol* LastNonPrivate;
/// SectionStack - This is a stack of current and previous section
@ -67,8 +76,12 @@ namespace llvm {
const MCExpr *BuildSymbolDiff(MCContext &Context, const MCSymbol *A,
const MCSymbol *B);
const MCExpr *ForceExpAbs(MCStreamer *Streamer, MCContext &Context,
const MCExpr* Expr);
const MCExpr *ForceExpAbs(const MCExpr* Expr);
void EmitFrames(bool usingCFI);
MCWin64EHUnwindInfo *getCurrentW64UnwindInfo() { return CurrentW64UnwindInfo; }
void EmitW64Tables();
public:
virtual ~MCStreamer();
@ -83,6 +96,14 @@ namespace llvm {
return FrameInfos[i];
}
unsigned getNumW64UnwindInfos() {
return W64UnwindInfos.size();
}
MCWin64EHUnwindInfo &getW64UnwindInfo(unsigned i) {
return *W64UnwindInfos[i];
}
/// @name Assembly File Formatting.
/// @{
@ -174,6 +195,17 @@ namespace llvm {
}
}
/// SwitchSectionNoChange - Set the current section where code is being
/// emitted to @p Section. This is required to update CurSection. This
/// version does not call ChangeSection.
void SwitchSectionNoChange(const MCSection *Section) {
assert(Section && "Cannot switch to a null section!");
const MCSection *curSection = SectionStack.back().first;
SectionStack.back().second = curSection;
if (Section != curSection)
SectionStack.back().first = Section;
}
/// InitSections - Create the default sections and set the initial one.
virtual void InitSections() = 0;
@ -288,6 +320,7 @@ namespace llvm {
/// if non-zero. This must be a power of 2 on some targets.
virtual void EmitTBSSSymbol(const MCSection *Section, MCSymbol *Symbol,
uint64_t Size, unsigned ByteAlignment = 0) = 0;
/// @}
/// @name Generating Data
/// @{
@ -436,6 +469,7 @@ namespace llvm {
void EmitDwarfSetLineAddr(int64_t LineDelta, const MCSymbol *Label,
int PointerSize);
virtual void EmitCFISections(bool EH, bool Debug);
virtual void EmitCFIStartProc();
virtual void EmitCFIEndProc();
virtual void EmitCFIDefCfa(int64_t Register, int64_t Offset);
@ -450,6 +484,21 @@ namespace llvm {
virtual void EmitCFIRelOffset(int64_t Register, int64_t Offset);
virtual void EmitCFIAdjustCfaOffset(int64_t Adjustment);
virtual void EmitWin64EHStartProc(const MCSymbol *Symbol);
virtual void EmitWin64EHEndProc();
virtual void EmitWin64EHStartChained();
virtual void EmitWin64EHEndChained();
virtual void EmitWin64EHHandler(const MCSymbol *Sym, bool Unwind,
bool Except);
virtual void EmitWin64EHHandlerData();
virtual void EmitWin64EHPushReg(unsigned Register);
virtual void EmitWin64EHSetFrame(unsigned Register, unsigned Offset);
virtual void EmitWin64EHAllocStack(unsigned Size);
virtual void EmitWin64EHSaveReg(unsigned Register, unsigned Offset);
virtual void EmitWin64EHSaveXMM(unsigned Register, unsigned Offset);
virtual void EmitWin64EHPushFrame(bool Code);
virtual void EmitWin64EHEndProlog();
/// EmitInstruction - Emit the given @p Instruction into the current
/// section.
virtual void EmitInstruction(const MCInst &Inst) = 0;

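Taken together, the new hooks let a backend describe a Win64 prologue as it emits it. A hedged sketch, in which the register encodings and the surrounding emission logic are assumptions rather than anything defined by this header:

// Hypothetical prologue emission through the new Win64 EH streamer API.
void emitPrologue(llvm::MCStreamer &S, llvm::MCSymbol *Func) {
  S.EmitWin64EHStartProc(Func);   // open unwind info for Func
  S.EmitWin64EHPushReg(5);        // e.g. "push rbp"; encoding is assumed
  S.EmitWin64EHSetFrame(5, 0);    // establish it as the frame register
  S.EmitWin64EHAllocStack(64);    // "sub rsp, 64"
  S.EmitWin64EHEndProlog();       // the prologue is now fully described
  // ... emit the function body ...
  S.EmitWin64EHEndProc();         // close the unwind info
}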

@ -0,0 +1,93 @@
//===- MCWin64EH.h - Machine Code Win64 EH support --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains declarations to support the Win64 Exception Handling
// scheme in MC.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCWIN64EH_H
#define LLVM_MC_MCWIN64EH_H
#include "llvm/Support/Win64EH.h"
#include <cassert>
#include <vector>
namespace llvm {
class StringRef;
class MCStreamer;
class MCSymbol;
class MCWin64EHInstruction {
public:
typedef Win64EH::UnwindOpcodes OpType;
private:
OpType Operation;
MCSymbol *Label;
unsigned Offset;
unsigned Register;
public:
MCWin64EHInstruction(OpType Op, MCSymbol *L, unsigned Reg)
: Operation(Op), Label(L), Offset(0), Register(Reg) {
assert(Op == Win64EH::UOP_PushNonVol);
}
MCWin64EHInstruction(MCSymbol *L, unsigned Size)
: Operation(Size>128 ? Win64EH::UOP_AllocLarge : Win64EH::UOP_AllocSmall),
Label(L), Offset(Size) { }
MCWin64EHInstruction(OpType Op, MCSymbol *L, unsigned Reg, unsigned Off)
: Operation(Op), Label(L), Offset(Off), Register(Reg) {
assert(Op == Win64EH::UOP_SetFPReg ||
Op == Win64EH::UOP_SaveNonVol ||
Op == Win64EH::UOP_SaveNonVolBig ||
Op == Win64EH::UOP_SaveXMM128 ||
Op == Win64EH::UOP_SaveXMM128Big);
}
MCWin64EHInstruction(OpType Op, MCSymbol *L, bool Code)
: Operation(Op), Label(L), Offset(Code ? 1 : 0) {
assert(Op == Win64EH::UOP_PushMachFrame);
}
OpType getOperation() const { return Operation; }
MCSymbol *getLabel() const { return Label; }
unsigned getOffset() const { return Offset; }
unsigned getSize() const { return Offset; }
unsigned getRegister() const { return Register; }
bool isPushCodeFrame() const { return Offset == 1; }
};
struct MCWin64EHUnwindInfo {
MCWin64EHUnwindInfo() : Begin(0), End(0), ExceptionHandler(0),
Function(0), PrologEnd(0), Symbol(0),
HandlesUnwind(false), HandlesExceptions(false),
LastFrameInst(-1), ChainedParent(0),
Instructions() {}
MCSymbol *Begin;
MCSymbol *End;
const MCSymbol *ExceptionHandler;
const MCSymbol *Function;
MCSymbol *PrologEnd;
MCSymbol *Symbol;
bool HandlesUnwind;
bool HandlesExceptions;
int LastFrameInst;
MCWin64EHUnwindInfo *ChainedParent;
std::vector<MCWin64EHInstruction> Instructions;
};
class MCWin64EHUnwindEmitter {
public:
static StringRef GetSectionSuffix(const MCSymbol *func);
//
// This emits the unwind info sections (.pdata and .xdata in PE/COFF).
//
static void Emit(MCStreamer &streamer);
static void EmitUnwindInfo(MCStreamer &streamer, MCWin64EHUnwindInfo *info);
};
} // end namespace llvm
#endif

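A short usage sketch of the classes above; the labels are assumed to have been emitted elsewhere, and the register encoding is an assumption:

// Sketch: recording "push rbp" then "sub rsp, 64" in unwind info.
void recordPrologue(llvm::MCSymbol *AfterPush, llvm::MCSymbol *AfterAlloc) {
  using namespace llvm;
  MCWin64EHUnwindInfo Info;
  Info.Instructions.push_back(
      MCWin64EHInstruction(Win64EH::UOP_PushNonVol, AfterPush, /*Reg=*/5));
  Info.Instructions.push_back(
      MCWin64EHInstruction(AfterAlloc, /*Size=*/64)); // < 128: UOP_AllocSmall
  (void)Info;
}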

@ -34,7 +34,7 @@ template<typename ValueSubClass, typename ItemParentClass>
//===----------------------------------------------------------------------===//
/// MDString - a single uniqued string.
/// These are used to efficiently contain a byte sequence for metadata.
/// MDString is always unnamd.
/// MDString is always unnamed.
class MDString : public Value {
MDString(const MDString &); // DO NOT IMPLEMENT


@ -186,28 +186,46 @@ class ConcreteOperator : public SuperClass {
};
class AddOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {};
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Add> {
~AddOperator(); // DO NOT IMPLEMENT
};
class SubOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {};
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Sub> {
~SubOperator(); // DO NOT IMPLEMENT
};
class MulOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {};
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Mul> {
~MulOperator(); // DO NOT IMPLEMENT
};
class ShlOperator
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {};
: public ConcreteOperator<OverflowingBinaryOperator, Instruction::Shl> {
~ShlOperator(); // DO NOT IMPLEMENT
};
class SDivOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {};
: public ConcreteOperator<PossiblyExactOperator, Instruction::SDiv> {
~SDivOperator(); // DO NOT IMPLEMENT
};
class UDivOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {};
: public ConcreteOperator<PossiblyExactOperator, Instruction::UDiv> {
~UDivOperator(); // DO NOT IMPLEMENT
};
class AShrOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {};
: public ConcreteOperator<PossiblyExactOperator, Instruction::AShr> {
~AShrOperator(); // DO NOT IMPLEMENT
};
class LShrOperator
: public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {};
: public ConcreteOperator<PossiblyExactOperator, Instruction::LShr> {
~LShrOperator(); // DO NOT IMPLEMENT
};
class GEPOperator
: public ConcreteOperator<Operator, Instruction::GetElementPtr> {
~GEPOperator(); // DO NOT IMPLEMENT
enum {
IsInBounds = (1 << 0)
};

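The private destructors make these classes usable only as cast targets over existing Values, never as constructed objects. A minimal sketch of that pattern (the enclosing function is an assumption):

// Sketch: AddOperator is a "view" reached via dyn_cast, not constructed.
bool hasNSWAdd(const llvm::Value *V) {
  if (const llvm::AddOperator *Add = llvm::dyn_cast<llvm::AddOperator>(V))
    return Add->hasNoSignedWrap(); // from OverflowingBinaryOperator
  return false;
}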

@ -0,0 +1,50 @@
//===- BranchProbability.h - Branch Probability Analysis --------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Definition of BranchProbability shared by IR and Machine Instructions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_BRANCHPROBABILITY_H
#define LLVM_SUPPORT_BRANCHPROBABILITY_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
class raw_ostream;
class BranchProbabilityInfo;
class MachineBranchProbabilityInfo;
class MachineBasicBlock;
// This class represents Branch Probability as a non-negative fraction.
class BranchProbability {
friend class BranchProbabilityInfo;
friend class MachineBranchProbabilityInfo;
friend class MachineBasicBlock;
// Numerator
uint32_t N;
// Denominator
uint32_t D;
BranchProbability(uint32_t n, uint32_t d);
public:
raw_ostream &print(raw_ostream &OS) const;
void dump() const;
};
raw_ostream &operator<<(raw_ostream &OS, const BranchProbability &Prob);
}
#endif


@ -23,8 +23,6 @@ namespace llvm {
// isa<x> Support Templates
//===----------------------------------------------------------------------===//
template<typename FromCl> struct isa_impl_cl;
// Define a template that can be specialized by smart pointers to reflect the
// fact that they are automatically dereferenced, and are not involved with the
// template selection process... the default implementation is a noop.
@ -43,12 +41,9 @@ template<typename From> struct simplify_type<const From> {
}
};
// isa<X> - Return true if the parameter to the template is an instance of the
// template type argument. Used like this:
//
// if (isa<Type*>(myVal)) { ... }
//
// The core of the implementation of isa<X> is here; To and From should be
// the names of classes. This template can be specialized to customize the
// implementation of isa<> without rewriting it from scratch.
template <typename To, typename From>
struct isa_impl {
static inline bool doit(const From &Val) {
@ -56,66 +51,63 @@ struct isa_impl {
}
};
template<typename To, typename From, typename SimpleType>
template <typename To, typename From> struct isa_impl_cl {
static inline bool doit(const From &Val) {
return isa_impl<To, From>::doit(Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, const From> {
static inline bool doit(const From &Val) {
return isa_impl<To, From>::doit(Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, From*> {
static inline bool doit(const From *Val) {
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, const From*> {
static inline bool doit(const From *Val) {
return isa_impl<To, From>::doit(*Val);
}
};
template <typename To, typename From> struct isa_impl_cl<To, const From*const> {
static inline bool doit(const From *Val) {
return isa_impl<To, From>::doit(*Val);
}
};
template<typename To, typename From, typename SimpleFrom>
struct isa_impl_wrap {
// When From != SimplifiedType, we can simplify the type some more by using
// the simplify_type template.
static bool doit(const From &Val) {
return isa_impl_cl<const SimpleType>::template
isa<To>(simplify_type<const From>::getSimplifiedValue(Val));
return isa_impl_wrap<To, SimpleFrom,
typename simplify_type<SimpleFrom>::SimpleType>::doit(
simplify_type<From>::getSimplifiedValue(Val));
}
};
template<typename To, typename FromTy>
struct isa_impl_wrap<To, const FromTy, const FromTy> {
struct isa_impl_wrap<To, FromTy, FromTy> {
// When From == SimpleType, we are as simple as we are going to get.
static bool doit(const FromTy &Val) {
return isa_impl<To,FromTy>::doit(Val);
return isa_impl_cl<To,FromTy>::doit(Val);
}
};
// isa_impl_cl - Use class partial specialization to transform types to a single
// canonical form for isa_impl.
// isa<X> - Return true if the parameter to the template is an instance of the
// template type argument. Used like this:
//
// if (isa<Type>(myVal)) { ... }
//
template<typename FromCl>
struct isa_impl_cl {
template<class ToCl>
static bool isa(const FromCl &Val) {
return isa_impl_wrap<ToCl,const FromCl,
typename simplify_type<const FromCl>::SimpleType>::doit(Val);
}
};
// Specialization used to strip const qualifiers off of the FromCl type...
template<typename FromCl>
struct isa_impl_cl<const FromCl> {
template<class ToCl>
static bool isa(const FromCl &Val) {
return isa_impl_cl<FromCl>::template isa<ToCl>(Val);
}
};
// Define pointer traits in terms of base traits...
template<class FromCl>
struct isa_impl_cl<FromCl*> {
template<class ToCl>
static bool isa(FromCl *Val) {
return isa_impl_cl<FromCl>::template isa<ToCl>(*Val);
}
};
// Define reference traits in terms of base traits...
template<class FromCl>
struct isa_impl_cl<FromCl&> {
template<class ToCl>
static bool isa(FromCl &Val) {
return isa_impl_cl<FromCl>::template isa<ToCl>(&Val);
}
};
template <class X, class Y>
inline bool isa(const Y &Val) {
return isa_impl_cl<Y>::template isa<X>(Val);
return isa_impl_wrap<X, Y, typename simplify_type<Y>::SimpleType>::doit(Val);
}
//===----------------------------------------------------------------------===//

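After this rewrite, the public entry point funnels everything through simplify_type and the isa_impl_cl specializations above; a minimal usage sketch (the function around it is an assumption):

// Sketch: pointers and const-qualified values all reach isa_impl.
void classify(const llvm::Value *V) {
  if (llvm::isa<llvm::Instruction>(V)) {
    // V was dereferenced by the isa_impl_cl<To, const From*>
    // partial specialization before the final check.
  }
}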

@ -186,8 +186,13 @@ class CrashRecoveryContextCleanupRegistrar {
}
~CrashRecoveryContextCleanupRegistrar() {
unregister();
}
void unregister() {
if (cleanup && !cleanup->cleanupFired)
cleanup->getContext()->unregisterCleanup(cleanup);
cleanup->getContext()->unregisterCleanup(cleanup);
cleanup = 0;
}
};
}


@ -235,6 +235,7 @@ enum dwarf_constants {
DW_AT_APPLE_property_getter = 0x3fe9,
DW_AT_APPLE_property_setter = 0x3fea,
DW_AT_APPLE_property_attribute = 0x3feb,
DW_AT_APPLE_objc_complete_type = 0x3fec,
// Attribute form encodings
DW_FORM_addr = 0x01,


@ -80,6 +80,7 @@ class IRBuilderBase {
void SetInsertPoint(Instruction *I) {
BB = I->getParent();
InsertPt = I;
SetCurrentDebugLocation(I->getDebugLoc());
}
/// SetInsertPoint - This specifies that created instructions should be
@ -106,6 +107,10 @@ class IRBuilderBase {
I->setDebugLoc(CurDbgLocation);
}
/// getCurrentFunctionReturnType - Get the return type of the current function
/// that we're emitting into.
const Type *getCurrentFunctionReturnType() const;
/// InsertPoint - A saved insertion point.
class InsertPoint {
BasicBlock *Block;
@ -194,6 +199,7 @@ class IRBuilderBase {
return ConstantInt::get(getInt64Ty(), C);
}
/// getInt - Get a constant integer value.
ConstantInt *getInt(const APInt &AI) {
return ConstantInt::get(Context, AI);
}
@ -246,10 +252,10 @@ class IRBuilderBase {
return Type::getInt8PtrTy(Context, AddrSpace);
}
/// getCurrentFunctionReturnType - Get the return type of the current function
/// that we're emitting into.
const Type *getCurrentFunctionReturnType() const;
//===--------------------------------------------------------------------===//
// Intrinsic creation methods
//===--------------------------------------------------------------------===//
/// CreateMemSet - Create and insert a memset to the specified pointer and the
/// specified value. If the pointer isn't an i8*, it will be converted. If a
/// TBAA tag is specified, it will be added to the instruction.
@ -282,6 +288,15 @@ class IRBuilderBase {
CallInst *CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
bool isVolatile = false, MDNode *TBAATag = 0);
/// CreateLifetimeStart - Create a lifetime.start intrinsic. If the pointer
/// isn't i8* it will be converted.
CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = 0);
/// CreateLifetimeEnd - Create a lifetime.end intrinsic. If the pointer isn't
/// i8* it will be converted.
CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = 0);
private:
Value *getCastedInt8PtrValue(Value *Ptr);
};
@ -324,6 +339,7 @@ class IRBuilder : public IRBuilderBase, public Inserter {
explicit IRBuilder(Instruction *IP)
: IRBuilderBase(IP->getContext()), Folder() {
SetInsertPoint(IP);
SetCurrentDebugLocation(IP->getDebugLoc());
}
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F)

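The net effect of these changes in one small sketch: constructing a builder on an instruction now inherits its debug location, and the new helpers mark an alloca's live range (I and Slot are assumed to exist):

// Sketch: debug-location-preserving insertion plus lifetime markers.
void markLifetime(llvm::Instruction *I, llvm::Value *Slot) {
  llvm::IRBuilder<> B(I);       // SetInsertPoint(I) plus I's debug loc
  B.CreateLifetimeStart(Slot);  // null Size: the default is used
  // ... uses of Slot ...
  B.CreateLifetimeEnd(Slot);
}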

@ -81,7 +81,7 @@ class MemoryBuffer {
bool RequiresNullTerminator = true);
/// getMemBuffer - Open the specified memory range as a MemoryBuffer. Note
/// that InputData must be null terminated.
/// that InputData must be null terminated if RequiresNullTerminator is true.
static MemoryBuffer *getMemBuffer(StringRef InputData,
StringRef BufferName = "",
bool RequiresNullTerminator = true);

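For reference, the common no-copy case this documents; the buffer name is arbitrary, and caller ownership of the result is an assumption about usage rather than something stated here:

// Sketch: wrapping already null-terminated storage without copying.
llvm::MemoryBuffer *MB =
    llvm::MemoryBuffer::getMemBuffer("x = 1\n", "<inline>");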

@ -0,0 +1,322 @@
//===-- llvm/Support/PassManagerBuilder.h - Build Standard Pass -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PassManagerBuilder class, which is used to set up a
// "standard" optimization sequence suitable for languages like C and C++.
//
// These are implemented as inline functions so that we do not have to worry
// about link issues.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_PASSMANAGERBUILDER_H
#define LLVM_SUPPORT_PASSMANAGERBUILDER_H
#include "llvm/PassManager.h"
#include "llvm/DefaultPasses.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/IPO.h"
namespace llvm {
/// PassManagerBuilder - This class is used to set up a standard optimization
/// sequence for languages like C and C++, allowing some APIs to customize the
/// pass sequence in various ways. A simple example of using it would be:
///
/// PassManagerBuilder Builder;
/// Builder.OptLevel = 2;
/// Builder.populateFunctionPassManager(FPM);
/// Builder.populateModulePassManager(MPM);
///
/// In addition to setting up the basic passes, PassManagerBuilder allows
/// frontends to vend a plugin API, where plugins are allowed to add extensions
/// to the default pass manager. They do this by specifying where in the pass
/// pipeline they want to be added, along with a callback function that adds
/// the pass(es). For example, a plugin that wanted to add a loop optimization
/// could do something like this:
///
/// static void addMyLoopPass(const PassManagerBuilder &Builder,
///                           PassManagerBase &PM) {
/// if (Builder.OptLevel > 2 && Builder.SizeLevel == 0)
/// PM.add(createMyAwesomePass());
/// }
/// ...
/// Builder.addExtension(PassManagerBuilder::EP_LoopOptimizerEnd,
/// addMyLoopPass);
/// ...
class PassManagerBuilder {
public:
/// Extensions are passed the builder itself (so they can see how it is
/// configured) as well as the pass manager to add stuff to.
typedef void (*ExtensionFn)(const PassManagerBuilder &Builder,
PassManagerBase &PM);
enum ExtensionPointTy {
/// EP_EarlyAsPossible - This extension point allows adding passes before
/// any other transformations, allowing them to see the code as it is coming
/// out of the frontend.
EP_EarlyAsPossible,
/// EP_LoopOptimizerEnd - This extension point allows adding loop passes to
/// the end of the loop optimizer.
EP_LoopOptimizerEnd
};
/// The Optimization Level - Specify the basic optimization level.
/// 0 = -O0, 1 = -O1, 2 = -O2, 3 = -O3
unsigned OptLevel;
/// SizeLevel - How much we're optimizing for size.
/// 0 = none, 1 = -Os, 2 = -Oz
unsigned SizeLevel;
/// LibraryInfo - Specifies information about the runtime library for the
/// optimizer. If this is non-null, it is added to both the function and
/// per-module pass pipeline.
TargetLibraryInfo *LibraryInfo;
/// Inliner - Specifies the inliner to use. If this is non-null, it is
/// added to the per-module passes.
Pass *Inliner;
bool DisableSimplifyLibCalls;
bool DisableUnitAtATime;
bool DisableUnrollLoops;
private:
/// ExtensionList - This is the list of all registered extensions.
std::vector<std::pair<ExtensionPointTy, ExtensionFn> > Extensions;
public:
PassManagerBuilder() {
OptLevel = 2;
SizeLevel = 0;
LibraryInfo = 0;
Inliner = 0;
DisableSimplifyLibCalls = false;
DisableUnitAtATime = false;
DisableUnrollLoops = false;
}
~PassManagerBuilder() {
delete LibraryInfo;
delete Inliner;
}
void addExtension(ExtensionPointTy Ty, ExtensionFn Fn) {
Extensions.push_back(std::make_pair(Ty, Fn));
}
private:
void addExtensionsToPM(ExtensionPointTy ETy, PassManagerBase &PM) const {
for (unsigned i = 0, e = Extensions.size(); i != e; ++i)
if (Extensions[i].first == ETy)
Extensions[i].second(*this, PM);
}
void addInitialAliasAnalysisPasses(PassManagerBase &PM) const {
// Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
// BasicAliasAnalysis wins if they disagree. This is intended to help
// support "obvious" type-punning idioms.
PM.add(createTypeBasedAliasAnalysisPass());
PM.add(createBasicAliasAnalysisPass());
}
public:
/// populateFunctionPassManager - This fills in the function pass manager,
/// which is expected to be run on each function immediately as it is
/// generated. The idea is to reduce the size of the IR in memory.
void populateFunctionPassManager(FunctionPassManager &FPM) {
addExtensionsToPM(EP_EarlyAsPossible, FPM);
// Add LibraryInfo if we have some.
if (LibraryInfo) FPM.add(new TargetLibraryInfo(*LibraryInfo));
if (OptLevel == 0) return;
addInitialAliasAnalysisPasses(FPM);
FPM.add(createCFGSimplificationPass());
FPM.add(createScalarReplAggregatesPass());
FPM.add(createEarlyCSEPass());
}
/// populateModulePassManager - This sets up the primary pass manager.
void populateModulePassManager(PassManagerBase &MPM) {
// If all optimizations are disabled, just run the always-inline pass.
if (OptLevel == 0) {
if (Inliner) {
MPM.add(Inliner);
Inliner = 0;
}
return;
}
// Add LibraryInfo if we have some.
if (LibraryInfo) MPM.add(new TargetLibraryInfo(*LibraryInfo));
addInitialAliasAnalysisPasses(MPM);
if (!DisableUnitAtATime) {
MPM.add(createGlobalOptimizerPass()); // Optimize out global vars
MPM.add(createIPSCCPPass()); // IP SCCP
MPM.add(createDeadArgEliminationPass()); // Dead argument elimination
MPM.add(createInstructionCombiningPass());// Clean up after IPCP & DAE
MPM.add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
}
// Start of CallGraph SCC passes.
if (!DisableUnitAtATime)
MPM.add(createPruneEHPass()); // Remove dead EH info
if (Inliner) {
MPM.add(Inliner);
Inliner = 0;
}
if (!DisableUnitAtATime)
MPM.add(createFunctionAttrsPass()); // Set readonly/readnone attrs
if (OptLevel > 2)
MPM.add(createArgumentPromotionPass()); // Scalarize uninlined fn args
// Start of function pass.
// Break up aggregate allocas, using SSAUpdater.
MPM.add(createScalarReplAggregatesPass(-1, false));
MPM.add(createEarlyCSEPass()); // Catch trivial redundancies
if (!DisableSimplifyLibCalls)
MPM.add(createSimplifyLibCallsPass()); // Library Call Optimizations
MPM.add(createJumpThreadingPass()); // Thread jumps.
MPM.add(createCorrelatedValuePropagationPass()); // Propagate conditionals
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Combine silly seq's
MPM.add(createTailCallEliminationPass()); // Eliminate tail calls
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createReassociatePass()); // Reassociate expressions
MPM.add(createLoopRotatePass()); // Rotate Loop
MPM.add(createLICMPass()); // Hoist loop invariants
MPM.add(createLoopUnswitchPass(SizeLevel || OptLevel < 3));
MPM.add(createInstructionCombiningPass());
MPM.add(createIndVarSimplifyPass()); // Canonicalize indvars
MPM.add(createLoopIdiomPass()); // Recognize idioms like memset.
MPM.add(createLoopDeletionPass()); // Delete dead loops
if (!DisableUnrollLoops)
MPM.add(createLoopUnrollPass()); // Unroll small loops
addExtensionsToPM(EP_LoopOptimizerEnd, MPM);
if (OptLevel > 1)
MPM.add(createGVNPass()); // Remove redundancies
MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset
MPM.add(createSCCPPass()); // Constant prop with SCCP
// Run instcombine after redundancy elimination to exploit opportunities
// opened up by them.
MPM.add(createInstructionCombiningPass());
MPM.add(createJumpThreadingPass()); // Thread jumps
MPM.add(createCorrelatedValuePropagationPass());
MPM.add(createDeadStoreEliminationPass()); // Delete dead stores
MPM.add(createAggressiveDCEPass()); // Delete dead instructions
MPM.add(createCFGSimplificationPass()); // Merge & remove BBs
MPM.add(createInstructionCombiningPass()); // Clean up after everything.
if (!DisableUnitAtATime) {
MPM.add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
MPM.add(createDeadTypeEliminationPass()); // Eliminate dead types
// GlobalOpt already deletes dead functions and globals, at -O3 try a
// late pass of GlobalDCE. It is capable of deleting dead cycles.
if (OptLevel > 2)
MPM.add(createGlobalDCEPass()); // Remove dead fns and globals.
if (OptLevel > 1)
MPM.add(createConstantMergePass()); // Merge dup global constants
}
}
void populateLTOPassManager(PassManagerBase &PM, bool Internalize,
bool RunInliner) {
// Provide AliasAnalysis services for optimizations.
addInitialAliasAnalysisPasses(PM);
// Now that composite has been compiled, scan through the module, looking
// for a main function. If main is defined, mark all other functions
// internal.
if (Internalize)
PM.add(createInternalizePass(true));
// Propagate constants at call sites into the functions they call. This
// opens opportunities for globalopt (and inlining) by substituting function
// pointers passed as arguments to direct uses of functions.
PM.add(createIPSCCPPass());
// Now that we internalized some globals, see if we can hack on them!
PM.add(createGlobalOptimizerPass());
// Linking modules together can lead to duplicated global constants, only
// keep one copy of each constant.
PM.add(createConstantMergePass());
// Remove unused arguments from functions.
PM.add(createDeadArgEliminationPass());
// Reduce the code after globalopt and ipsccp. Both can open up significant
// simplification opportunities, and both can propagate functions through
// function pointers. When this happens, we often have to resolve varargs
// calls, etc, so let instcombine do this.
PM.add(createInstructionCombiningPass());
// Inline small functions
if (RunInliner)
PM.add(createFunctionInliningPass());
PM.add(createPruneEHPass()); // Remove dead EH info.
// Optimize globals again if we ran the inliner.
if (RunInliner)
PM.add(createGlobalOptimizerPass());
PM.add(createGlobalDCEPass()); // Remove dead functions.
// If we didn't decide to inline a function, check to see if we can
// transform it to pass arguments by value instead of by reference.
PM.add(createArgumentPromotionPass());
// The IPO passes may leave cruft around. Clean up after them.
PM.add(createInstructionCombiningPass());
PM.add(createJumpThreadingPass());
// Break up allocas
PM.add(createScalarReplAggregatesPass());
// Run a few AA driven optimizations here and now, to cleanup the code.
PM.add(createFunctionAttrsPass()); // Add nocapture.
PM.add(createGlobalsModRefPass()); // IP alias analysis.
PM.add(createLICMPass()); // Hoist loop invariants.
PM.add(createGVNPass()); // Remove redundancies.
PM.add(createMemCpyOptPass()); // Remove dead memcpys.
// Nuke dead stores.
PM.add(createDeadStoreEliminationPass());
// Cleanup and simplify the code after the scalar optimizations.
PM.add(createInstructionCombiningPass());
PM.add(createJumpThreadingPass());
// Delete basic blocks, which optimization passes may have killed.
PM.add(createCFGSimplificationPass());
// Now that we have optimized the program, discard unreachable functions.
PM.add(createGlobalDCEPass());
}
};
} // end namespace llvm
#endif

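Spelling out the pattern from the class comment, with an assumed Module M and the inliner the header already references:

// Sketch: building and running the standard -O2 pipeline.
void runStandardO2(llvm::Module &M) {
  using namespace llvm;
  PassManagerBuilder Builder;
  Builder.OptLevel = 2;
  Builder.Inliner = createFunctionInliningPass();
  FunctionPassManager FPM(&M);
  PassManager MPM;
  Builder.populateFunctionPassManager(FPM);
  Builder.populateModulePassManager(MPM);
  FPM.doInitialization();
  for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F)
    FPM.run(*F);
  FPM.doFinalization();
  MPM.run(M);
}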

@ -694,6 +694,99 @@ inline brc_match<Cond_t> m_Br(const Cond_t &C, BasicBlock *&T, BasicBlock *&F) {
return brc_match<Cond_t>(C, T, F);
}
//===----------------------------------------------------------------------===//
// Matchers for max/min idioms, e.g.: "select (sgt x, y), x, y" -> smax(x, y).
//
template<typename LHS_t, typename RHS_t, typename Pred_t>
struct MaxMin_match {
LHS_t L;
RHS_t R;
MaxMin_match(const LHS_t &LHS, const RHS_t &RHS)
: L(LHS), R(RHS) {}
template<typename OpTy>
bool match(OpTy *V) {
// Look for "(x pred y) ? x : y" or "(x pred y) ? y : x".
SelectInst *SI = dyn_cast<SelectInst>(V);
if (!SI)
return false;
ICmpInst *Cmp = dyn_cast<ICmpInst>(SI->getCondition());
if (!Cmp)
return false;
// At this point we have a select conditioned on a comparison. Check that
// it is the values returned by the select that are being compared.
Value *TrueVal = SI->getTrueValue();
Value *FalseVal = SI->getFalseValue();
Value *LHS = Cmp->getOperand(0);
Value *RHS = Cmp->getOperand(1);
if ((TrueVal != LHS || FalseVal != RHS) &&
(TrueVal != RHS || FalseVal != LHS))
return false;
ICmpInst::Predicate Pred = LHS == TrueVal ?
Cmp->getPredicate() : Cmp->getSwappedPredicate();
// Does "(x pred y) ? x : y" represent the desired max/min operation?
if (!Pred_t::match(Pred))
return false;
// It does! Bind the operands.
return L.match(LHS) && R.match(RHS);
}
};
/// smax_pred_ty - Helper class for identifying signed max predicates.
struct smax_pred_ty {
static bool match(ICmpInst::Predicate Pred) {
return Pred == CmpInst::ICMP_SGT || Pred == CmpInst::ICMP_SGE;
}
};
/// smin_pred_ty - Helper class for identifying signed min predicates.
struct smin_pred_ty {
static bool match(ICmpInst::Predicate Pred) {
return Pred == CmpInst::ICMP_SLT || Pred == CmpInst::ICMP_SLE;
}
};
/// umax_pred_ty - Helper class for identifying unsigned max predicates.
struct umax_pred_ty {
static bool match(ICmpInst::Predicate Pred) {
return Pred == CmpInst::ICMP_UGT || Pred == CmpInst::ICMP_UGE;
}
};
/// umin_pred_ty - Helper class for identifying unsigned min predicates.
struct umin_pred_ty {
static bool match(ICmpInst::Predicate Pred) {
return Pred == CmpInst::ICMP_ULT || Pred == CmpInst::ICMP_ULE;
}
};
template<typename LHS, typename RHS>
inline MaxMin_match<LHS, RHS, smax_pred_ty>
m_SMax(const LHS &L, const RHS &R) {
return MaxMin_match<LHS, RHS, smax_pred_ty>(L, R);
}
template<typename LHS, typename RHS>
inline MaxMin_match<LHS, RHS, smin_pred_ty>
m_SMin(const LHS &L, const RHS &R) {
return MaxMin_match<LHS, RHS, smin_pred_ty>(L, R);
}
template<typename LHS, typename RHS>
inline MaxMin_match<LHS, RHS, umax_pred_ty>
m_UMax(const LHS &L, const RHS &R) {
return MaxMin_match<LHS, RHS, umax_pred_ty>(L, R);
}
template<typename LHS, typename RHS>
inline MaxMin_match<LHS, RHS, umin_pred_ty>
m_UMin(const LHS &L, const RHS &R) {
return MaxMin_match<LHS, RHS, umin_pred_ty>(L, R);
}
} // end namespace PatternMatch
} // end namespace llvm

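A short example of how these matchers read at a call site (the enclosing function is an assumption):

// Sketch: recognizing smax(x, y) spelled as compare-plus-select.
void describe(llvm::Value *V) {
  using namespace llvm::PatternMatch;
  llvm::Value *X, *Y;
  if (match(V, m_SMax(m_Value(X), m_Value(Y)))) {
    // V computes the signed maximum of X and Y.
  }
}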

@ -85,8 +85,9 @@ namespace sys {
/// This function waits for the program to exit. This function will block
/// the current program until the invoked program exits.
/// @returns an integer result code indicating the status of the program.
/// A zero or positive value indicates the result code of the program. A
/// negative value is the signal number on which it terminated.
/// A zero or positive value indicates the result code of the program.
/// -1 indicates failure to execute.
/// -2 indicates a crash during execution or a timeout.
/// @see Execute
/// @brief Waits for the program to exit.
int Wait

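The contract above reads most easily as a three-way check; the Wait call itself is elided in this hunk, so the snippet only shows how a caller would interpret its result:

// Sketch: decoding the documented result values (RC stands in for the
// value returned by Wait, which is not shown above).
void interpret(int RC) {
  if (RC >= 0) {
    // the invoked program's own exit status
  } else if (RC == -1) {
    // the program could not be executed at all
  } else if (RC == -2) {
    // the program crashed, or the wait timed out
  }
}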

@ -106,7 +106,9 @@ class SourceMgr {
/// AddIncludeFile - Search for a file with the specified name in the current
/// directory or in one of the IncludeDirs. If no file is found, this returns
/// ~0, otherwise it returns the buffer ID of the stacked file.
unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc);
/// The full path to the included file can be found in IncludedFile.
unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc,
std::string &IncludedFile);
/// FindBufferContainingLoc - Return the ID of the buffer containing the
/// specified location, returning -1 if not found.

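A sketch of the new signature at a call site; the SourceMgr instance, include location, and the file name "defs.inc" are assumptions:

// Sketch: the out-parameter reports which path actually resolved.
unsigned openInclude(llvm::SourceMgr &SM, llvm::SMLoc IncludeLoc) {
  std::string IncludedFile;
  unsigned BufID = SM.AddIncludeFile("defs.inc", IncludeLoc, IncludedFile);
  if (BufID != ~0U) {
    // IncludedFile now holds the full path of the file that was opened,
    // possibly found via one of the IncludeDirs.
  }
  return BufID;
}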

@ -1,244 +0,0 @@
//===-- llvm/Support/StandardPasses.h - Standard pass lists -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines utility functions for creating a "standard" set of
// optimization passes, so that compilers and tools which use optimization
// passes use the same set of standard passes.
//
// These are implemented as inline functions so that we do not have to worry
// about link issues.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_STANDARDPASSES_H
#define LLVM_SUPPORT_STANDARDPASSES_H
#include "llvm/PassManager.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/IPO.h"
namespace llvm {
static inline void createStandardAliasAnalysisPasses(PassManagerBase *PM) {
// Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
// BasicAliasAnalysis wins if they disagree. This is intended to help
// support "obvious" type-punning idioms.
PM->add(createTypeBasedAliasAnalysisPass());
PM->add(createBasicAliasAnalysisPass());
}
/// createStandardFunctionPasses - Add the standard list of function passes to
/// the provided pass manager.
///
/// \arg OptimizationLevel - The optimization level, corresponding to -O0,
/// -O1, etc.
static inline void createStandardFunctionPasses(PassManagerBase *PM,
unsigned OptimizationLevel) {
if (OptimizationLevel > 0) {
createStandardAliasAnalysisPasses(PM);
PM->add(createCFGSimplificationPass());
PM->add(createScalarReplAggregatesPass());
PM->add(createEarlyCSEPass());
}
}
/// createStandardModulePasses - Add the standard list of module passes to the
/// provided pass manager.
///
/// \arg OptimizationLevel - The optimization level, corresponding to -O0,
/// -O1, etc.
/// \arg OptimizeSize - Whether the transformations should optimize for size.
/// \arg UnitAtATime - Allow passes which may make global module changes.
/// \arg UnrollLoops - Allow loop unrolling.
/// \arg SimplifyLibCalls - Allow library calls to be simplified.
/// \arg HaveExceptions - Whether the module may have code using exceptions.
/// \arg InliningPass - The inlining pass to use, if any, or null. This will
/// always be added, even at -O0.
static inline void createStandardModulePasses(PassManagerBase *PM,
unsigned OptimizationLevel,
bool OptimizeSize,
bool UnitAtATime,
bool UnrollLoops,
bool SimplifyLibCalls,
bool HaveExceptions,
Pass *InliningPass) {
createStandardAliasAnalysisPasses(PM);
// If all optimizations are disabled, just run the always-inline pass.
if (OptimizationLevel == 0) {
if (InliningPass)
PM->add(InliningPass);
return;
}
if (UnitAtATime) {
PM->add(createGlobalOptimizerPass()); // Optimize out global vars
PM->add(createIPSCCPPass()); // IP SCCP
PM->add(createDeadArgEliminationPass()); // Dead argument elimination
PM->add(createInstructionCombiningPass());// Clean up after IPCP & DAE
PM->add(createCFGSimplificationPass()); // Clean up after IPCP & DAE
}
// Start of CallGraph SCC passes.
if (UnitAtATime && HaveExceptions)
PM->add(createPruneEHPass()); // Remove dead EH info
if (InliningPass)
PM->add(InliningPass);
if (UnitAtATime)
PM->add(createFunctionAttrsPass()); // Set readonly/readnone attrs
if (OptimizationLevel > 2)
PM->add(createArgumentPromotionPass()); // Scalarize uninlined fn args
// Start of function pass.
// Break up aggregate allocas, using SSAUpdater.
PM->add(createScalarReplAggregatesPass(-1, false));
PM->add(createEarlyCSEPass()); // Catch trivial redundancies
if (SimplifyLibCalls)
PM->add(createSimplifyLibCallsPass()); // Library Call Optimizations
PM->add(createJumpThreadingPass()); // Thread jumps.
PM->add(createCorrelatedValuePropagationPass()); // Propagate conditionals
PM->add(createCFGSimplificationPass()); // Merge & remove BBs
PM->add(createInstructionCombiningPass()); // Combine silly seq's
PM->add(createTailCallEliminationPass()); // Eliminate tail calls
PM->add(createCFGSimplificationPass()); // Merge & remove BBs
PM->add(createReassociatePass()); // Reassociate expressions
PM->add(createLoopRotatePass()); // Rotate Loop
PM->add(createLICMPass()); // Hoist loop invariants
PM->add(createLoopUnswitchPass(OptimizeSize || OptimizationLevel < 3));
PM->add(createInstructionCombiningPass());
PM->add(createIndVarSimplifyPass()); // Canonicalize indvars
PM->add(createLoopIdiomPass()); // Recognize idioms like memset.
PM->add(createLoopDeletionPass()); // Delete dead loops
if (UnrollLoops)
PM->add(createLoopUnrollPass()); // Unroll small loops
if (OptimizationLevel > 1)
PM->add(createGVNPass()); // Remove redundancies
PM->add(createMemCpyOptPass()); // Remove memcpy / form memset
PM->add(createSCCPPass()); // Constant prop with SCCP
// Run instcombine after redundancy elimination to exploit opportunities
// opened up by them.
PM->add(createInstructionCombiningPass());
PM->add(createJumpThreadingPass()); // Thread jumps
PM->add(createCorrelatedValuePropagationPass());
PM->add(createDeadStoreEliminationPass()); // Delete dead stores
PM->add(createAggressiveDCEPass()); // Delete dead instructions
PM->add(createCFGSimplificationPass()); // Merge & remove BBs
PM->add(createInstructionCombiningPass()); // Clean up after everything.
if (UnitAtATime) {
PM->add(createStripDeadPrototypesPass()); // Get rid of dead prototypes
PM->add(createDeadTypeEliminationPass()); // Eliminate dead types
// GlobalOpt already deletes dead functions and globals, at -O3 try a
// late pass of GlobalDCE. It is capable of deleting dead cycles.
if (OptimizationLevel > 2)
PM->add(createGlobalDCEPass()); // Remove dead fns and globals.
if (OptimizationLevel > 1)
PM->add(createConstantMergePass()); // Merge dup global constants
}
}
static inline void addOnePass(PassManagerBase *PM, Pass *P, bool AndVerify) {
PM->add(P);
if (AndVerify)
PM->add(createVerifierPass());
}
/// createStandardLTOPasses - Add the standard list of module passes suitable
/// for link time optimization.
///
/// Internalize - Run the internalize pass.
/// RunInliner - Use a function inlining pass.
/// VerifyEach - Run the verifier after each pass.
static inline void createStandardLTOPasses(PassManagerBase *PM,
bool Internalize,
bool RunInliner,
bool VerifyEach) {
// Provide AliasAnalysis services for optimizations.
createStandardAliasAnalysisPasses(PM);
// Now that composite has been compiled, scan through the module, looking
// for a main function. If main is defined, mark all other functions
// internal.
if (Internalize)
addOnePass(PM, createInternalizePass(true), VerifyEach);
// Propagate constants at call sites into the functions they call. This
// opens opportunities for globalopt (and inlining) by substituting function
// pointers passed as arguments to direct uses of functions.
addOnePass(PM, createIPSCCPPass(), VerifyEach);
// Now that we internalized some globals, see if we can hack on them!
addOnePass(PM, createGlobalOptimizerPass(), VerifyEach);
// Linking modules together can lead to duplicated global constants, only
// keep one copy of each constant...
addOnePass(PM, createConstantMergePass(), VerifyEach);
// Remove unused arguments from functions...
addOnePass(PM, createDeadArgEliminationPass(), VerifyEach);
// Reduce the code after globalopt and ipsccp. Both can open up significant
// simplification opportunities, and both can propagate functions through
// function pointers. When this happens, we often have to resolve varargs
// calls, etc, so let instcombine do this.
addOnePass(PM, createInstructionCombiningPass(), VerifyEach);
// Inline small functions
if (RunInliner)
addOnePass(PM, createFunctionInliningPass(), VerifyEach);
addOnePass(PM, createPruneEHPass(), VerifyEach); // Remove dead EH info.
// Optimize globals again if we ran the inliner.
if (RunInliner)
addOnePass(PM, createGlobalOptimizerPass(), VerifyEach);
addOnePass(PM, createGlobalDCEPass(), VerifyEach); // Remove dead functions.
// If we didn't decide to inline a function, check to see if we can
// transform it to pass arguments by value instead of by reference.
addOnePass(PM, createArgumentPromotionPass(), VerifyEach);
// The IPO passes may leave cruft around. Clean up after them.
addOnePass(PM, createInstructionCombiningPass(), VerifyEach);
addOnePass(PM, createJumpThreadingPass(), VerifyEach);
// Break up allocas
addOnePass(PM, createScalarReplAggregatesPass(), VerifyEach);
// Run a few AA driven optimizations here and now, to cleanup the code.
addOnePass(PM, createFunctionAttrsPass(), VerifyEach); // Add nocapture.
addOnePass(PM, createGlobalsModRefPass(), VerifyEach); // IP alias analysis.
addOnePass(PM, createLICMPass(), VerifyEach); // Hoist loop invariants.
addOnePass(PM, createGVNPass(), VerifyEach); // Remove redundancies.
addOnePass(PM, createMemCpyOptPass(), VerifyEach); // Remove dead memcpys.
// Nuke dead stores.
addOnePass(PM, createDeadStoreEliminationPass(), VerifyEach);
// Cleanup and simplify the code after the scalar optimizations.
addOnePass(PM, createInstructionCombiningPass(), VerifyEach);
addOnePass(PM, createJumpThreadingPass(), VerifyEach);
// Delete basic blocks, which optimization passes may have killed.
addOnePass(PM, createCFGSimplificationPass(), VerifyEach);
// Now that we have optimized the program, discard unreachable functions.
addOnePass(PM, createGlobalDCEPass(), VerifyEach);
}
}
#endif


@ -0,0 +1,100 @@
//===-- llvm/Support/Win64EH.h ---Win64 EH Constants-------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains constants and structures used for implementing
// exception handling on Win64 platforms. For more information, see
// http://msdn.microsoft.com/en-us/library/1eyas8tf.aspx
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_WIN64EH_H
#define LLVM_SUPPORT_WIN64EH_H
#include "llvm/Support/DataTypes.h"
namespace llvm {
namespace Win64EH {
/// UnwindOpcodes - Enumeration whose values specify a single operation in
/// the prolog of a function.
enum UnwindOpcodes {
UOP_PushNonVol = 0,
UOP_AllocLarge,
UOP_AllocSmall,
UOP_SetFPReg,
UOP_SaveNonVol,
UOP_SaveNonVolBig,
UOP_SaveXMM128 = 8,
UOP_SaveXMM128Big,
UOP_PushMachFrame
};
/// UnwindCode - This union describes a single operation in a function prolog,
/// or part thereof.
union UnwindCode {
struct {
uint8_t codeOffset;
uint8_t unwindOp:4,
opInfo:4;
} u;
uint16_t frameOffset;
};
enum {
/// UNW_ExceptionHandler - Specifies that this function has an exception
/// handler.
UNW_ExceptionHandler = 0x01,
/// UNW_TerminateHandler - Specifies that this function has a termination
/// handler.
UNW_TerminateHandler = 0x02,
/// UNW_ChainInfo - Specifies that this UnwindInfo structure is chained to
/// another one.
UNW_ChainInfo = 0x04
};
/// RuntimeFunction - An entry in the table of functions with unwind info.
struct RuntimeFunction {
uint64_t startAddress;
uint64_t endAddress;
uint64_t unwindInfoOffset;
};
/// UnwindInfo - An entry in the exception table.
struct UnwindInfo {
uint8_t version:3,
flags:5;
uint8_t prologSize;
uint8_t numCodes;
uint8_t frameRegister:4,
frameOffset:4;
UnwindCode unwindCodes[1];
void *getLanguageSpecificData() {
return reinterpret_cast<void *>(&unwindCodes[(numCodes+1) & ~1]);
}
uint64_t getLanguageSpecificHandlerOffset() {
return *reinterpret_cast<uint64_t *>(getLanguageSpecificData());
}
void setLanguageSpecificHandlerOffset(uint64_t offset) {
*reinterpret_cast<uint64_t *>(getLanguageSpecificData()) = offset;
}
RuntimeFunction *getChainedFunctionEntry() {
return reinterpret_cast<RuntimeFunction *>(getLanguageSpecificData());
}
void *getExceptionData() {
return reinterpret_cast<void *>(reinterpret_cast<uint64_t *>(
getLanguageSpecificData())+1);
}
};
} // End of namespace Win64EH
} // End of namespace llvm
#endif

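Putting the accessors together, a reader of raw unwind data might walk a record like this (the inspect function itself is an assumption):

// Sketch: inspecting a raw UnwindInfo record.
void inspect(llvm::Win64EH::UnwindInfo *UI) {
  using namespace llvm::Win64EH;
  if (UI->flags & UNW_ChainInfo) {
    RuntimeFunction *Parent = UI->getChainedFunctionEntry();
    (void)Parent; // recurse on the parent entry's unwind info
  } else if (UI->flags & (UNW_ExceptionHandler | UNW_TerminateHandler)) {
    uint64_t HandlerOff = UI->getLanguageSpecificHandlerOffset();
    (void)HandlerOff; // offset of the language-specific handler
  }
}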

@ -128,6 +128,11 @@ class RegisterClass<string namespace, list<ValueType> regTypes, int alignment,
// dags: (RegClass SubRegIndex, SubRegIndex, ...)
list<dag> SubRegClasses = [];
// isAllocatable - Specify that the register class can be used for virtual
// registers and register allocation. Some register classes are only used to
// model instruction operand constraints, and should have isAllocatable = 0.
bit isAllocatable = 1;
// MethodProtos/MethodBodies - These members can be used to insert arbitrary
// code into a generated register class. The normal usage of this is to
// overload virtual methods.
@ -151,6 +156,14 @@ class DwarfRegNum<list<int> Numbers> {
list<int> DwarfNumbers = Numbers;
}
// DwarfRegAlias - This class declares that a given register uses the same dwarf
// numbers as another one. This is useful for making it clear that the two
// registers do have the same number. It also lets us build a mapping
// from dwarf register number to llvm register.
class DwarfRegAlias<Register reg> {
Register DwarfAlias = reg;
}
//===----------------------------------------------------------------------===//
// Pull in the common support for scheduling
//


@ -22,6 +22,7 @@
namespace llvm {
class MCSection;
class MCContext;
class MachineFunction;
class TargetMachine;
class TargetLoweringObjectFile;
@ -58,6 +59,18 @@ class TargetAsmInfo {
return TLOF->getEHFrameSection();
}
const MCSection *getDwarfFrameSection() const {
return TLOF->getDwarfFrameSection();
}
const MCSection *getWin64EHFuncTableSection(StringRef Suffix) const {
return TLOF->getWin64EHFuncTableSection(Suffix);
}
const MCSection *getWin64EHTableSection(StringRef Suffix) const {
return TLOF->getWin64EHTableSection(Suffix);
}
unsigned getFDEEncoding(bool CFI) const {
return TLOF->getFDEEncoding(CFI);
}
@ -66,6 +79,10 @@ class TargetAsmInfo {
return TLOF->isFunctionEHFrameSymbolPrivate();
}
const unsigned *getCalleeSavedRegs(MachineFunction *MF = 0) const {
return TRI->getCalleeSavedRegs(MF);
}
unsigned getDwarfRARegNum(bool isEH) const {
return TRI->getDwarfRegNum(TRI->getRARegister(), isEH);
}
@ -77,6 +94,14 @@ class TargetAsmInfo {
int getDwarfRegNum(unsigned RegNum, bool isEH) const {
return TRI->getDwarfRegNum(RegNum, isEH);
}
int getLLVMRegNum(unsigned DwarfRegNum, bool isEH) const {
return TRI->getLLVMRegNum(DwarfRegNum, isEH);
}
int getSEHRegNum(unsigned RegNum) const {
return TRI->getSEHRegNum(RegNum);
}
};
}


@ -122,7 +122,8 @@ class InstrItineraryData {
InstrItineraryData(const InstrStage *S, const unsigned *OS,
const unsigned *F, const InstrItinerary *I)
: Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I) {}
: Stages(S), OperandCycles(OS), Forwardings(F), Itineraries(I),
IssueWidth(0) {}
/// isEmpty - Returns true if there are no itineraries.
///


@ -51,6 +51,7 @@ class TargetLibraryInfo : public ImmutablePass {
static char ID;
TargetLibraryInfo();
TargetLibraryInfo(const Triple &T);
explicit TargetLibraryInfo(const TargetLibraryInfo &TLI);
/// has - This function is used by optimizations that want to match on or form
/// a given library function.


@ -94,6 +94,19 @@ class TargetLowering {
Custom // Use the LowerOperation hook to implement custom lowering.
};
/// LegalizeTypeAction - This enum indicates whether types are legal for a
/// target, and if not, what action should be used to make them valid.
enum LegalizeTypeAction {
TypeLegal, // The target natively supports this type.
TypePromoteInteger, // Replace this integer with a larger one.
TypeExpandInteger, // Split this integer into two of half the size.
TypeSoftenFloat, // Convert this float to a same size integer type.
TypeExpandFloat, // Split this float into two of half the size.
TypeScalarizeVector, // Replace this one-element vector with its element.
TypeSplitVector, // Split this vector into two of half the size.
TypeWidenVector // This vector should be widened into a larger vector.
};
enum BooleanContent { // How the target represents true/false values.
UndefinedBooleanContent, // Only bit 0 counts, the rest can hold garbage.
ZeroOrOneBooleanContent, // All bits zero except for bit 0.
@ -200,71 +213,20 @@ class TargetLowering {
}
class ValueTypeActionImpl {
/// ValueTypeActions - For each value type, keep a LegalizeAction enum
/// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
/// that indicates how instruction selection should deal with the type.
uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];
LegalizeAction getExtendedTypeAction(EVT VT) const {
// Handle non-vector integers.
if (!VT.isVector()) {
assert(VT.isInteger() && "Unsupported extended type!");
unsigned BitSize = VT.getSizeInBits();
// First promote to a power-of-two size, then expand if necessary.
if (BitSize < 8 || !isPowerOf2_32(BitSize))
return Promote;
return Expand;
}
// Vectors with only one element are always scalarized.
if (VT.getVectorNumElements() == 1)
return Expand;
// Vectors with a number of elements that is not a power of two are always
// widened, for example <3 x float> -> <4 x float>.
if (!VT.isPow2VectorType())
return Promote;
// Vectors with a crazy element type are always expanded, for example
// <4 x i2> is expanded into two vectors of type <2 x i2>.
if (!VT.getVectorElementType().isSimple())
return Expand;
// If this type is smaller than a legal vector type then widen it,
// otherwise expand it. E.g. <2 x float> -> <4 x float>.
MVT EltType = VT.getVectorElementType().getSimpleVT();
unsigned NumElts = VT.getVectorNumElements();
while (1) {
// Round up to the next power of 2.
NumElts = (unsigned)NextPowerOf2(NumElts);
// If there is no simple vector type with this many elements then there
// cannot be a larger legal vector type. Note that this assumes that
// there are no skipped intermediate vector types in the simple types.
MVT LargerVector = MVT::getVectorVT(EltType, NumElts);
if (LargerVector == MVT())
return Expand;
// If this type is legal then widen the vector.
if (getTypeAction(LargerVector) == Legal)
return Promote;
}
}
public:
ValueTypeActionImpl() {
std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
}
LegalizeAction getTypeAction(EVT VT) const {
if (!VT.isExtended())
return getTypeAction(VT.getSimpleVT());
return getExtendedTypeAction(VT);
LegalizeTypeAction getTypeAction(MVT VT) const {
return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
}
LegalizeAction getTypeAction(MVT VT) const {
return (LegalizeAction)ValueTypeActions[VT.SimpleTy];
}
void setTypeAction(EVT VT, LegalizeAction Action) {
void setTypeAction(EVT VT, LegalizeTypeAction Action) {
unsigned I = VT.getSimpleVT().SimpleTy;
ValueTypeActions[I] = Action;
}
@ -278,10 +240,10 @@ class TargetLowering {
/// it is already legal (return 'Legal') or we need to promote it to a larger
/// type (return 'Promote'), or we need to expand it into multiple registers
/// of smaller integer type (return 'Expand'). 'Custom' is not an option.
LegalizeAction getTypeAction(EVT VT) const {
return ValueTypeActions.getTypeAction(VT);
LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
return getTypeConversion(Context, VT).first;
}
LegalizeAction getTypeAction(MVT VT) const {
LegalizeTypeAction getTypeAction(MVT VT) const {
return ValueTypeActions.getTypeAction(VT);
}
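With the type-action query now taking a context, client code switches on the new enum; a minimal sketch where the TargetLowering, context, and type are all assumed parameters:

// Sketch: reacting to the legalization action chosen for a type.
void reportAction(const llvm::TargetLowering &TLI,
                  llvm::LLVMContext &Ctx, llvm::EVT VT) {
  switch (TLI.getTypeAction(Ctx, VT)) {
  case llvm::TargetLowering::TypeLegal:
    break; // natively supported
  case llvm::TargetLowering::TypePromoteInteger:
    // getTypeToTransformTo(Ctx, VT) names the wider integer
    break;
  case llvm::TargetLowering::TypeSplitVector:
    // the vector is legalized as two half-size vectors
    break;
  default:
    break;
  }
}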
@ -292,38 +254,7 @@ class TargetLowering {
/// to get to the smaller register. For illegal floating point types, this
/// returns the integer type to transform to.
EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
assert(getTypeAction(NVT) != Promote &&
"Promote may not follow Expand or Promote");
return NVT;
}
if (VT.isVector()) {
EVT NVT = VT.getPow2VectorType(Context);
if (NVT == VT) {
// Vector length is a power of 2 - split to half the size.
unsigned NumElts = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
return (NumElts == 1) ?
EltVT : EVT::getVectorVT(Context, EltVT, NumElts / 2);
}
// Promote to a power of two size, avoiding multi-step promotion.
return getTypeAction(NVT) == Promote ?
getTypeToTransformTo(Context, NVT) : NVT;
} else if (VT.isInteger()) {
EVT NVT = VT.getRoundIntegerType(Context);
if (NVT == VT) // Size is a power of two - expand to half the size.
return EVT::getIntegerVT(Context, VT.getSizeInBits() / 2);
// Promote to a power of two size, avoiding multi-step promotion.
return getTypeAction(NVT) == Promote ?
getTypeToTransformTo(Context, NVT) : NVT;
}
assert(0 && "Unsupported extended type!");
return MVT(MVT::Other); // Not reached
return getTypeConversion(Context, VT).second;
}
/// getTypeToExpandTo - For types supported by the target, this is an
@ -333,7 +264,7 @@ class TargetLowering {
EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
assert(!VT.isVector());
while (true) {
switch (getTypeAction(VT)) {
switch (getTypeAction(Context, VT)) {
case Legal:
return VT;
case Expand:
@ -761,6 +692,18 @@ class TargetLowering {
return MinStackArgumentAlignment;
}
/// getMinFunctionAlignment - return the minimum function alignment.
///
unsigned getMinFunctionAlignment() const {
return MinFunctionAlignment;
}
/// getPrefFunctionAlignment - return the preferred function alignment.
///
unsigned getPrefFunctionAlignment() const {
return PrefFunctionAlignment;
}
/// getPrefLoopAlignment - return the preferred loop alignment.
///
unsigned getPrefLoopAlignment() const {
@ -824,9 +767,6 @@ class TargetLowering {
/// PIC relocation models.
virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
/// getFunctionAlignment - Return the Log2 alignment of this function.
virtual unsigned getFunctionAlignment(const Function *) const = 0;
/// getStackCookieLocation - Return true if the target stores stack
/// protector cookies at a fixed offset in some non-standard address
/// space, and populates the address space and offset as
@ -1167,6 +1107,18 @@ class TargetLowering {
JumpBufAlignment = Align;
}
/// setMinFunctionAlignment - Set the target's minimum function alignment.
void setMinFunctionAlignment(unsigned Align) {
MinFunctionAlignment = Align;
}
/// setPrefFunctionAlignment - Set the target's preferred function alignment.
/// This should be set if there is a performance benefit to
/// higher-than-minimum alignment.
void setPrefFunctionAlignment(unsigned Align) {
PrefFunctionAlignment = Align;
}
/// setPrefLoopAlignment - Set the target's preferred loop alignment. The
/// default alignment is zero; it means the target does not care about loop
/// alignment.
void setPrefLoopAlignment(unsigned Align) {
@ -1259,7 +1211,8 @@ class TargetLowering {
/// return values described by the Outs array can fit into the return
/// registers. If false is returned, an sret-demotion is performed.
///
virtual bool CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
virtual bool CanLowerReturn(CallingConv::ID CallConv,
MachineFunction &MF, bool isVarArg,
const SmallVectorImpl<ISD::OutputArg> &Outs,
LLVMContext &Context) const
{
@ -1497,7 +1450,7 @@ class TargetLowering {
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
virtual void LowerAsmOperandForConstraint(SDValue Op, char ConstraintLetter,
virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
std::vector<SDValue> &Ops,
SelectionDAG &DAG) const;
@ -1583,6 +1536,14 @@ class TargetLowering {
return true;
}
/// isLegalAddImmediate - Return true if the specified immediate is a legal
/// add immediate, that is, the target has add instructions which can add
/// a register with the immediate without having to materialize the
/// immediate into a register.
virtual bool isLegalAddImmediate(int64_t Imm) const {
return true;
}
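For illustration (not part of this commit): a minimal sketch of how a target override of this hook might look, assuming a hypothetical target whose add instruction encodes an unsigned 12-bit immediate.

#include <cstdint>

// Hypothetical override: the immediate is legal only if it fits the
// unsigned 12-bit field of this (assumed) target's add instruction.
static bool isLegalAddImmediateFor12BitTarget(int64_t Imm) {
  return Imm >= 0 && Imm < (int64_t(1) << 12);
}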
//===--------------------------------------------------------------------===//
// Div utility functions
//
@ -1637,6 +1598,13 @@ class TargetLowering {
const TargetData *TD;
const TargetLoweringObjectFile &TLOF;
/// We are in the process of implementing a new TypeLegalization action
/// which is the promotion of vector elements. This feature is under
/// development. Until this feature is complete, it is only enabled using a
/// flag. We pass this flag using a member because of circular dependency
/// issues. This member will be removed with the flag once we complete the
/// transition.
bool mayPromoteElements;
/// PointerTy - The type to use for pointers, usually i32 or i64.
///
MVT PointerTy;
@ -1693,7 +1661,18 @@ class TargetLowering {
///
unsigned MinStackArgumentAlignment;
/// PrefLoopAlignment - The preferred loop alignment.
/// MinFunctionAlignment - The minimum function alignment (used when
/// optimizing for size, and to prevent explicitly provided alignment
/// from leading to incorrect code).
///
unsigned MinFunctionAlignment;
/// PrefFunctionAlignment - The preferred function alignment (used when
/// alignment is unspecified and optimizing for speed).
///
unsigned PrefFunctionAlignment;
/// PrefLoopAlignment - The preferred loop alignment.
///
unsigned PrefLoopAlignment;
@ -1774,6 +1753,128 @@ class TargetLowering {
ValueTypeActionImpl ValueTypeActions;
typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;
LegalizeKind
getTypeConversion(LLVMContext &Context, EVT VT) const {
// If this is a simple type, use the ComputeRegisterProp mechanism.
if (VT.isSimple()) {
assert((unsigned)VT.getSimpleVT().SimpleTy <
array_lengthof(TransformToType));
EVT NVT = TransformToType[VT.getSimpleVT().SimpleTy];
LegalizeTypeAction LA = ValueTypeActions.getTypeAction(VT.getSimpleVT());
assert(
(!(NVT.isSimple() && LA != TypeLegal) ||
ValueTypeActions.getTypeAction(NVT.getSimpleVT()) != TypePromoteInteger)
&& "Promote may not follow Expand or Promote");
return LegalizeKind(LA, NVT);
}
// Handle Extended Scalar Types.
if (!VT.isVector()) {
assert(VT.isInteger() && "Float types must be simple");
unsigned BitSize = VT.getSizeInBits();
// First promote to a power-of-two size, then expand if necessary.
if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
EVT NVT = VT.getRoundIntegerType(Context);
assert(NVT != VT && "Unable to round integer VT");
LegalizeKind NextStep = getTypeConversion(Context, NVT);
// Avoid multi-step promotion.
if (NextStep.first == TypePromoteInteger) return NextStep;
// Return rounded integer type.
return LegalizeKind(TypePromoteInteger, NVT);
}
return LegalizeKind(TypeExpandInteger,
EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
}
// Handle vector types.
unsigned NumElts = VT.getVectorNumElements();
EVT EltVT = VT.getVectorElementType();
// Vectors with only one element are always scalarized.
if (NumElts == 1)
return LegalizeKind(TypeScalarizeVector, EltVT);
// If we allow the promotion of vector elements using a flag,
// then try to widen vector elements until a legal type is found.
if (mayPromoteElements && EltVT.isInteger()) {
// Vectors with a number of elements that is not a power of two are always
// widened, for example <3 x float> -> <4 x float>.
if (!VT.isPow2VectorType()) {
NumElts = (unsigned)NextPowerOf2(NumElts);
EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
return LegalizeKind(TypeWidenVector, NVT);
}
// Examine the element type.
LegalizeKind LK = getTypeConversion(Context, EltVT);
// If type is to be expanded, split the vector.
// <4 x i140> -> <2 x i140>
if (LK.first == TypeExpandInteger)
return LegalizeKind(TypeSplitVector,
EVT::getVectorVT(Context, EltVT, NumElts / 2));
// Promote the integer element types until a legal vector type is found
// or until the element integer type is too big. If a legal type was not
// found, fallback to the usual mechanism of widening/splitting the
// vector.
while (1) {
// Increase the bitwidth of the element to the next pow-of-two
// (which is greater than 8 bits).
EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
).getRoundIntegerType(Context);
// Stop trying when getting a non-simple element type.
// Note that vector elements may be greater than legal vector element
// types. Example: X86 XMM registers hold 64-bit elements on 32-bit systems.
if (!EltVT.isSimple()) break;
// Build a new vector type and check if it is legal.
MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
// Found a legal promoted vector type.
if (ValueTypeActions.getTypeAction(NVT) == TypeLegal)
return LegalizeKind(TypePromoteInteger,
EVT::getVectorVT(Context, EltVT, NumElts));
}
}
// Try to widen the vector until a legal type is found.
// If there is no wider legal type, split the vector.
while (1) {
// Round up to the next power of 2.
NumElts = (unsigned)NextPowerOf2(NumElts);
// If there is no simple vector type with this many elements then there
// cannot be a larger legal vector type. Note that this assumes that
// there are no skipped intermediate vector types in the simple types.
MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
if (LargerVector == MVT()) break;
// If this type is legal then widen the vector.
if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
return LegalizeKind(TypeWidenVector, LargerVector);
}
// Widen odd vectors to next power of two.
if (!VT.isPow2VectorType()) {
EVT NVT = VT.getPow2VectorType(Context);
return LegalizeKind(TypeWidenVector, NVT);
}
// Vectors with illegal element types are expanded.
EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
return LegalizeKind(TypeSplitVector, NVT);
assert(false && "Unable to handle this kind of vector type");
return LegalizeKind(TypeLegal, VT);
}
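For illustration (not from this commit): the widening steps above round the element count up with NextPowerOf2, which returns the next power of two strictly greater than its argument, so repeated application keeps growing (3 -> 4, then 4 -> 8 on the next pass). A standalone sketch of that rounding:

#include <cstdint>
#include <cstdio>

// Standalone version of the rounding used above: returns the next
// power of two strictly greater than A (3 -> 4, 4 -> 8, 5 -> 8).
static uint64_t nextPowerOf2(uint64_t A) {
  A |= (A >> 1);  A |= (A >> 2);  A |= (A >> 4);
  A |= (A >> 8);  A |= (A >> 16); A |= (A >> 32);
  return A + 1;
}

int main() {
  for (uint64_t N : {3u, 4u, 5u, 9u})
    std::printf("%u -> %u elements\n", (unsigned)N, (unsigned)nextPowerOf2(N));
}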
std::vector<std::pair<EVT, TargetRegisterClass*> > AvailableRegClasses;
/// TargetDAGCombineArray - Targets can specify ISD nodes that they would


@ -97,10 +97,6 @@ class TargetLoweringObjectFile {
/// weak_definition of constant 0 for an omitted EH frame.
bool SupportsWeakOmittedEHFrame;
/// IsFunctionEHSymbolGlobal - This flag is set to true if the ".eh" symbol
/// for a function should be marked .globl.
bool IsFunctionEHSymbolGlobal;
/// IsFunctionEHFrameSymbolPrivate - This flag is set to true if the
/// "EH_frame" symbol for EH information should be an assembler temporary (aka
/// private linkage, aka an L or .L label) or false if it should be a normal
@ -119,9 +115,6 @@ class TargetLoweringObjectFile {
Ctx = &ctx;
}
bool isFunctionEHSymbolGlobal() const {
return IsFunctionEHSymbolGlobal;
}
bool isFunctionEHFrameSymbolPrivate() const {
return IsFunctionEHFrameSymbolPrivate;
}
@ -162,6 +155,8 @@ class TargetLoweringObjectFile {
const MCSection *getTLSExtraDataSection() const {
return TLSExtraDataSection;
}
virtual const MCSection *getWin64EHFuncTableSection(StringRef suffix) const = 0;
virtual const MCSection *getWin64EHTableSection(StringRef suffix) const = 0;
/// shouldEmitUsedDirectiveFor - This hook allows targets to selectively
/// decide not to emit the UsedDirective for some symbols in llvm.used.


@ -125,10 +125,6 @@ namespace llvm {
/// flag is hidden and is only for debugging the debug info.
extern bool JITEmitDebugInfoToDisk;
/// UnwindTablesMandatory - This flag indicates that unwind tables should
/// be emitted for all functions.
extern bool UnwindTablesMandatory;
/// GuaranteedTailCallOpt - This flag is enabled when -tailcallopt is
/// specified on the commandline. When the flag is on, participating targets
/// will perform tail call optimization on all calls which use the fastcc


@ -47,6 +47,7 @@ struct TargetRegisterDesc {
const unsigned *SubRegs; // Sub-register set, described above
const unsigned *SuperRegs; // Super-register set, described above
unsigned CostPerUse; // Extra cost of instructions using register.
bool inAllocatableClass; // Register belongs to an allocatable regclass.
};
class TargetRegisterClass {
@ -66,6 +67,7 @@ class TargetRegisterClass {
const sc_iterator SuperRegClasses;
const unsigned RegSize, Alignment; // Size & Alignment of register in bytes
const int CopyCost;
const bool Allocatable;
const iterator RegsBegin, RegsEnd;
DenseSet<unsigned> RegSet;
public:
@ -76,11 +78,12 @@ class TargetRegisterClass {
const TargetRegisterClass * const *supcs,
const TargetRegisterClass * const *subregcs,
const TargetRegisterClass * const *superregcs,
unsigned RS, unsigned Al, int CC,
unsigned RS, unsigned Al, int CC, bool Allocable,
iterator RB, iterator RE)
: ID(id), Name(name), VTs(vts), SubClasses(subcs), SuperClasses(supcs),
SubRegClasses(subregcs), SuperRegClasses(superregcs),
RegSize(RS), Alignment(Al), CopyCost(CC), RegsBegin(RB), RegsEnd(RE) {
RegSize(RS), Alignment(Al), CopyCost(CC), Allocatable(Allocable),
RegsBegin(RB), RegsEnd(RE) {
for (iterator I = RegsBegin, E = RegsEnd; I != E; ++I)
RegSet.insert(*I);
}
@ -182,6 +185,12 @@ class TargetRegisterClass {
return false;
}
/// hasSubClassEq - Returns true if RC is a subclass of or equal to this
/// class.
bool hasSubClassEq(const TargetRegisterClass *RC) const {
return RC == this || hasSubClass(RC);
}
/// subclasses_begin / subclasses_end - Loop over all of the classes
/// that are proper subsets of this register class.
sc_iterator subclasses_begin() const {
@ -203,6 +212,12 @@ class TargetRegisterClass {
return false;
}
/// hasSuperClassEq - Returns true if RC is a superclass of or equal to this
/// class.
bool hasSuperClassEq(const TargetRegisterClass *RC) const {
return RC == this || hasSuperClass(RC);
}
/// superclasses_begin / superclasses_end - Loop over all of the classes
/// that are proper supersets of this register class.
sc_iterator superclasses_begin() const {
@ -256,6 +271,10 @@ class TargetRegisterClass {
/// this class. A negative number means the register class is very expensive
/// to copy e.g. status flag register classes.
int getCopyCost() const { return CopyCost; }
/// isAllocatable - Return true if this register class may be used to create
/// virtual registers.
bool isAllocatable() const { return Allocatable; }
};
@ -351,13 +370,13 @@ class TargetRegisterInfo {
/// The first virtual register in a function will get the index 0.
static unsigned virtReg2Index(unsigned Reg) {
assert(isVirtualRegister(Reg) && "Not a virtual register");
return Reg - (1u << 31);
return Reg & ~(1u << 31);
}
/// index2VirtReg - Convert a 0-based index to a virtual register number.
/// This is the inverse operation of VirtReg2IndexFunctor below.
static unsigned index2VirtReg(unsigned Index) {
return Index + (1u << 31);
return Index | (1u << 31);
}
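For illustration (not from this commit): the switch from add/subtract to or/mask above is behavior-preserving for valid inputs. A standalone check of the round-trip property, with the tag constant assumed to match the code above:

#include <cassert>
#include <cstdint>

// Virtual registers are tagged by setting bit 31; the index is the low
// 31 bits. For any register with the tag bit set, clearing the bit and
// subtracting 2^31 yield the same index, and the or/mask forms are
// trivially inverses of each other.
static const uint32_t VirtRegFlag = 1u << 31;

static uint32_t virtReg2Index(uint32_t Reg) { return Reg & ~VirtRegFlag; }
static uint32_t index2VirtReg(uint32_t Index) { return Index | VirtRegFlag; }

int main() {
  for (uint32_t Index : {0u, 1u, 12345u, (1u << 31) - 1u}) {
    uint32_t Reg = index2VirtReg(Index);
    assert(virtReg2Index(Reg) == Index);               // round-trips
    assert(Reg - VirtRegFlag == (Reg & ~VirtRegFlag)); // same as subtract
  }
}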
/// getMinimalPhysRegClass - Returns the Register Class of a physical
@ -802,6 +821,8 @@ class TargetRegisterInfo {
/// debugging info.
virtual int getDwarfRegNum(unsigned RegNum, bool isEH) const = 0;
virtual int getLLVMRegNum(unsigned RegNum, bool isEH) const = 0;
/// getFrameRegister - This method should return the register used as a base
/// for values allocated in the current stack frame.
virtual unsigned getFrameRegister(const MachineFunction &MF) const = 0;
@ -809,6 +830,12 @@ class TargetRegisterInfo {
/// getRARegister - This method should return the register where the return
/// address can be found.
virtual unsigned getRARegister() const = 0;
/// getSEHRegNum - Map a target register to an equivalent SEH register
/// number. Returns -1 if there is no equivalent value.
virtual int getSEHRegNum(unsigned i) const {
return i;
}
};


@ -354,6 +354,7 @@ def fmul : SDNode<"ISD::FMUL" , SDTFPBinOp, [SDNPCommutative]>;
def fdiv : SDNode<"ISD::FDIV" , SDTFPBinOp>;
def frem : SDNode<"ISD::FREM" , SDTFPBinOp>;
def fabs : SDNode<"ISD::FABS" , SDTFPUnaryOp>;
def fgetsign : SDNode<"ISD::FGETSIGN" , SDTFPToIntOp>;
def fneg : SDNode<"ISD::FNEG" , SDTFPUnaryOp>;
def fsqrt : SDNode<"ISD::FSQRT" , SDTFPUnaryOp>;
def fsin : SDNode<"ISD::FSIN" , SDTFPUnaryOp>;


@ -28,7 +28,8 @@ ModulePass *createOptimalEdgeProfilerPass();
ModulePass *createPathProfilerPass();
// Insert GCOV profiling instrumentation
ModulePass *createGCOVProfilerPass(bool EmitNotes = true, bool EmitData = true);
ModulePass *createGCOVProfilerPass(bool EmitNotes = true, bool EmitData = true,
bool Use402Format = false);
} // End llvm namespace


@ -43,8 +43,10 @@ template<typename T> class SmallVectorImpl;
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
///
bool ConstantFoldTerminator(BasicBlock *BB);
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that this might make dead, if
/// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false);
//===----------------------------------------------------------------------===//
// Local dead code elimination.
@ -176,6 +178,10 @@ bool ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
/// of llvm.dbg.value intrinsics.
bool LowerDbgDeclare(Function &F);
/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic corresponding to
/// an alloca, if any.
DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
} // End llvm namespace
#endif


@ -21,6 +21,8 @@ namespace llvm {
class PHINode;
template<typename T> class SmallVectorImpl;
template<typename T> class SSAUpdaterTraits;
class DbgDeclareInst;
class DIBuilder;
class BumpPtrAllocator;
/// SSAUpdater - This class updates SSA form for a set of values defined in
@ -120,9 +122,12 @@ class SSAUpdater {
class LoadAndStorePromoter {
protected:
SSAUpdater &SSA;
DbgDeclareInst *DDI;
DIBuilder *DIB;
public:
LoadAndStorePromoter(const SmallVectorImpl<Instruction*> &Insts,
SSAUpdater &S, StringRef Name = StringRef());
SSAUpdater &S, DbgDeclareInst *DDI, DIBuilder *DIB,
StringRef Name = StringRef());
virtual ~LoadAndStorePromoter() {}
/// run - This does the promotion. Insts is a list of loads and stores to


@ -273,6 +273,9 @@ class Type : public AbstractTypeUser {
/// @brief Determine if this type could be losslessly bitcast to Ty
bool canLosslesslyBitCastTo(const Type *Ty) const;
/// isEmptyTy - Return true if this type is empty, that is, it has no
/// elements or all its elements are empty.
bool isEmptyTy() const;
/// Here are some useful little methods to query what type derived types are.
/// Note that all other types can just compare to see if this == Type::xxxTy;


@ -23,6 +23,7 @@ void llvm::initializeAnalysis(PassRegistry &Registry) {
initializeAliasSetPrinterPass(Registry);
initializeNoAAPass(Registry);
initializeBasicAliasAnalysisPass(Registry);
initializeBranchProbabilityInfoPass(Registry);
initializeCFGViewerPass(Registry);
initializeCFGPrinterPass(Registry);
initializeCFGOnlyViewerPass(Registry);


@ -281,17 +281,20 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
continue;
}
if (const Instruction *I = dyn_cast<Instruction>(V))
// TODO: Get a DominatorTree and use it here.
if (const Value *Simplified =
SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
V = Simplified;
continue;
}
const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
if (GEPOp == 0)
if (GEPOp == 0) {
// If it's not a GEP, hand it off to SimplifyInstruction to see if it
// can come up with something. This matches what GetUnderlyingObject does.
if (const Instruction *I = dyn_cast<Instruction>(V))
// TODO: Get a DominatorTree and use it here.
if (const Value *Simplified =
SimplifyInstruction(const_cast<Instruction *>(I), TD)) {
V = Simplified;
continue;
}
return V;
}
// Don't attempt to analyze GEPs over unsized objects.
if (!cast<PointerType>(GEPOp->getOperand(0)->getType())
@ -448,7 +451,13 @@ namespace {
/// BasicAliasAnalysis - This is the primary alias analysis implementation.
struct BasicAliasAnalysis : public ImmutablePass, public AliasAnalysis {
static char ID; // Class identification, replacement for typeinfo
BasicAliasAnalysis() : ImmutablePass(ID) {
BasicAliasAnalysis() : ImmutablePass(ID),
// AliasCache rarely has more than 1 or 2 elements,
// so start it off fairly small so that clear()
// doesn't have to tromp through 64 (the default)
// elements on each alias query. This really wants
// something like a SmallDenseMap.
AliasCache(8) {
initializeBasicAliasAnalysisPass(*PassRegistry::getPassRegistry());
}
@ -462,12 +471,12 @@ namespace {
virtual AliasResult alias(const Location &LocA,
const Location &LocB) {
assert(Visited.empty() && "Visited must be cleared after use!");
assert(AliasCache.empty() && "AliasCache must be cleared after use!");
assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
"BasicAliasAnalysis doesn't support interprocedural queries.");
AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.TBAATag,
LocB.Ptr, LocB.Size, LocB.TBAATag);
Visited.clear();
AliasCache.clear();
return Alias;
}
@ -503,7 +512,12 @@ namespace {
}
private:
// Visited - Track instructions visited by aliasPHI(), aliasSelect(), and aliasGEP().
// AliasCache - Track alias queries to guard against recursion.
typedef std::pair<Location, Location> LocPair;
typedef DenseMap<LocPair, AliasResult> AliasCacheTy;
AliasCacheTy AliasCache;
// Visited - Track instructions visited by pointsToConstantMemory.
SmallPtrSet<const Value*, 16> Visited;
// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP
@ -680,9 +694,12 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
unsigned ArgNo = 0;
for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
CI != CE; ++CI, ++ArgNo) {
// Only look at the no-capture pointer arguments.
// Only look at the no-capture or byval pointer arguments. If this
// pointer were passed to arguments that were neither of these, then it
// couldn't be no-capture.
if (!(*CI)->getType()->isPointerTy() ||
!CS.paramHasAttr(ArgNo+1, Attribute::NoCapture))
(!CS.paramHasAttr(ArgNo+1, Attribute::NoCapture) &&
!CS.paramHasAttr(ArgNo+1, Attribute::ByVal)))
continue;
// If this is a no-capture pointer argument, see if we can tell that it
@ -816,13 +833,6 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const MDNode *V2TBAAInfo,
const Value *UnderlyingV1,
const Value *UnderlyingV2) {
// If this GEP has been visited before, we're on a use-def cycle.
// Such cycles are only valid when PHI nodes are involved or in unreachable
// code. The visitPHI function catches cycles containing PHIs, but there
// could still be a cycle without PHIs in unreachable code.
if (!Visited.insert(GEP1))
return MayAlias;
int64_t GEP1BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;
@ -940,7 +950,30 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
return NoAlias;
}
return MayAlias;
// Statically, we can see that the base objects are the same, but the
// pointers have dynamic offsets which we can't resolve. And none of our
// little tricks above worked.
//
// TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
// practical effect of this is protecting TBAA in the case of dynamic
// indices into arrays of unions. An alternative way to solve this would
// be to have clang emit extra metadata for unions and/or union accesses.
// A union-specific solution wouldn't handle the problem for malloc'd
// memory however.
return PartialAlias;
}
static AliasAnalysis::AliasResult
MergeAliasResults(AliasAnalysis::AliasResult A, AliasAnalysis::AliasResult B) {
// If the results agree, take it.
if (A == B)
return A;
// A mix of PartialAlias and MustAlias is PartialAlias.
if ((A == AliasAnalysis::PartialAlias && B == AliasAnalysis::MustAlias) ||
(B == AliasAnalysis::PartialAlias && A == AliasAnalysis::MustAlias))
return AliasAnalysis::PartialAlias;
// Otherwise, we don't know anything.
return AliasAnalysis::MayAlias;
}
/// aliasSelect - Provide a bunch of ad-hoc rules to disambiguate a Select
@ -950,13 +983,6 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
const MDNode *SITBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo) {
// If this select has been visited before, we're on a use-def cycle.
// Such cycles are only valid when PHI nodes are involved or in unreachable
// code. The visitPHI function catches cycles containing PHIs, but there
// could still be a cycle without PHIs in unreachable code.
if (!Visited.insert(SI))
return MayAlias;
// If the values are Selects with the same condition, we can do a more precise
// check: just check for aliases between the values on corresponding arms.
if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
@ -969,9 +995,7 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
AliasResult ThisAlias =
aliasCheck(SI->getFalseValue(), SISize, SITBAAInfo,
SI2->getFalseValue(), V2Size, V2TBAAInfo);
if (ThisAlias != Alias)
return MayAlias;
return Alias;
return MergeAliasResults(ThisAlias, Alias);
}
// If both arms of the Select node NoAlias or MustAlias V2, then returns
@ -981,16 +1005,9 @@ BasicAliasAnalysis::aliasSelect(const SelectInst *SI, uint64_t SISize,
if (Alias == MayAlias)
return MayAlias;
// If V2 is visited, the recursive case will have been caught in the
// above aliasCheck call, so these subsequent calls to aliasCheck
// don't need to assume that V2 is being visited recursively.
Visited.erase(V2);
AliasResult ThisAlias =
aliasCheck(V2, V2Size, V2TBAAInfo, SI->getFalseValue(), SISize, SITBAAInfo);
if (ThisAlias != Alias)
return MayAlias;
return Alias;
return MergeAliasResults(ThisAlias, Alias);
}
// aliasPHI - Provide a bunch of ad-hoc rules to disambiguate a PHI instruction
@ -1000,10 +1017,6 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
const MDNode *PNTBAAInfo,
const Value *V2, uint64_t V2Size,
const MDNode *V2TBAAInfo) {
// The PHI node has already been visited, avoid recursion any further.
if (!Visited.insert(PN))
return MayAlias;
// If the values are PHIs in the same block, we can do a more precise
// as well as efficient check: just check for aliases between the values
// on corresponding edges.
@ -1020,8 +1033,9 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
aliasCheck(PN->getIncomingValue(i), PNSize, PNTBAAInfo,
PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
V2Size, V2TBAAInfo);
if (ThisAlias != Alias)
return MayAlias;
Alias = MergeAliasResults(ThisAlias, Alias);
if (Alias == MayAlias)
break;
}
return Alias;
}
@ -1052,15 +1066,11 @@ BasicAliasAnalysis::aliasPHI(const PHINode *PN, uint64_t PNSize,
for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
Value *V = V1Srcs[i];
// If V2 is visited, the recursive case will have been caught in the
// above aliasCheck call, so these subsequent calls to aliasCheck
// don't need to assume that V2 is being visited recursively.
Visited.erase(V2);
AliasResult ThisAlias = aliasCheck(V2, V2Size, V2TBAAInfo,
V, PNSize, PNTBAAInfo);
if (ThisAlias != Alias || ThisAlias == MayAlias)
return MayAlias;
Alias = MergeAliasResults(ThisAlias, Alias);
if (Alias == MayAlias)
break;
}
return Alias;
@ -1145,6 +1155,17 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
(V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD)))
return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates
// otherwise infinitely recursive queries.
LocPair Locs(Location(V1, V1Size, V1TBAAInfo),
Location(V2, V2Size, V2TBAAInfo));
if (V1 > V2)
std::swap(Locs.first, Locs.second);
std::pair<AliasCacheTy::iterator, bool> Pair =
AliasCache.insert(std::make_pair(Locs, MayAlias));
if (!Pair.second)
return Pair.first->second;
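For illustration (not from this commit): inserting the conservative MayAlias answer before analyzing doubles as a recursion breaker, since a cyclic query for the same pair hits the in-flight entry. A minimal sketch of the idiom with placeholder key and result types:

#include <cstdio>
#include <map>
#include <utility>

enum Result { MayAlias, NoAlias };
static std::map<std::pair<int, int>, Result> Cache;

// Seed the cache with a conservative answer before analyzing, so a
// cyclic query for the same key terminates instead of recursing forever.
Result query(int A, int B) {
  if (A > B) std::swap(A, B); // canonicalize the pair, as done with Locs
  std::pair<std::map<std::pair<int, int>, Result>::iterator, bool> Ins =
      Cache.insert(std::make_pair(std::make_pair(A, B), MayAlias));
  if (!Ins.second)
    return Ins.first->second; // cached, or an in-flight recursive query
  // ... the real analysis would run here, possibly calling query() ...
  return Ins.first->second = NoAlias; // record the final answer
}

int main() { std::printf("%d\n", (int)query(2, 1)); }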
// FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if the
// GEP can't simplify, we don't even look at the PHI cases.
if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
@ -1154,7 +1175,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
}
if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, V2TBAAInfo, O1, O2);
if (Result != MayAlias) return Result;
if (Result != MayAlias) return AliasCache[Locs] = Result;
}
if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
@ -1164,7 +1185,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
AliasResult Result = aliasPHI(PN, V1Size, V1TBAAInfo,
V2, V2Size, V2TBAAInfo);
if (Result != MayAlias) return Result;
if (Result != MayAlias) return AliasCache[Locs] = Result;
}
if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
@ -1174,7 +1195,7 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
AliasResult Result = aliasSelect(S1, V1Size, V1TBAAInfo,
V2, V2Size, V2TBAAInfo);
if (Result != MayAlias) return Result;
if (Result != MayAlias) return AliasCache[Locs] = Result;
}
// If both pointers are pointing into the same object and one of them
@ -1183,8 +1204,10 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
if (TD && O1 == O2)
if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) ||
(V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD)))
return PartialAlias;
return AliasCache[Locs] = PartialAlias;
return AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
Location(V2, V2Size, V2TBAAInfo));
AliasResult Result =
AliasAnalysis::alias(Location(V1, V1Size, V1TBAAInfo),
Location(V2, V2Size, V2TBAAInfo));
return AliasCache[Locs] = Result;
}


@ -0,0 +1,357 @@
//===-- BranchProbabilityInfo.cpp - Branch Probability Analysis -*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Loops should be simplified before this analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Instructions.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
INITIALIZE_PASS_BEGIN(BranchProbabilityInfo, "branch-prob",
"Branch Probability Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_END(BranchProbabilityInfo, "branch-prob",
"Branch Probability Analysis", false, true)
char BranchProbabilityInfo::ID = 0;
// Please note that BranchProbabilityAnalysis is not a FunctionPass.
// It is created by BranchProbabilityInfo (which is a FunctionPass), and
// that provides a clean interface. Thanks to this, all heuristics and other
// private methods are hidden in the .cpp file.
class BranchProbabilityAnalysis {
typedef std::pair<BasicBlock *, BasicBlock *> Edge;
DenseMap<Edge, uint32_t> *Weights;
BranchProbabilityInfo *BP;
LoopInfo *LI;
// Weights are for internal use only. They are used by heuristics to help
// estimate edges' probability. Example:
//
// Using "Loop Branch Heuristics" we predict weights of edges for the
// block BB2.
// ...
// |
// V
// BB1<-+
// | |
// | | (Weight = 128)
// V |
// BB2--+
// |
// | (Weight = 4)
// V
// BB3
//
// Probability of the edge BB2->BB1 = 128 / (128 + 4) = 0.9696..
// Probability of the edge BB2->BB3 = 4 / (128 + 4) = 0.0303..
static const uint32_t LBH_TAKEN_WEIGHT = 128;
static const uint32_t LBH_NONTAKEN_WEIGHT = 4;
// Standard weight value. Used when none of the heuristics set weight for
// the edge.
static const uint32_t NORMAL_WEIGHT = 16;
// Minimum weight of an edge. Please note that the weight is never 0.
static const uint32_t MIN_WEIGHT = 1;
// Return TRUE if BB leads directly to a Return Instruction.
static bool isReturningBlock(BasicBlock *BB) {
SmallPtrSet<BasicBlock *, 8> Visited;
while (true) {
TerminatorInst *TI = BB->getTerminator();
if (isa<ReturnInst>(TI))
return true;
if (TI->getNumSuccessors() > 1)
break;
// A block with no successors is unreachable; we can treat it like a return.
if (TI->getNumSuccessors() == 0)
return true;
Visited.insert(BB);
BB = TI->getSuccessor(0);
// Stop if cycle is detected.
if (Visited.count(BB))
return false;
}
return false;
}
// Multiply Edge Weight by two.
void incEdgeWeight(BasicBlock *Src, BasicBlock *Dst) {
uint32_t Weight = BP->getEdgeWeight(Src, Dst);
uint32_t MaxWeight = getMaxWeightFor(Src);
if (Weight * 2 > MaxWeight)
BP->setEdgeWeight(Src, Dst, MaxWeight);
else
BP->setEdgeWeight(Src, Dst, Weight * 2);
}
// Divide Edge Weight by two.
void decEdgeWeight(BasicBlock *Src, BasicBlock *Dst) {
uint32_t Weight = BP->getEdgeWeight(Src, Dst);
assert(Weight > 0);
if (Weight / 2 < MIN_WEIGHT)
BP->setEdgeWeight(Src, Dst, MIN_WEIGHT);
else
BP->setEdgeWeight(Src, Dst, Weight / 2);
}
uint32_t getMaxWeightFor(BasicBlock *BB) const {
return UINT32_MAX / BB->getTerminator()->getNumSuccessors();
}
public:
BranchProbabilityAnalysis(DenseMap<Edge, uint32_t> *W,
BranchProbabilityInfo *BP, LoopInfo *LI)
: Weights(W), BP(BP), LI(LI) {
}
// Return Heuristics
void calcReturnHeuristics(BasicBlock *BB);
// Pointer Heuristics
void calcPointerHeuristics(BasicBlock *BB);
// Loop Branch Heuristics
void calcLoopBranchHeuristics(BasicBlock *BB);
bool runOnFunction(Function &F);
};
// Calculate Edge Weights using "Return Heuristics". Predict a successor which
// leads directly to Return Instruction will not be taken.
void BranchProbabilityAnalysis::calcReturnHeuristics(BasicBlock *BB){
if (BB->getTerminator()->getNumSuccessors() == 1)
return;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
BasicBlock *Succ = *I;
if (isReturningBlock(Succ)) {
decEdgeWeight(BB, Succ);
}
}
}
// Calculate Edge Weights using "Pointer Heuristics". Predict a comparsion
// between two pointer or pointer and NULL will fail.
void BranchProbabilityAnalysis::calcPointerHeuristics(BasicBlock *BB) {
BranchInst * BI = dyn_cast<BranchInst>(BB->getTerminator());
if (!BI || !BI->isConditional())
return;
Value *Cond = BI->getCondition();
ICmpInst *CI = dyn_cast<ICmpInst>(Cond);
if (!CI)
return;
Value *LHS = CI->getOperand(0);
if (!LHS->getType()->isPointerTy())
return;
assert(CI->getOperand(1)->getType()->isPointerTy());
BasicBlock *Taken = BI->getSuccessor(0);
BasicBlock *NonTaken = BI->getSuccessor(1);
// p != 0 -> isProb = true
// p == 0 -> isProb = false
// p != q -> isProb = true
// p == q -> isProb = false;
bool isProb = !CI->isEquality();
if (!isProb)
std::swap(Taken, NonTaken);
incEdgeWeight(BB, Taken);
decEdgeWeight(BB, NonTaken);
}
// Calculate Edge Weights using "Loop Branch Heuristics". Predict backedges
// as taken, exiting edges as not-taken.
void BranchProbabilityAnalysis::calcLoopBranchHeuristics(BasicBlock *BB) {
uint32_t numSuccs = BB->getTerminator()->getNumSuccessors();
Loop *L = LI->getLoopFor(BB);
if (!L)
return;
SmallVector<BasicBlock *, 8> BackEdges;
SmallVector<BasicBlock *, 8> ExitingEdges;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
BasicBlock *Succ = *I;
Loop *SuccL = LI->getLoopFor(Succ);
if (SuccL != L)
ExitingEdges.push_back(Succ);
else if (Succ == L->getHeader())
BackEdges.push_back(Succ);
}
if (uint32_t numBackEdges = BackEdges.size()) {
uint32_t backWeight = LBH_TAKEN_WEIGHT / numBackEdges;
if (backWeight < NORMAL_WEIGHT)
backWeight = NORMAL_WEIGHT;
for (SmallVector<BasicBlock *, 8>::iterator EI = BackEdges.begin(),
EE = BackEdges.end(); EI != EE; ++EI) {
BasicBlock *Back = *EI;
BP->setEdgeWeight(BB, Back, backWeight);
}
}
uint32_t numExitingEdges = ExitingEdges.size();
if (uint32_t numNonExitingEdges = numSuccs - numExitingEdges) {
uint32_t exitWeight = LBH_NONTAKEN_WEIGHT / numNonExitingEdges;
if (exitWeight < MIN_WEIGHT)
exitWeight = MIN_WEIGHT;
for (SmallVector<BasicBlock *, 8>::iterator EI = ExitingEdges.begin(),
EE = ExitingEdges.end(); EI != EE; ++EI) {
BasicBlock *Exiting = *EI;
BP->setEdgeWeight(BB, Exiting, exitWeight);
}
}
}
bool BranchProbabilityAnalysis::runOnFunction(Function &F) {
for (Function::iterator I = F.begin(), E = F.end(); I != E; ) {
BasicBlock *BB = I++;
// Only LBH uses the setEdgeWeight method.
calcLoopBranchHeuristics(BB);
// PH and RH use only the incEdgeWeight and decEdgeWeight methods, so they
// do not efface the LBH results.
calcPointerHeuristics(BB);
calcReturnHeuristics(BB);
}
return false;
}
bool BranchProbabilityInfo::runOnFunction(Function &F) {
LoopInfo &LI = getAnalysis<LoopInfo>();
BranchProbabilityAnalysis BPA(&Weights, this, &LI);
return BPA.runOnFunction(F);
}
uint32_t BranchProbabilityInfo::getSumForBlock(BasicBlock *BB) const {
uint32_t Sum = 0;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
BasicBlock *Succ = *I;
uint32_t Weight = getEdgeWeight(BB, Succ);
uint32_t PrevSum = Sum;
Sum += Weight;
assert(Sum > PrevSum); (void) PrevSum;
}
return Sum;
}
bool BranchProbabilityInfo::isEdgeHot(BasicBlock *Src, BasicBlock *Dst) const {
// Hot probability is at least 4/5 = 80%
uint32_t Weight = getEdgeWeight(Src, Dst);
uint32_t Sum = getSumForBlock(Src);
// FIXME: Implement BranchProbability::compare then change this code to
// compare this BranchProbability against a static "hot" BranchProbability.
return (uint64_t)Weight * 5 > (uint64_t)Sum * 4;
}
BasicBlock *BranchProbabilityInfo::getHotSucc(BasicBlock *BB) const {
uint32_t Sum = 0;
uint32_t MaxWeight = 0;
BasicBlock *MaxSucc = 0;
for (succ_iterator I = succ_begin(BB), E = succ_end(BB); I != E; ++I) {
BasicBlock *Succ = *I;
uint32_t Weight = getEdgeWeight(BB, Succ);
uint32_t PrevSum = Sum;
Sum += Weight;
assert(Sum > PrevSum); (void) PrevSum;
if (Weight > MaxWeight) {
MaxWeight = Weight;
MaxSucc = Succ;
}
}
// FIXME: Use BranchProbability::compare.
if ((uint64_t)MaxWeight * 5 > (uint64_t)Sum * 4)
return MaxSucc;
return 0;
}
// Return the edge's weight. If we can't find it, return the DEFAULT_WEIGHT value.
uint32_t
BranchProbabilityInfo::getEdgeWeight(BasicBlock *Src, BasicBlock *Dst) const {
Edge E(Src, Dst);
DenseMap<Edge, uint32_t>::const_iterator I = Weights.find(E);
if (I != Weights.end())
return I->second;
return DEFAULT_WEIGHT;
}
void BranchProbabilityInfo::setEdgeWeight(BasicBlock *Src, BasicBlock *Dst,
uint32_t Weight) {
Weights[std::make_pair(Src, Dst)] = Weight;
DEBUG(dbgs() << "set edge " << Src->getNameStr() << " -> "
<< Dst->getNameStr() << " weight to " << Weight
<< (isEdgeHot(Src, Dst) ? " [is HOT now]\n" : "\n"));
}
BranchProbability BranchProbabilityInfo::
getEdgeProbability(BasicBlock *Src, BasicBlock *Dst) const {
uint32_t N = getEdgeWeight(Src, Dst);
uint32_t D = getSumForBlock(Src);
return BranchProbability(N, D);
}
raw_ostream &
BranchProbabilityInfo::printEdgeProbability(raw_ostream &OS, BasicBlock *Src,
BasicBlock *Dst) const {
BranchProbability Prob = getEdgeProbability(Src, Dst);
OS << "edge " << Src->getNameStr() << " -> " << Dst->getNameStr()
<< " probability is " << Prob
<< (isEdgeHot(Src, Dst) ? " [HOT edge]\n" : "\n");
return OS;
}
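For illustration (not from this commit): a worked example of the weight arithmetic in this file, using the loop-branch heuristic's values. An edge's probability is its weight over the block's outgoing weight sum, and the hot test Weight/Sum > 4/5 is evaluated with 64-bit cross-multiplication to avoid both division and overflow.

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t Taken = 128, NonTaken = 4; // LBH_TAKEN/LBH_NONTAKEN weights
  uint32_t Sum = Taken + NonTaken;
  // Probabilities as in the comment above: 128/132 and 4/132.
  std::printf("taken %.4f, not taken %.4f\n",
              (double)Taken / Sum, (double)NonTaken / Sum);
  // isEdgeHot's overflow-free form of Weight/Sum > 4/5: 640 > 528.
  bool Hot = (uint64_t)Taken * 5 > (uint64_t)Sum * 4;
  std::printf("edge is %s\n", Hot ? "hot" : "cold");
}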


@ -1085,7 +1085,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
case 'c':
return Name == "cos" || Name == "ceil" || Name == "cosf" || Name == "cosh";
case 'e':
return Name == "exp";
return Name == "exp" || Name == "exp2";
case 'f':
return Name == "fabs" || Name == "fmod" || Name == "floor";
case 'l':
@ -1221,6 +1221,12 @@ llvm::ConstantFoldCall(Function *F,
case 'e':
if (Name == "exp")
return ConstantFoldFP(exp, V, Ty);
if (Name == "exp2") {
// Constant fold exp2(x) as pow(2,x) in case the host doesn't have a
// C99 library.
return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
}
break;
case 'f':
if (Name == "fabs")


@ -51,6 +51,10 @@ void DIBuilder::createCompileUnit(unsigned Lang, StringRef Filename,
ConstantInt::get(Type::getInt32Ty(VMContext), RunTimeVer)
};
TheCU = DICompileUnit(MDNode::get(VMContext, Elts));
// Create a named metadata so that it is easier to find cu in a module.
NamedMDNode *NMD = M.getOrInsertNamedMetadata("llvm.dbg.cu");
NMD->addOperand(TheCU);
}
/// createFile - Create a file descriptor to hold debugging information
@ -156,12 +160,12 @@ DIType DIBuilder::createReferenceType(DIType RTy) {
/// createTypedef - Create debugging information entry for a typedef.
DIType DIBuilder::createTypedef(DIType Ty, StringRef Name, DIFile File,
unsigned LineNo) {
unsigned LineNo, DIDescriptor Context) {
// typedefs are encoded in DIDerivedType format.
assert(Ty.Verify() && "Invalid typedef type!");
Value *Elts[] = {
GetTagConstant(VMContext, dwarf::DW_TAG_typedef),
Ty.getContext(),
Context,
MDString::get(VMContext, Name),
File,
ConstantInt::get(Type::getInt32Ty(VMContext), LineNo),


@ -148,7 +148,7 @@ class BasicCallGraph : public ModulePass, public CallGraph {
for (BasicBlock::iterator II = BB->begin(), IE = BB->end();
II != IE; ++II) {
CallSite CS(cast<Value>(II));
if (CS && !isa<DbgInfoIntrinsic>(II)) {
if (CS && !isa<IntrinsicInst>(II)) {
const Function *Callee = CS.getCalledFunction();
if (Callee)
Node->addCalledFunction(CS, getOrInsertFunction(Callee));


@ -245,8 +245,8 @@ bool CGPassManager::RefreshCallGraph(CallGraphSCC &CurSCC,
for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
CallSite CS(cast<Value>(I));
if (!CS || isa<DbgInfoIntrinsic>(I)) continue;
CallSite CS(cast<Value>(I));
if (!CS || isa<IntrinsicInst>(I)) continue;
// If this call site already existed in the callgraph, just verify it
// matches up to expectations and remove it from CallSites.


@ -32,7 +32,7 @@ INITIALIZE_PASS(FindUsedTypes, "print-used-types",
void FindUsedTypes::IncorporateType(const Type *Ty) {
// If ty doesn't already exist in the used types map, add it now, otherwise
// return.
if (!UsedTypes.insert(Ty).second) return; // Already contains Ty.
if (!UsedTypes.insert(Ty)) return; // Already contains Ty.
// Make sure to add any types this type references now.
//
@ -94,7 +94,7 @@ bool FindUsedTypes::runOnModule(Module &m) {
//
void FindUsedTypes::print(raw_ostream &OS, const Module *M) const {
OS << "Types in use by this module:\n";
for (std::set<const Type *>::const_iterator I = UsedTypes.begin(),
for (SetVector<const Type *>::const_iterator I = UsedTypes.begin(),
E = UsedTypes.end(); I != E; ++I) {
OS << " ";
WriteTypeSymbolic(OS, *I, M);


@ -21,6 +21,7 @@
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/ADT/STLExtras.h"
@ -38,6 +39,15 @@ INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(IVUsers, "iv-users",
"Induction Variable Users", false, true)
// IVUsers behavior currently depends on this temporary indvars mode. The
// option must be defined upstream from its uses.
namespace llvm {
bool DisableIVRewrite = false;
}
cl::opt<bool, true> DisableIVRewriteOpt(
"disable-iv-rewrite", cl::Hidden, cl::location(llvm::DisableIVRewrite),
cl::desc("Disable canonical induction variable rewriting"));
Pass *llvm::createIVUsersPass() {
return new IVUsers();
}
@ -79,7 +89,7 @@ static bool isInteresting(const SCEV *S, const Instruction *I, const Loop *L,
/// AddUsersIfInteresting - Inspect the specified instruction. If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true. Otherwise, return false.
bool IVUsers::AddUsersIfInteresting(Instruction *I) {
bool IVUsers::AddUsersIfInteresting(Instruction *I, PHINode *Phi) {
if (!SE->isSCEVable(I->getType()))
return false; // Void and FP expressions cannot be reduced.
@ -90,6 +100,11 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
return false;
// We expect Sign/Zero extension to be eliminated from the IR before analyzing
// any downstream uses.
if (DisableIVRewrite && (isa<SExtInst>(I) || isa<ZExtInst>(I)))
return false;
if (!Processed.insert(I))
return true; // Instruction already handled.
@ -121,13 +136,13 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
bool AddUserToIVUsers = false;
if (LI->getLoopFor(User->getParent()) != L) {
if (isa<PHINode>(User) || Processed.count(User) ||
!AddUsersIfInteresting(User)) {
!AddUsersIfInteresting(User, Phi)) {
DEBUG(dbgs() << "FOUND USER in other loop: " << *User << '\n'
<< " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
}
} else if (Processed.count(User) ||
!AddUsersIfInteresting(User)) {
!AddUsersIfInteresting(User, Phi)) {
DEBUG(dbgs() << "FOUND USER: " << *User << '\n'
<< " OF SCEV: " << *ISE << '\n');
AddUserToIVUsers = true;
@ -135,9 +150,11 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
if (AddUserToIVUsers) {
// Okay, we found a user that we cannot reduce.
IVUses.push_back(new IVStrideUse(this, User, I));
IVUses.push_back(new IVStrideUse(this, User, I, Phi));
IVStrideUse &NewUse = IVUses.back();
// Transform the expression into a normalized form.
// Autodetect the post-inc loop set, populating NewUse.PostIncLoops.
// The regular return value here is discarded; instead of recording
// it, we just recompute it when we need it.
ISE = TransformForPostIncUse(NormalizeAutodetect,
ISE, User, I,
NewUse.PostIncLoops,
@ -148,8 +165,8 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
return true;
}
IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand) {
IVUses.push_back(new IVStrideUse(this, User, Operand));
IVStrideUse &IVUsers::AddUser(Instruction *User, Value *Operand, PHINode *Phi) {
IVUses.push_back(new IVStrideUse(this, User, Operand, Phi));
return IVUses.back();
}
@ -177,7 +194,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
// them by stride. Start by finding all of the PHI nodes in the header for
// this loop. If they are induction variables, inspect their uses.
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
(void)AddUsersIfInteresting(I);
(void)AddUsersIfInteresting(I, cast<PHINode>(I));
return false;
}


@ -66,21 +66,13 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
ImmutableCallSite CS(cast<Instruction>(II));
// If this function contains a call to setjmp or _setjmp, never inline
// it. This is a hack because we depend on the user marking their local
// variables as volatile if they are live across a setjmp call, and they
// probably won't do this in callers.
if (const Function *F = CS.getCalledFunction()) {
// If a function is both internal and has a single use, then it is
// extremely likely to get inlined in the future (it was probably
// exposed by an interleaved devirtualization pass).
if (F->hasInternalLinkage() && F->hasOneUse())
++NumInlineCandidates;
if (F->isDeclaration() &&
(F->getName() == "setjmp" || F->getName() == "_setjmp"))
callsSetJmp = true;
// If this call is to function itself, then the function is recursive.
// Inlining it into other functions is a bad idea, because this is
// basically just a form of loop peeling, and our metrics aren't useful
@ -226,6 +218,13 @@ unsigned CodeMetrics::CountCodeReductionForAlloca(Value *V) {
/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void CodeMetrics::analyzeFunction(Function *F) {
// If this function contains a call to setjmp or _setjmp, never inline
// it. This is a hack because we depend on the user marking their local
// variables as volatile if they are live across a setjmp call, and they
// probably won't do this in callers.
if (F->callsFunctionThatReturnsTwice())
callsSetJmp = true;
// Look at the size of the callee.
for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
analyzeBasicBlock(&*BB);
@ -594,7 +593,7 @@ InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;
// For small functions we prefer to recalculate the cost for better accuracy.
if (CallerMetrics.NumBlocks < 10 || CallerMetrics.NumInsts < 1000) {
if (CallerMetrics.NumBlocks < 10 && CallerMetrics.NumInsts < 1000) {
resetCachedCostInfo(Caller);
return;
}


@ -913,8 +913,6 @@ static Value *SimplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
}
}
bool isSigned = Opcode == Instruction::SRem;
// X % undef -> undef
if (match(Op1, m_Undef()))
return Op1;
@ -1378,6 +1376,26 @@ static const Type *GetCompareTy(Value *Op) {
return CmpInst::makeCmpResultType(Op->getType());
}
/// ExtractEquivalentCondition - Rummage around inside V looking for something
/// equivalent to the comparison "LHS Pred RHS". Return such a value if found,
/// otherwise return null. Helper function for analyzing max/min idioms.
static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
Value *LHS, Value *RHS) {
SelectInst *SI = dyn_cast<SelectInst>(V);
if (!SI)
return 0;
CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
if (!Cmp)
return 0;
Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
return Cmp;
if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
LHS == CmpRHS && RHS == CmpLHS)
return Cmp;
return 0;
}
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
@ -1460,46 +1478,48 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
default:
assert(false && "Unknown ICmp predicate!");
case ICmpInst::ICMP_ULT:
return ConstantInt::getFalse(LHS->getContext());
// getNullValue also works for vectors, unlike getFalse.
return Constant::getNullValue(ITy);
case ICmpInst::ICMP_UGE:
return ConstantInt::getTrue(LHS->getContext());
// getAllOnesValue also works for vectors, unlike getTrue.
return ConstantInt::getAllOnesValue(ITy);
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULE:
if (isKnownNonZero(LHS, TD))
return ConstantInt::getFalse(LHS->getContext());
return Constant::getNullValue(ITy);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
if (isKnownNonZero(LHS, TD))
return ConstantInt::getTrue(LHS->getContext());
return ConstantInt::getAllOnesValue(ITy);
break;
case ICmpInst::ICMP_SLT:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
if (LHSKnownNegative)
return ConstantInt::getTrue(LHS->getContext());
return ConstantInt::getAllOnesValue(ITy);
if (LHSKnownNonNegative)
return ConstantInt::getFalse(LHS->getContext());
return Constant::getNullValue(ITy);
break;
case ICmpInst::ICMP_SLE:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
if (LHSKnownNegative)
return ConstantInt::getTrue(LHS->getContext());
return ConstantInt::getAllOnesValue(ITy);
if (LHSKnownNonNegative && isKnownNonZero(LHS, TD))
return ConstantInt::getFalse(LHS->getContext());
return Constant::getNullValue(ITy);
break;
case ICmpInst::ICMP_SGE:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
if (LHSKnownNegative)
return ConstantInt::getFalse(LHS->getContext());
return Constant::getNullValue(ITy);
if (LHSKnownNonNegative)
return ConstantInt::getTrue(LHS->getContext());
return ConstantInt::getAllOnesValue(ITy);
break;
case ICmpInst::ICMP_SGT:
ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, TD);
if (LHSKnownNegative)
return ConstantInt::getFalse(LHS->getContext());
return Constant::getNullValue(ITy);
if (LHSKnownNonNegative && isKnownNonZero(LHS, TD))
return ConstantInt::getTrue(LHS->getContext());
return ConstantInt::getAllOnesValue(ITy);
break;
}
}
@ -1791,7 +1811,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
return ConstantInt::getFalse(RHS->getContext());
// getNullValue also works for vectors, unlike getFalse.
return Constant::getNullValue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
ComputeSignBit(LHS, KnownNonNegative, KnownNegative, TD);
@ -1801,7 +1822,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
return ConstantInt::getTrue(RHS->getContext());
// getAllOnesValue also works for vectors, unlike getTrue.
return Constant::getAllOnesValue(ITy);
}
}
if (RBO && match(RBO, m_URem(m_Value(), m_Specific(LHS)))) {
@ -1818,7 +1840,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
return ConstantInt::getTrue(RHS->getContext());
// getAllOnesValue also works for vectors, unlike getTrue.
return Constant::getAllOnesValue(ITy);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
ComputeSignBit(RHS, KnownNonNegative, KnownNegative, TD);
@ -1828,7 +1851,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
case ICmpInst::ICMP_EQ:
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
return ConstantInt::getFalse(RHS->getContext());
// getNullValue also works for vectors, unlike getFalse.
return Constant::getNullValue(ITy);
}
}
@ -1843,7 +1867,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// fall-through
case Instruction::SDiv:
case Instruction::AShr:
if (!LBO->isExact() && !RBO->isExact())
if (!LBO->isExact() || !RBO->isExact())
break;
if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
RBO->getOperand(0), TD, DT, MaxRecurse-1))
@ -1864,6 +1888,194 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
}
// Simplify comparisons involving max/min.
Value *A, *B;
CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
// Signed variants on "max(a,b)>=a -> true".
if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // smax(A, B) pred A.
EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
// We analyze this as smax(A, B) pred A.
P = Pred;
} else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred smax(A, B).
EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
// We analyze this as smax(A, B) swapped-pred A.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
(A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // smin(A, B) pred A.
EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
// We analyze this as smax(-A, -B) swapped-pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred smin(A, B).
EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
// We analyze this as smax(-A, -B) pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = Pred;
}
if (P != CmpInst::BAD_ICMP_PREDICATE) {
// Cases correspond to "max(A, B) p A".
switch (P) {
default:
break;
case CmpInst::ICMP_EQ:
case CmpInst::ICMP_SLE:
// Equivalent to "A EqP B". This may be the same as the condition tested
// in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(EqP, A, B, TD, DT, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
case CmpInst::ICMP_SGT: {
CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
// Equivalent to "A InvEqP B". This may be the same as the condition
// tested in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, DT, MaxRecurse-1))
return V;
break;
}
case CmpInst::ICMP_SGE:
// Always true.
return Constant::getAllOnesValue(ITy);
case CmpInst::ICMP_SLT:
// Always false.
return Constant::getNullValue(ITy);
}
}
// Unsigned variants on "max(a,b)>=a -> true".
P = CmpInst::BAD_ICMP_PREDICATE;
if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // umax(A, B) pred A.
EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
// We analyze this as umax(A, B) pred A.
P = Pred;
} else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred umax(A, B).
EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
// We analyze this as umax(A, B) swapped-pred A.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
(A == RHS || B == RHS)) {
if (A != RHS) std::swap(A, B); // umin(A, B) pred A.
EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
// We analyze this as umax(-A, -B) swapped-pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = CmpInst::getSwappedPredicate(Pred);
} else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
(A == LHS || B == LHS)) {
if (A != LHS) std::swap(A, B); // A pred umin(A, B).
EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
// We analyze this as umax(-A, -B) pred -A.
// Note that we do not need to actually form -A or -B thanks to EqP.
P = Pred;
}
if (P != CmpInst::BAD_ICMP_PREDICATE) {
// Cases correspond to "max(A, B) p A".
switch (P) {
default:
break;
case CmpInst::ICMP_EQ:
case CmpInst::ICMP_ULE:
// Equivalent to "A EqP B". This may be the same as the condition tested
// in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
return V;
// Otherwise, see if "A EqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(EqP, A, B, TD, DT, MaxRecurse-1))
return V;
break;
case CmpInst::ICMP_NE:
case CmpInst::ICMP_UGT: {
CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
// Equivalent to "A InvEqP B". This may be the same as the condition
// tested in the max/min; if so, we can just return that.
if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
return V;
if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
return V;
// Otherwise, see if "A InvEqP B" simplifies.
if (MaxRecurse)
if (Value *V = SimplifyICmpInst(InvEqP, A, B, TD, DT, MaxRecurse-1))
return V;
break;
}
case CmpInst::ICMP_UGE:
// Always true.
return Constant::getAllOnesValue(ITy);
case CmpInst::ICMP_ULT:
// Always false.
return Constant::getNullValue(ITy);
}
}
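The unsigned arms above lean on the same equivalences as the signed ones: "A == umax(A, B)" iff "A uge B", with UGE folding to true and ULT to false. These identities are easy to confirm exhaustively over a small integer type; the following standalone sketch (illustrative test code, not part of this commit) checks them for all 8-bit values.

// Exhaustive 8-bit check of the umax/umin identities used above.
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A <= 255; ++A) {
    for (unsigned B = 0; B <= 255; ++B) {
      uint8_t a = (uint8_t)A, b = (uint8_t)B;
      uint8_t umax = std::max(a, b), umin = std::min(a, b);
      assert(umax >= a);               // umax(A, B) uge A: always true
      assert(umin <= a);               // umin(A, B) ule A: always true
      assert((a == umax) == (a >= b)); // "A == umax(A, B)" iff "A uge B"
      assert((a == umin) == (a <= b)); // "A == umin(A, B)" iff "A ule B"
    }
  }
  return 0;
}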
// Variants on "max(x,y) >= min(x,z)".
Value *C, *D;
if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// max(x, ?) pred min(x, ?).
if (Pred == CmpInst::ICMP_SGE)
// Always true.
return Constant::getAllOnesValue(ITy);
if (Pred == CmpInst::ICMP_SLT)
// Always false.
return Constant::getNullValue(ITy);
} else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
match(RHS, m_SMax(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// min(x, ?) pred max(x, ?).
if (Pred == CmpInst::ICMP_SLE)
// Always true.
return Constant::getAllOnesValue(ITy);
if (Pred == CmpInst::ICMP_SGT)
// Always false.
return Constant::getNullValue(ITy);
} else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// max(x, ?) pred min(x, ?).
if (Pred == CmpInst::ICMP_UGE)
// Always true.
return Constant::getAllOnesValue(ITy);
if (Pred == CmpInst::ICMP_ULT)
// Always false.
return Constant::getNullValue(ITy);
} else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
match(RHS, m_UMax(m_Value(C), m_Value(D))) &&
(A == C || A == D || B == C || B == D)) {
// min(x, ?) pred max(x, ?).
if (Pred == CmpInst::ICMP_ULE)
// Always true.
return Constant::getAllOnesValue(ITy);
if (Pred == CmpInst::ICMP_UGT)
// Always false.
return Constant::getNullValue(ITy);
}
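Each of these folds is anchored by the shared operand x: max(x, y) >= x >= min(x, z), so the comparison is decided regardless of y and z. A standalone sketch (illustrative, not part of this commit) verifying the signed orderings over a small range:

// Check max(x, ?) >= min(x, ?) whenever both sides share the operand x.
#include <algorithm>
#include <cassert>

int main() {
  for (int x = -8; x < 8; ++x)
    for (int y = -8; y < 8; ++y)
      for (int z = -8; z < 8; ++z) {
        assert(std::max(x, y) >= std::min(x, z)); // max(x,?) sge min(x,?)
        assert(std::min(x, y) <= std::max(x, z)); // min(x,?) sle max(x,?)
      }
  return 0;
}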
// If the comparison is with the result of a select instruction, check whether
// comparing with either branch of the select always yields the same value.
if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))

View File

@ -589,16 +589,18 @@ static bool InstructionDereferencesPointer(Instruction *I, Value *Ptr) {
}
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
if (MI->isVolatile()) return false;
if (MI->getAddressSpace() != 0) return false;
// FIXME: check whether it has a value range that excludes zero?
ConstantInt *Len = dyn_cast<ConstantInt>(MI->getLength());
if (!Len || Len->isZero()) return false;
if (MI->getRawDest() == Ptr || MI->getDest() == Ptr)
return true;
if (MI->getDestAddressSpace() == 0)
if (MI->getRawDest() == Ptr || MI->getDest() == Ptr)
return true;
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
return MTI->getRawSource() == Ptr || MTI->getSource() == Ptr;
if (MTI->getSourceAddressSpace() == 0)
if (MTI->getRawSource() == Ptr || MTI->getSource() == Ptr)
return true;
}
return false;
}
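The reshaped check only lets a memory intrinsic prove dereferenceability when it accesses address space 0, where a successful non-volatile access of non-zero length implies the pointer was not null; other address spaces may map address zero. A simplified standalone model of the check's shape (hypothetical types, not the LLVM API):

#include <cstdint>

// Hypothetical stand-in for the relevant MemIntrinsic queries.
struct MemAccess {
  bool IsVolatile;
  unsigned DestAddrSpace; // address space written to
  uint64_t Length;        // constant length, 0 if unknown or zero
  const void *Dest;
};

// Mirrors the shape of the updated check: only a non-volatile write of a
// known non-zero length in address space 0 proves Ptr is dereferenceable.
bool accessProvesDereferenceable(const MemAccess &MA, const void *Ptr) {
  if (MA.IsVolatile)
    return false;
  if (MA.Length == 0)
    return false;
  if (MA.DestAddrSpace == 0 && MA.Dest == Ptr)
    return true;
  return false;
}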

View File

@ -31,7 +31,7 @@ using namespace llvm;
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
// Test if the values are trivially equivalent.
if (A == B) return true;
// Test if the values come from identical arithmetic instructions.
// Use isIdenticalToWhenDefined instead of isIdenticalTo because
// this function is only used when one address use dominates the
@ -42,7 +42,7 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
if (const Instruction *BI = dyn_cast<Instruction>(B))
if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
return true;
// Otherwise they may not be equivalent.
return false;
}

View File

@ -374,10 +374,16 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
if (R == AliasAnalysis::MustAlias)
return MemDepResult::getDef(Inst);
#if 0 // FIXME: Temporarily disabled. GVN is cleverly rewriting loads
// in terms of clobbering loads, but since it does this by looking
// at the clobbering load directly, it doesn't know about any
// phi translation that may have happened along the way.
// If we have a partial alias, then return this as a clobber for the
// client to handle.
if (R == AliasAnalysis::PartialAlias)
return MemDepResult::getClobber(Inst);
#endif
// May-alias loads don't by themselves impose a dependence on one
// another.
@ -497,7 +503,7 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
// If we can do a pointer scan, make it happen.
bool isLoad = !(MR & AliasAnalysis::Mod);
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_end;
isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
QueryParent);
@ -937,6 +943,9 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
SmallVector<BasicBlock*, 32> Worklist;
Worklist.push_back(StartBB);
// PredList used inside loop.
SmallVector<std::pair<BasicBlock*, PHITransAddr>, 16> PredList;
// Keep track of the entries that we know are sorted. Previously cached
// entries will all be sorted. Entries we add are sorted only on demand (we
// don't insert every element into its sorted position). We know that we
@ -973,22 +982,29 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// the same Pointer.
if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
SkipFirstBlock = false;
SmallVector<BasicBlock*, 16> NewBlocks;
for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
// Verify that we haven't looked at this block yet.
std::pair<DenseMap<BasicBlock*,Value*>::iterator, bool>
InsertRes = Visited.insert(std::make_pair(*PI, Pointer.getAddr()));
if (InsertRes.second) {
// First time we've looked at *PI.
Worklist.push_back(*PI);
NewBlocks.push_back(*PI);
continue;
}
// If we have seen this block before, but it was with a different
// pointer, then we have a phi translation failure and we have to treat
// this as a clobber.
if (InsertRes.first->second != Pointer.getAddr())
if (InsertRes.first->second != Pointer.getAddr()) {
// Make sure to clean up the Visited map before continuing on to
// PredTranslationFailure.
for (unsigned i = 0; i < NewBlocks.size(); i++)
Visited.erase(NewBlocks[i]);
goto PredTranslationFailure;
}
}
Worklist.append(NewBlocks.begin(), NewBlocks.end());
continue;
}
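The NewBlocks vector implements a speculative-insert-with-rollback pattern: every key added to Visited in this round is recorded so that, on a conflicting prior entry, exactly those additions can be erased before jumping to PredTranslationFailure. A generic standalone sketch of the same pattern (hypothetical names, not code from this commit):

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Insert (key, value) pairs speculatively; if any key is already present
// with a different value, roll back everything inserted in this round.
bool insertAllOrRollBack(
    std::unordered_map<int, std::string> &Visited,
    const std::vector<std::pair<int, std::string>> &Batch) {
  std::vector<int> NewKeys; // keys inserted in this round only
  for (const auto &KV : Batch) {
    auto Res = Visited.insert(KV);
    if (Res.second) { // first time we've seen this key
      NewKeys.push_back(KV.first);
      continue;
    }
    if (Res.first->second != KV.second) { // conflicting prior entry
      for (int K : NewKeys)               // clean up before failing
        Visited.erase(K);
      return false;
    }
  }
  return true;
}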
@ -1007,13 +1023,15 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
NumSortedEntries = Cache->size();
}
Cache = 0;
PredList.clear();
for (BasicBlock **PI = PredCache->GetPreds(BB); *PI; ++PI) {
BasicBlock *Pred = *PI;
PredList.push_back(std::make_pair(Pred, Pointer));
// Get the PHI translated pointer in this predecessor. This can fail if
// not translatable, in which case the getAddr() returns null.
PHITransAddr PredPointer(Pointer);
PHITransAddr &PredPointer = PredList.back().second;
PredPointer.PHITranslateValue(BB, Pred, 0);
Value *PredPtrVal = PredPointer.getAddr();
@ -1027,6 +1045,9 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
InsertRes = Visited.insert(std::make_pair(Pred, PredPtrVal));
if (!InsertRes.second) {
// We found the pred; take it off the list of preds to visit.
PredList.pop_back();
// If the predecessor was visited with PredPtr, then we already did
// the analysis and can ignore it.
if (InsertRes.first->second == PredPtrVal)
@ -1035,14 +1056,47 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
// Otherwise, the block was previously analyzed with a different
// pointer. We can't represent the result of this case, so we just
// treat this as a phi translation failure.
// Make sure to clean up the Visited map before continuing on to
// PredTranslationFailure.
for (unsigned i = 0; i < PredList.size(); i++)
Visited.erase(PredList[i].first);
goto PredTranslationFailure;
}
}
// Actually process results here; this needs to be a separate loop to avoid
// calling getNonLocalPointerDepFromBB for blocks we don't want to return
// any results for. (getNonLocalPointerDepFromBB will modify our
// data structures in ways the code after the PredTranslationFailure label
// doesn't expect.)
for (unsigned i = 0; i < PredList.size(); i++) {
BasicBlock *Pred = PredList[i].first;
PHITransAddr &PredPointer = PredList[i].second;
Value *PredPtrVal = PredPointer.getAddr();
bool CanTranslate = true;
// If PHI translation was unable to find an available pointer in this
// predecessor, then we have to assume that the pointer is clobbered in
// that predecessor. We can still do PRE of the load, which would insert
// a computation of the pointer in this predecessor.
if (PredPtrVal == 0) {
if (PredPtrVal == 0)
CanTranslate = false;
// FIXME: it is entirely possible that PHI translating will end up with
// the same value. Consider PHI translating something like:
// X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
// to recurse here, pedantically speaking.
// If getNonLocalPointerDepFromBB fails here, that means the cached
// result conflicted with the Visited list; we have to conservatively
// assume a clobber, but this also does not block PRE of the load.
if (!CanTranslate ||
getNonLocalPointerDepFromBB(PredPointer,
Loc.getWithNewPtr(PredPtrVal),
isLoad, Pred,
Result, Visited)) {
// Add the entry to the Result list.
NonLocalDepResult Entry(Pred,
MemDepResult::getClobber(Pred->getTerminator()),
@ -1058,19 +1112,6 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
NLPI.Pair = BBSkipFirstBlockPair();
continue;
}
// FIXME: it is entirely possible that PHI translating will end up with
// the same value. Consider PHI translating something like:
// X = phi [x, bb1], [y, bb2]. PHI translating for bb1 doesn't *need*
// to recurse here, pedantically speaking.
// If we have a problem phi translating, fall through to the code below
// to handle the failure condition.
if (getNonLocalPointerDepFromBB(PredPointer,
Loc.getWithNewPtr(PredPointer.getAddr()),
isLoad, Pred,
Result, Visited))
goto PredTranslationFailure;
}
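Splitting the work into two loops is the point of the PredList change: the first loop only PHI-translates and touches Visited, so it can still bail to PredTranslationFailure after an easy cleanup, while the second loop calls getNonLocalPointerDepFromBB, which may reshape the caches that the failure path assumes untouched. A simplified standalone sketch of this two-phase shape (hypothetical names; the real second loop records a conservative clobber on a per-item failure rather than aborting):

#include <vector>

struct Pred { int Block; int TranslatedAddr; }; // hypothetical work item

// Phase 1: compute all per-predecessor inputs up front; abort cleanly if
// anything conflicts. Phase 2: run the processing step, which may mutate
// shared state, only once phase 1 can no longer abort.
template <typename TranslateFn, typename ProcessFn>
bool twoPhase(const std::vector<int> &Preds, TranslateFn Translate,
              ProcessFn Process) {
  std::vector<Pred> Work;
  for (int P : Preds) {
    Pred Item{P, 0};
    if (!Translate(P, Item.TranslatedAddr))
      return false; // nothing processed yet; trivial to undo
    Work.push_back(Item);
  }
  for (const Pred &Item : Work)
    Process(Item); // may mutate caches; no abort path remains
  return true;
}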
// Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
@ -1087,6 +1128,9 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
continue;
PredTranslationFailure:
// The following code is "failure"; we can't produce a sane translation
// for the given block. It assumes that we haven't modified any of
// our data structures while processing the current block.
if (Cache == 0) {
// Refresh the CacheInfo/Cache pointer if it got invalidated.
@ -1117,8 +1161,8 @@ getNonLocalPointerDepFromBB(const PHITransAddr &Pointer,
assert(I->getResult().isNonLocal() &&
"Should only be here with transparent block");
I->setResult(MemDepResult::getClobber(BB->begin()));
ReverseNonLocalPtrDeps[BB->begin()].insert(CacheKey);
I->setResult(MemDepResult::getClobber(BB->getTerminator()));
ReverseNonLocalPtrDeps[BB->getTerminator()].insert(CacheKey);
Result.push_back(NonLocalDepResult(I->getBB(), I->getResult(),
Pointer.getAddr()));
break;

View File

@ -249,7 +249,7 @@ void RegionPass::assignPassManager(PMStack &PMS,
assert (!PMS.empty() && "Unable to create Region Pass Manager");
PMDataManager *PMD = PMS.top();
// [1] Create new Call Graph Pass Manager
// [1] Create new Region Pass Manager
RGPM = new RGPassManager(PMD->getDepth() + 1);
RGPM->populateInheritedAnalysis(PMS);

View File

@ -1035,6 +1035,93 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
return S;
}
// Get the limit of a recurrence such that incrementing by Step cannot cause
// signed overflow as long as the value of the recurrence within the loop does
// not exceed this limit before incrementing.
static const SCEV *getOverflowLimitForStep(const SCEV *Step,
ICmpInst::Predicate *Pred,
ScalarEvolution *SE) {
unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
if (SE->isKnownPositive(Step)) {
*Pred = ICmpInst::ICMP_SLT;
return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
SE->getSignedRange(Step).getSignedMax());
}
if (SE->isKnownNegative(Step)) {
*Pred = ICmpInst::ICMP_SGT;
return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
SE->getSignedRange(Step).getSignedMin());
}
return 0;
}
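Concretely, for a known-positive step the limit is SignedMin - max(Step), computed in wrapping (APInt-style) arithmetic, paired with SLT: any recurrence value strictly below the limit can absorb one more increment without signed overflow. An 8-bit worked example (illustrative, not part of this commit):

#include <cassert>
#include <cstdint>

int main() {
  // Limit = SignedMinValue - SignedMax(Step) with wrapping arithmetic:
  // (-128 - 3) mod 2^8 == 125.
  uint8_t Limit = (uint8_t)(-128) - (uint8_t)3;
  assert((int8_t)Limit == 125);

  // For every value V with V slt 125 and every step S in [1, 3],
  // V + S stays within int8_t range, so the increment cannot overflow.
  for (int V = -128; V < 125; ++V)
    for (int S = 1; S <= 3; ++S)
      assert(V + S <= 127);
  return 0;
}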
// The recurrence AR has been shown to have no signed wrap. Typically, if we can
// prove NSW for AR, then we can just as easily prove NSW for its preincrement
// or postincrement sibling. This allows normalizing a sign-extended AddRec as
// such: {sext(Step + Start),+,Step} => {(Step + sext(Start)),+,Step}. As a
// result, the expression "Step + sext(PreIncAR)" is congruent with
// "sext(PostIncAR)".
static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
const Type *Ty,
ScalarEvolution *SE) {
const Loop *L = AR->getLoop();
const SCEV *Start = AR->getStart();
const SCEV *Step = AR->getStepRecurrence(*SE);
// Check for a simple looking step prior to loop entry.
const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
if (!SA || SA->getNumOperands() != 2 || SA->getOperand(0) != Step)
return 0;
// This is a postinc AR. Check for overflow on the preinc recurrence using the
// same three conditions that getSignExtendExpr checks.
// 1. NSW flags on the step increment.
const SCEV *PreStart = SA->getOperand(1);
const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
return PreStart;
// 2. Direct overflow check on the step operation's expression.
unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
const Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
const SCEV *OperandExtendedStart =
SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
SE->getSignExtendExpr(Step, WideTy));
if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
// Cache knowledge of PreAR NSW.
if (PreAR)
const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
// FIXME: this optimization needs a unit test
DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
return PreStart;
}
// 3. Loop precondition.
ICmpInst::Predicate Pred;
const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);
if (OverflowLimit &&
SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
return PreStart;
}
return 0;
}
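Condition 2 is a widened-operand identity: the narrow add PreStart + Step did not wrap iff sign-extending its result equals the sum of the sign-extended operands. A small worked example at 8 bits widened to 16 (illustrative, not part of this commit):

#include <cassert>
#include <cstdint>

int main() {
  // No wrap: 100 + 20 == 120 fits in int8_t, so the widened sums agree.
  int8_t PreStart = 100, Step = 20;
  int8_t Start = (int8_t)(PreStart + Step);
  assert((int16_t)Start == (int16_t)PreStart + (int16_t)Step);

  // Wrap: 120 + 20 leaves int8_t range, so the widened equality fails
  // and this PreStart could not be accepted.
  PreStart = 120;
  Start = (int8_t)(PreStart + Step);
  assert((int16_t)Start != (int16_t)PreStart + (int16_t)Step);
  return 0;
}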
// Get the normalized sign-extended expression for this AddRec's Start.
static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
const Type *Ty,
ScalarEvolution *SE) {
const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
if (!PreStart)
return SE->getSignExtendExpr(AR->getStart(), Ty);
return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
SE->getSignExtendExpr(PreStart, Ty));
}
const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
const Type *Ty) {
assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
@ -1097,7 +1184,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// If we have special knowledge that this addrec won't overflow,
// we don't need to do any further analysis.
if (AR->getNoWrapFlags(SCEV::FlagNSW))
return getAddRecExpr(getSignExtendExpr(Start, Ty),
return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
getSignExtendExpr(Step, Ty),
L, SCEV::FlagNSW);
@ -1133,7 +1220,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
getSignExtendExpr(Step, Ty),
L, AR->getNoWrapFlags());
}
@ -1149,7 +1236,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
getZeroExtendExpr(Step, Ty),
L, AR->getNoWrapFlags());
}
@ -1159,34 +1246,18 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
// the addrec is safe. Also, if the entry is guarded by a comparison
// with the start value and the backedge is guarded by a comparison
// with the post-inc value, the addrec is safe.
if (isKnownPositive(Step)) {
const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
getSignedRange(Step).getSignedMax());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
(isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
AR->getPostIncExpr(*this), N))) {
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
L, AR->getNoWrapFlags());
}
} else if (isKnownNegative(Step)) {
const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
(isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
AR->getPostIncExpr(*this), N))) {
// Cache knowledge of AR NSW, which is propagated to this AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
L, AR->getNoWrapFlags());
}
ICmpInst::Predicate Pred;
const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
if (OverflowLimit &&
(isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
(isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
OverflowLimit)))) {
// Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
getSignExtendExpr(Step, Ty),
L, AR->getNoWrapFlags());
}
}
}
@ -3783,7 +3854,7 @@ ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// update the value. The temporary CouldNotCompute value tells SCEV
// code elsewhere that it shouldn't attempt to request a new
// backedge-taken count, which could result in infinite recursion.
std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
if (!Pair.second)
return Pair.first->second;
@ -4433,7 +4504,7 @@ Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
const APInt &BEs,
const Loop *L) {
std::map<PHINode*, Constant*>::const_iterator I =
DenseMap<PHINode*, Constant*>::const_iterator I =
ConstantEvolutionLoopExitValue.find(PN);
if (I != ConstantEvolutionLoopExitValue.end())
return I->second;

View File

@ -131,8 +131,18 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
}
return;
}
if (Argument *A = dyn_cast<Argument>(V)) {
// Get alignment information off byval arguments if specified in the IR.
if (A->hasByValAttr())
if (unsigned Align = A->getParamAlignment())
KnownZero = Mask & APInt::getLowBitsSet(BitWidth,
CountTrailingZeros_32(Align));
return;
}
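An explicit byval alignment pins the low bits of the argument's address: alignment 16 gives CountTrailingZeros_32(16) == 4, so the low 4 bits are known zero. A standalone sketch of the same mask computation (illustrative; uses a compiler builtin in place of LLVM's CountTrailingZeros_32):

#include <cassert>
#include <cstdint>

// Low bits of a pointer known to be zero from its alignment: an alignment
// of Align (a power of two) forces the ctz(Align) low bits to zero.
uint64_t knownZeroLowBitsFromAlign(unsigned Align) {
  unsigned LowBits = __builtin_ctz(Align); // e.g. Align 16 -> 4 bits
  return (UINT64_C(1) << LowBits) - 1;     // mask of known-zero bits
}

int main() {
  assert(knownZeroLowBitsFromAlign(16) == 0xF); // 16-byte aligned
  assert(knownZeroLowBitsFromAlign(1) == 0x0);  // no bits known zero
  return 0;
}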
KnownZero.clearAllBits(); KnownOne.clearAllBits(); // Start out not knowing anything.
// Start out not knowing anything.
KnownZero.clearAllBits(); KnownOne.clearAllBits();
if (Depth == MaxDepth || Mask == 0)
return; // Limit search depth.
@ -670,6 +680,10 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
break;
}
case Intrinsic::x86_sse42_crc32_64_8:
case Intrinsic::x86_sse42_crc32_64_64:
KnownZero = APInt::getHighBitsSet(64, 32);
break;
}
}
break;

View File

@ -308,16 +308,8 @@ lltok::Kind LLLexer::LexAt() {
}
// Handle GlobalVarName: @[-a-zA-Z$._][-a-zA-Z$._0-9]*
if (isalpha(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
CurPtr[0] == '.' || CurPtr[0] == '_') {
++CurPtr;
while (isalnum(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
CurPtr[0] == '.' || CurPtr[0] == '_')
++CurPtr;
StrVal.assign(TokStart+1, CurPtr); // Skip @
if (ReadVarName())
return lltok::GlobalVar;
}
// Handle GlobalVarID: @[0-9]+
if (isdigit(CurPtr[0])) {
@ -334,6 +326,39 @@ lltok::Kind LLLexer::LexAt() {
return lltok::Error;
}
/// ReadString - Read a string until the closing quote.
lltok::Kind LLLexer::ReadString(lltok::Kind kind) {
const char *Start = CurPtr;
while (1) {
int CurChar = getNextChar();
if (CurChar == EOF) {
Error("end of file in string constant");
return lltok::Error;
}
if (CurChar == '"') {
StrVal.assign(Start, CurPtr-1);
UnEscapeLexed(StrVal);
return kind;
}
}
}
/// ReadVarName - Read the rest of a token containing a variable name.
bool LLLexer::ReadVarName() {
const char *NameStart = CurPtr;
if (isalpha(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
CurPtr[0] == '.' || CurPtr[0] == '_') {
++CurPtr;
while (isalnum(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
CurPtr[0] == '.' || CurPtr[0] == '_')
++CurPtr;
StrVal.assign(NameStart, CurPtr);
return true;
}
return false;
}
/// LexPercent - Lex all tokens that start with a % character:
/// LocalVar ::= %\"[^\"]*\"
@ -343,33 +368,12 @@ lltok::Kind LLLexer::LexPercent() {
// Handle LocalVarName: %\"[^\"]*\"
if (CurPtr[0] == '"') {
++CurPtr;
while (1) {
int CurChar = getNextChar();
if (CurChar == EOF) {
Error("end of file in string constant");
return lltok::Error;
}
if (CurChar == '"') {
StrVal.assign(TokStart+2, CurPtr-1);
UnEscapeLexed(StrVal);
return lltok::LocalVar;
}
}
return ReadString(lltok::LocalVar);
}
// Handle LocalVarName: %[-a-zA-Z$._][-a-zA-Z$._0-9]*
if (isalpha(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
CurPtr[0] == '.' || CurPtr[0] == '_') {
++CurPtr;
while (isalnum(CurPtr[0]) || CurPtr[0] == '-' || CurPtr[0] == '$' ||
CurPtr[0] == '.' || CurPtr[0] == '_')
++CurPtr;
StrVal.assign(TokStart+1, CurPtr); // Skip %
if (ReadVarName())
return lltok::LocalVar;
}
// Handle LocalVarID: %[0-9]+
if (isdigit(CurPtr[0])) {
@ -390,27 +394,16 @@ lltok::Kind LLLexer::LexPercent() {
/// QuoteLabel "[^"]+":
/// StringConstant "[^"]*"
lltok::Kind LLLexer::LexQuote() {
while (1) {
int CurChar = getNextChar();
if (CurChar == EOF) {
Error("end of file in quoted string");
return lltok::Error;
}
if (CurChar != '"') continue;
if (CurPtr[0] != ':') {
StrVal.assign(TokStart+1, CurPtr-1);
UnEscapeLexed(StrVal);
return lltok::StringConstant;
}
lltok::Kind kind = ReadString(lltok::StringConstant);
if (kind == lltok::Error || kind == lltok::Eof)
return kind;
if (CurPtr[0] == ':') {
++CurPtr;
StrVal.assign(TokStart+1, CurPtr-2);
UnEscapeLexed(StrVal);
return lltok::LabelStr;
kind = lltok::LabelStr;
}
return kind;
}
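The refactor collapses the three hand-rolled quote loops into the single ReadString helper; LexQuote then merely promotes the string token to a label when a ':' follows the closing quote. A minimal standalone model of that shape (hypothetical names, no escape handling; assumes the opening quote is already consumed):

#include <string>

enum Tok { TokError, TokString, TokLabel };

// Read up to the closing quote, then upgrade to a label if ':' follows.
Tok readQuoted(const char *&Cur, std::string &Val) {
  const char *Start = Cur;
  for (;; ++Cur) {
    if (*Cur == '\0')
      return TokError; // end of input inside the string
    if (*Cur == '"')
      break;
  }
  Val.assign(Start, Cur++); // consume the closing quote
  if (*Cur == ':') {
    ++Cur;
    return TokLabel; // "..."': is a label
  }
  return TokString;
}

int main() {
  const char *Input = "entry\":"; // as if lexing "entry":
  std::string Val;
  return readQuoted(Input, Val) == TokLabel && Val == "entry" ? 0 : 1;
}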
static bool JustWhitespaceNewLine(const char *&Ptr) {
@ -565,6 +558,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(nest);
KEYWORD(readnone);
KEYWORD(readonly);
KEYWORD(uwtable);
KEYWORD(inlinehint);
KEYWORD(noinline);

View File

@ -71,6 +71,9 @@ namespace llvm {
int getNextChar();
void SkipLineComment();
lltok::Kind ReadString(lltok::Kind kind);
bool ReadVarName();
lltok::Kind LexIdentifier();
lltok::Kind LexDigitOrNegative();
lltok::Kind LexPositive();

View File

@ -972,6 +972,7 @@ bool LLParser::ParseOptionalAttrs(unsigned &Attrs, unsigned AttrKind) {
case lltok::kw_noreturn: Attrs |= Attribute::NoReturn; break;
case lltok::kw_nounwind: Attrs |= Attribute::NoUnwind; break;
case lltok::kw_uwtable: Attrs |= Attribute::UWTable; break;
case lltok::kw_noinline: Attrs |= Attribute::NoInline; break;
case lltok::kw_readnone: Attrs |= Attribute::ReadNone; break;
case lltok::kw_readonly: Attrs |= Attribute::ReadOnly; break;
@ -3003,7 +3004,6 @@ int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
case lltok::kw_sub:
case lltok::kw_mul:
case lltok::kw_shl: {
LocTy ModifierLoc = Lex.getLoc();
bool NUW = EatIfPresent(lltok::kw_nuw);
bool NSW = EatIfPresent(lltok::kw_nsw);
if (!NUW) NUW = EatIfPresent(lltok::kw_nuw);

View File

@ -87,6 +87,7 @@ namespace lltok {
kw_nest,
kw_readnone,
kw_readonly,
kw_uwtable,
kw_inlinehint,
kw_noinline,

Some files were not shown because too many files have changed in this diff.