Upgrade our copy of llvm/clang to r155985, from upstream's release_31
branch.  This brings us very close to the 3.1 release, which is planned
for May 14th.

MFC after:	2 weeks
Dimitry Andric 2012-05-03 20:41:21 +00:00
commit cb4dff8563
316 changed files with 7087 additions and 14171 deletions

View File

@ -250,12 +250,6 @@ extern void
lto_codegen_set_assembler_args(lto_code_gen_t cg, const char **args,
int nargs);
/**
* Enables the internalize pass during LTO optimizations.
*/
extern void
lto_codegen_set_whole_program_optimization(lto_code_gen_t cg);
/**
* Adds to a list of all global symbols that must exist in the final
* generated code. If a function is not listed, it might be

View File

@ -126,9 +126,6 @@ protected:
private:
bool isSmall() const { return CurArray == SmallArray; }
unsigned Hash(const void *Ptr) const {
return static_cast<unsigned>(((uintptr_t)Ptr >> 4) & (CurArraySize-1));
}
const void * const *FindBucketFor(const void *Ptr) const;
void shrink_and_clear();

View File

@ -239,7 +239,7 @@ public:
explicit StringMap(AllocatorTy A)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))), Allocator(A) {}
explicit StringMap(const StringMap &RHS)
StringMap(const StringMap &RHS)
: StringMapImpl(static_cast<unsigned>(sizeof(MapEntryTy))) {
assert(RHS.empty() &&
"Copy ctor from non-empty stringmap not implemented yet!");

View File

@ -28,7 +28,6 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/ADT/DenseMap.h"
#include <map>
namespace llvm {
@ -37,7 +36,7 @@ class MachineInstr;
class MachineLoopInfo;
class MachineDominatorTree;
class InstrItineraryData;
class DefaultVLIWScheduler;
class ScheduleDAGInstrs;
class SUnit;
class DFAPacketizer {
@ -78,8 +77,6 @@ public:
// reserveResources - Reserve the resources occupied by a machine
// instruction and change the current state to reflect that change.
void reserveResources(llvm::MachineInstr *MI);
const InstrItineraryData *getInstrItins() const { return InstrItins; }
};
// VLIWPacketizerList - Implements a simple VLIW packetizer using DFA. The
@ -90,21 +87,20 @@ public:
// and machine resource is marked as taken. If any dependency is found, a target
// API call is made to prune the dependence.
class VLIWPacketizerList {
protected:
const TargetMachine &TM;
const MachineFunction &MF;
const TargetInstrInfo *TII;
// The VLIW Scheduler.
DefaultVLIWScheduler *VLIWScheduler;
// Encapsulate data types not exposed to the target interface.
ScheduleDAGInstrs *SchedulerImpl;
protected:
// Vector of instructions assigned to the current packet.
std::vector<MachineInstr*> CurrentPacketMIs;
// DFA resource tracker.
DFAPacketizer *ResourceTracker;
// Generate MI -> SU map.
std::map<MachineInstr*, SUnit*> MIToSUnit;
// Scheduling units.
std::vector<SUnit> SUnits;
public:
VLIWPacketizerList(
@ -122,32 +118,17 @@ public:
DFAPacketizer *getResourceTracker() {return ResourceTracker;}
// addToPacket - Add MI to the current packet.
virtual MachineBasicBlock::iterator addToPacket(MachineInstr *MI) {
MachineBasicBlock::iterator MII = MI;
CurrentPacketMIs.push_back(MI);
ResourceTracker->reserveResources(MI);
return MII;
}
void addToPacket(MachineInstr *MI);
// endPacket - End the current packet.
void endPacket(MachineBasicBlock *MBB, MachineInstr *MI);
// initPacketizerState - perform initialization before packetizing
// an instruction. This function is supposed to be overrided by
// the target dependent packetizer.
virtual void initPacketizerState(void) { return; }
void endPacket(MachineBasicBlock *MBB, MachineInstr *I);
// ignorePseudoInstruction - Ignore bundling of pseudo instructions.
virtual bool ignorePseudoInstruction(MachineInstr *I,
MachineBasicBlock *MBB) {
return false;
}
bool ignorePseudoInstruction(MachineInstr *I, MachineBasicBlock *MBB);
// isSoloInstruction - return true if instruction MI can not be packetized
// with any other instruction, which means that MI itself is a packet.
virtual bool isSoloInstruction(MachineInstr *MI) {
return true;
}
// isSoloInstruction - return true if instruction I must end previous
// packet.
bool isSoloInstruction(MachineInstr *I);
// isLegalToPacketizeTogether - Is it legal to packetize SUI and SUJ
// together.
@ -160,7 +141,6 @@ public:
virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
return false;
}
};
}

View File

@ -56,7 +56,7 @@ public:
protected:
TargetMachine *TM;
PassManagerBase &PM;
PassManagerBase *PM;
PassConfigImpl *Impl; // Internal data structures
bool Initialized; // Flagged after all passes are configured.

View File

@ -181,6 +181,13 @@ namespace llvm {
/// the def-side latency only.
bool UnitLatencies;
/// The standard DAG builder does not normally include terminators as DAG
/// nodes because it does not create the necessary dependencies to prevent
/// reordering. A specialized scheduler can override
/// TargetInstrInfo::isSchedulingBoundary then enable this flag to indicate
/// it has taken responsibility for scheduling the terminator correctly.
bool CanHandleTerminators;
/// State specific to the current scheduling region.
/// ------------------------------------------------

View File

@ -23,6 +23,7 @@
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Allocator.h"
@ -33,8 +34,7 @@ namespace llvm {
/// SlotIndexes pass. It should not be used directly. See the
/// SlotIndex & SlotIndexes classes for the public interface to this
/// information.
class IndexListEntry {
IndexListEntry *next, *prev;
class IndexListEntry : public ilist_node<IndexListEntry> {
MachineInstr *mi;
unsigned index;
@ -51,31 +51,26 @@ namespace llvm {
void setIndex(unsigned index) {
this->index = index;
}
IndexListEntry* getNext() { return next; }
const IndexListEntry* getNext() const { return next; }
void setNext(IndexListEntry *next) {
this->next = next;
}
IndexListEntry* getPrev() { return prev; }
const IndexListEntry* getPrev() const { return prev; }
void setPrev(IndexListEntry *prev) {
this->prev = prev;
}
};
// Specialize PointerLikeTypeTraits for IndexListEntry.
template <>
class PointerLikeTypeTraits<IndexListEntry*> {
struct ilist_traits<IndexListEntry> : public ilist_default_traits<IndexListEntry> {
private:
mutable ilist_half_node<IndexListEntry> Sentinel;
public:
static inline void* getAsVoidPointer(IndexListEntry *p) {
return p;
IndexListEntry *createSentinel() const {
return static_cast<IndexListEntry*>(&Sentinel);
}
static inline IndexListEntry* getFromVoidPointer(void *p) {
return static_cast<IndexListEntry*>(p);
}
enum { NumLowBitsAvailable = 3 };
void destroySentinel(IndexListEntry *) const {}
IndexListEntry *provideInitialHead() const { return createSentinel(); }
IndexListEntry *ensureHead(IndexListEntry*) const { return createSentinel(); }
static void noteHead(IndexListEntry*, IndexListEntry*) {}
void deleteNode(IndexListEntry *N) {}
private:
void createNode(const IndexListEntry &);
};
/// SlotIndex - An opaque wrapper around machine indexes.
@ -112,13 +107,13 @@ namespace llvm {
SlotIndex(IndexListEntry *entry, unsigned slot)
: lie(entry, slot) {}
IndexListEntry& entry() const {
IndexListEntry* listEntry() const {
assert(isValid() && "Attempt to compare reserved index.");
return *lie.getPointer();
return lie.getPointer();
}
int getIndex() const {
return entry().getIndex() | getSlot();
return listEntry()->getIndex() | getSlot();
}
/// Returns the slot for this SlotIndex.
@ -150,8 +145,7 @@ namespace llvm {
SlotIndex() : lie(0, 0) {}
// Construct a new slot index from the given one, and set the slot.
SlotIndex(const SlotIndex &li, Slot s)
: lie(&li.entry(), unsigned(s)) {
SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
assert(lie.getPointer() != 0 &&
"Attempt to construct index with 0 pointer.");
}
@ -179,7 +173,7 @@ namespace llvm {
bool operator!=(SlotIndex other) const {
return lie != other.lie;
}
/// Compare two SlotIndex objects. Return true if the first index
/// is strictly lower than the second.
bool operator<(SlotIndex other) const {
@ -211,7 +205,7 @@ namespace llvm {
/// isEarlierInstr - Return true if A refers to an instruction earlier than
/// B. This is equivalent to A < B && !isSameInstr(A, B).
static bool isEarlierInstr(SlotIndex A, SlotIndex B) {
return A.entry().getIndex() < B.entry().getIndex();
return A.listEntry()->getIndex() < B.listEntry()->getIndex();
}
/// Return the distance from this index to the given one.
@ -236,25 +230,25 @@ namespace llvm {
/// is the one associated with the Slot_Block slot for the instruction
/// pointed to by this index.
SlotIndex getBaseIndex() const {
return SlotIndex(&entry(), Slot_Block);
return SlotIndex(listEntry(), Slot_Block);
}
/// Returns the boundary index for associated with this index. The boundary
/// index is the one associated with the Slot_Block slot for the instruction
/// pointed to by this index.
SlotIndex getBoundaryIndex() const {
return SlotIndex(&entry(), Slot_Dead);
return SlotIndex(listEntry(), Slot_Dead);
}
/// Returns the register use/def slot in the current instruction for a
/// normal or early-clobber def.
SlotIndex getRegSlot(bool EC = false) const {
return SlotIndex(&entry(), EC ? Slot_EarlyClobber : Slot_Register);
return SlotIndex(listEntry(), EC ? Slot_EarlyClobber : Slot_Register);
}
/// Returns the dead def kill slot for the current instruction.
SlotIndex getDeadSlot() const {
return SlotIndex(&entry(), Slot_Dead);
return SlotIndex(listEntry(), Slot_Dead);
}
/// Returns the next slot in the index list. This could be either the
@ -266,15 +260,15 @@ namespace llvm {
SlotIndex getNextSlot() const {
Slot s = getSlot();
if (s == Slot_Dead) {
return SlotIndex(entry().getNext(), Slot_Block);
return SlotIndex(listEntry()->getNextNode(), Slot_Block);
}
return SlotIndex(&entry(), s + 1);
return SlotIndex(listEntry(), s + 1);
}
/// Returns the next index. This is the index corresponding to this
/// index's slot, but for the next instruction.
SlotIndex getNextIndex() const {
return SlotIndex(entry().getNext(), getSlot());
return SlotIndex(listEntry()->getNextNode(), getSlot());
}
/// Returns the previous slot in the index list. This could be either the
@ -286,15 +280,15 @@ namespace llvm {
SlotIndex getPrevSlot() const {
Slot s = getSlot();
if (s == Slot_Block) {
return SlotIndex(entry().getPrev(), Slot_Dead);
return SlotIndex(listEntry()->getPrevNode(), Slot_Dead);
}
return SlotIndex(&entry(), s - 1);
return SlotIndex(listEntry(), s - 1);
}
/// Returns the previous index. This is the index corresponding to this
/// index's slot, but for the previous instruction.
SlotIndex getPrevIndex() const {
return SlotIndex(entry().getPrev(), getSlot());
return SlotIndex(listEntry()->getPrevNode(), getSlot());
}
};
@ -315,7 +309,7 @@ namespace llvm {
return (LHS == RHS);
}
};
template <> struct isPodLike<SlotIndex> { static const bool value = true; };
@ -346,8 +340,10 @@ namespace llvm {
class SlotIndexes : public MachineFunctionPass {
private:
typedef ilist<IndexListEntry> IndexList;
IndexList indexList;
MachineFunction *mf;
IndexListEntry *indexListHead;
unsigned functionSize;
typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
@ -374,84 +370,18 @@ namespace llvm {
return entry;
}
void initList() {
assert(indexListHead == 0 && "Zero entry non-null at initialisation.");
indexListHead = createEntry(0, ~0U);
indexListHead->setNext(0);
indexListHead->setPrev(indexListHead);
}
void clearList() {
indexListHead = 0;
ileAllocator.Reset();
}
IndexListEntry* getTail() {
assert(indexListHead != 0 && "Call to getTail on uninitialized list.");
return indexListHead->getPrev();
}
const IndexListEntry* getTail() const {
assert(indexListHead != 0 && "Call to getTail on uninitialized list.");
return indexListHead->getPrev();
}
// Returns true if the index list is empty.
bool empty() const { return (indexListHead == getTail()); }
IndexListEntry* front() {
assert(!empty() && "front() called on empty index list.");
return indexListHead;
}
const IndexListEntry* front() const {
assert(!empty() && "front() called on empty index list.");
return indexListHead;
}
IndexListEntry* back() {
assert(!empty() && "back() called on empty index list.");
return getTail()->getPrev();
}
const IndexListEntry* back() const {
assert(!empty() && "back() called on empty index list.");
return getTail()->getPrev();
}
/// Insert a new entry before itr.
void insert(IndexListEntry *itr, IndexListEntry *val) {
assert(itr != 0 && "itr should not be null.");
IndexListEntry *prev = itr->getPrev();
val->setNext(itr);
val->setPrev(prev);
if (itr != indexListHead) {
prev->setNext(val);
}
else {
indexListHead = val;
}
itr->setPrev(val);
}
/// Push a new entry on to the end of the list.
void push_back(IndexListEntry *val) {
insert(getTail(), val);
}
/// Renumber locally after inserting newEntry.
void renumberIndexes(IndexListEntry *newEntry);
/// Renumber locally after inserting curItr.
void renumberIndexes(IndexList::iterator curItr);
public:
static char ID;
SlotIndexes() : MachineFunctionPass(ID), indexListHead(0) {
SlotIndexes() : MachineFunctionPass(ID) {
initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
}
virtual void getAnalysisUsage(AnalysisUsage &au) const;
virtual void releaseMemory();
virtual void releaseMemory();
virtual bool runOnMachineFunction(MachineFunction &fn);
@ -463,22 +393,21 @@ namespace llvm {
/// Returns the zero index for this analysis.
SlotIndex getZeroIndex() {
assert(front()->getIndex() == 0 && "First index is not 0?");
return SlotIndex(front(), 0);
assert(indexList.front().getIndex() == 0 && "First index is not 0?");
return SlotIndex(&indexList.front(), 0);
}
/// Returns the base index of the last slot in this analysis.
SlotIndex getLastIndex() {
return SlotIndex(back(), 0);
return SlotIndex(&indexList.back(), 0);
}
/// Returns the distance between the highest and lowest indexes allocated
/// so far.
unsigned getIndexesLength() const {
assert(front()->getIndex() == 0 &&
assert(indexList.front().getIndex() == 0 &&
"Initial index isn't zero?");
return back()->getIndex();
return indexList.back().getIndex();
}
/// Returns the number of instructions in the function.
@ -503,19 +432,15 @@ namespace llvm {
/// Returns the instruction for the given index, or null if the given
/// index has no instruction associated with it.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
return index.isValid() ? index.entry().getInstr() : 0;
return index.isValid() ? index.listEntry()->getInstr() : 0;
}
/// Returns the next non-null index.
SlotIndex getNextNonNullIndex(SlotIndex index) {
SlotIndex nextNonNull = index.getNextIndex();
while (&nextNonNull.entry() != getTail() &&
getInstructionFromIndex(nextNonNull) == 0) {
nextNonNull = nextNonNull.getNextIndex();
}
return nextNonNull;
IndexList::iterator itr(index.listEntry());
++itr;
while (itr != indexList.end() && itr->getInstr() == 0) { ++itr; }
return SlotIndex(itr, index.getSlot());
}
/// getIndexBefore - Returns the index of the last indexed instruction
@ -659,31 +584,31 @@ namespace llvm {
assert(mi->getParent() != 0 && "Instr must be added to function.");
// Get the entries where mi should be inserted.
IndexListEntry *prevEntry, *nextEntry;
IndexList::iterator prevItr, nextItr;
if (Late) {
// Insert mi's index immediately before the following instruction.
nextEntry = &getIndexAfter(mi).entry();
prevEntry = nextEntry->getPrev();
nextItr = getIndexAfter(mi).listEntry();
prevItr = prior(nextItr);
} else {
// Insert mi's index immediately after the preceding instruction.
prevEntry = &getIndexBefore(mi).entry();
nextEntry = prevEntry->getNext();
prevItr = getIndexBefore(mi).listEntry();
nextItr = llvm::next(prevItr);
}
// Get a number for the new instr, or 0 if there's no room currently.
// In the latter case we'll force a renumber later.
unsigned dist = ((nextEntry->getIndex() - prevEntry->getIndex())/2) & ~3u;
unsigned newNumber = prevEntry->getIndex() + dist;
unsigned dist = ((nextItr->getIndex() - prevItr->getIndex())/2) & ~3u;
unsigned newNumber = prevItr->getIndex() + dist;
// Insert a new list entry for mi.
IndexListEntry *newEntry = createEntry(mi, newNumber);
insert(nextEntry, newEntry);
IndexList::iterator newItr =
indexList.insert(nextItr, createEntry(mi, newNumber));
// Renumber locally if we need to.
if (dist == 0)
renumberIndexes(newEntry);
renumberIndexes(newItr);
SlotIndex newIndex(newEntry, SlotIndex::Slot_Block);
SlotIndex newIndex(&*newItr, SlotIndex::Slot_Block);
mi2iMap.insert(std::make_pair(mi, newIndex));
return newIndex;
}
@ -694,7 +619,7 @@ namespace llvm {
// MachineInstr -> index mappings
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
if (mi2iItr != mi2iMap.end()) {
IndexListEntry *miEntry(&mi2iItr->second.entry());
IndexListEntry *miEntry(mi2iItr->second.listEntry());
assert(miEntry->getInstr() == mi && "Instruction indexes broken.");
// FIXME: Eventually we want to actually delete these indexes.
miEntry->setInstr(0);
@ -709,7 +634,7 @@ namespace llvm {
if (mi2iItr == mi2iMap.end())
return;
SlotIndex replaceBaseIndex = mi2iItr->second;
IndexListEntry *miEntry(&replaceBaseIndex.entry());
IndexListEntry *miEntry(replaceBaseIndex.listEntry());
assert(miEntry->getInstr() == mi &&
"Mismatched instruction in index tables.");
miEntry->setInstr(newMI);
@ -726,13 +651,13 @@ namespace llvm {
IndexListEntry *nextEntry = 0;
if (nextMBB == mbb->getParent()->end()) {
nextEntry = getTail();
nextEntry = indexList.end();
} else {
nextEntry = &getMBBStartIdx(nextMBB).entry();
nextEntry = getMBBStartIdx(nextMBB).listEntry();
}
insert(nextEntry, startEntry);
insert(nextEntry, stopEntry);
indexList.insert(nextEntry, startEntry);
indexList.insert(nextEntry, stopEntry);
SlotIndex startIdx(startEntry, SlotIndex::Slot_Block);
SlotIndex endIdx(nextEntry, SlotIndex::Slot_Block);
@ -766,4 +691,4 @@ namespace llvm {
}
#endif // LLVM_CODEGEN_LIVEINDEX_H
#endif // LLVM_CODEGEN_SLOTINDEXES_H
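
The SlotIndexes change above replaces IndexListEntry's hand-maintained prev/next pointers with an intrusive llvm::ilist (note the new ilist_node base class and the ilist_traits specialization with an embedded sentinel). A minimal sketch of that pattern, using a hypothetical Entry node and the default ilist traits rather than the custom sentinel traits shown in the diff:

```cpp
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"

// Deriving from ilist_node<Entry> supplies the prev/next links and the
// getPrevNode()/getNextNode() accessors that the old code kept by hand.
class Entry : public llvm::ilist_node<Entry> {
  unsigned Index;
public:
  Entry(unsigned Idx = 0) : Index(Idx) {}
  unsigned getIndex() const { return Index; }
};

void demo() {
  llvm::ilist<Entry> List;       // replaces the manual indexListHead pointer
  List.push_back(new Entry(0));  // the list owns its nodes and deletes them
  List.push_back(new Entry(16));
  for (llvm::ilist<Entry>::iterator I = List.begin(), E = List.end();
       I != E; ++I)
    (void) I->getIndex();        // iterate without touching raw links
}
```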

View File

@ -1091,20 +1091,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
GCCBuiltin<"__builtin_ia32_vperm2f128_si256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty,
llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx_vpermil_pd :
Intrinsic<[llvm_v2f64_ty], [llvm_v2f64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx_vpermil_ps :
Intrinsic<[llvm_v4f32_ty], [llvm_v4f32_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx_vpermil_pd_256 :
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx_vpermil_ps_256 :
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty,
llvm_i8_ty], [IntrNoMem]>;
}
// Vector blend
@ -1659,15 +1645,9 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx2_permd : GCCBuiltin<"__builtin_ia32_permvarsi256">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty],
[IntrNoMem]>;
def int_x86_avx2_permq : GCCBuiltin<"__builtin_ia32_permdi256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx2_permps : GCCBuiltin<"__builtin_ia32_permvarsf256">,
Intrinsic<[llvm_v8f32_ty], [llvm_v8f32_ty, llvm_v8f32_ty],
[IntrNoMem]>;
def int_x86_avx2_permpd : GCCBuiltin<"__builtin_ia32_permdf256">,
Intrinsic<[llvm_v4f64_ty], [llvm_v4f64_ty, llvm_i8_ty],
[IntrNoMem]>;
def int_x86_avx2_vperm2i128 : GCCBuiltin<"__builtin_ia32_permti256">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty,
llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;

View File

@ -42,7 +42,7 @@ public:
MD_dbg = 0, // "dbg"
MD_tbaa = 1, // "tbaa"
MD_prof = 2, // "prof"
MD_fpaccuracy = 3, // "fpaccuracy"
MD_fpmath = 3, // "fpmath"
MD_range = 4 // "range"
};

View File

@ -16,14 +16,11 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/DataTypes.h"
#include <string>
#include <cassert>
namespace llvm {
class MemoryBuffer;
class SMLoc;
class MCAsmInfo;
/// AsmLexer - Lexer class for assembly files.

View File

@ -15,8 +15,6 @@
#include "llvm/Support/SMLoc.h"
namespace llvm {
class MCAsmLexer;
class MCInst;
/// AsmToken - Target independent representation for an assembler token.
class AsmToken {
@ -53,6 +51,7 @@ public:
Greater, GreaterEqual, GreaterGreater, At
};
private:
TokenKind Kind;
/// A reference to the entire token contents; this is always a pointer into

View File

@ -33,6 +33,15 @@
namespace llvm {
namespace object {
// Subclasses of ELFObjectFile may need this for template instantiation
inline std::pair<unsigned char, unsigned char>
getElfArchType(MemoryBuffer *Object) {
if (Object->getBufferSize() < ELF::EI_NIDENT)
return std::make_pair((uint8_t)ELF::ELFCLASSNONE,(uint8_t)ELF::ELFDATANONE);
return std::make_pair( (uint8_t)Object->getBufferStart()[ELF::EI_CLASS]
, (uint8_t)Object->getBufferStart()[ELF::EI_DATA]);
}
// Templates to choose Elf_Addr and Elf_Off depending on is64Bits.
template<support::endianness target_endianness>
struct ELFDataTypeTypedefHelperCommon {
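
The new getElfArchType helper above lets callers inspect an ELF image's class and endianness before choosing an ELFObjectFile instantiation. A hedged usage sketch (the llvm/Object/ELF.h include path is assumed from the surrounding context):

```cpp
#include "llvm/Object/ELF.h"
#include "llvm/Support/MemoryBuffer.h"

using namespace llvm;

// Returns true for a 64-bit little-endian ELF image; getElfArchType reads
// only the e_ident bytes, so no full object parse is needed.
bool isElf64LittleEndian(MemoryBuffer *Buf) {
  std::pair<unsigned char, unsigned char> Ident = object::getElfArchType(Buf);
  return Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB;
}
```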

View File

@ -15,8 +15,9 @@
#ifndef LLVM_OPERATOR_H
#define LLVM_OPERATOR_H
#include "llvm/Instruction.h"
#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/Type.h"
namespace llvm {
@ -129,14 +130,15 @@ public:
IsExact = (1 << 0)
};
private:
~PossiblyExactOperator(); // do not implement
friend class BinaryOperator;
friend class ConstantExpr;
void setIsExact(bool B) {
SubclassOptionalData = (SubclassOptionalData & ~IsExact) | (B * IsExact);
}
private:
~PossiblyExactOperator(); // do not implement
public:
/// isExact - Test whether this division is known to be exact, with
/// zero remainder.
@ -161,7 +163,28 @@ public:
(isa<ConstantExpr>(V) && classof(cast<ConstantExpr>(V)));
}
};
/// FPMathOperator - Utility class for floating point operations which can have
/// information about relaxed accuracy requirements attached to them.
class FPMathOperator : public Operator {
private:
~FPMathOperator(); // do not implement
public:
/// \brief Get the maximum error permitted by this operation in ULPs. An
/// accuracy of 0.0 means that the operation should be performed with the
/// default precision.
float getFPAccuracy() const;
static inline bool classof(const FPMathOperator *) { return true; }
static inline bool classof(const Instruction *I) {
return I->getType()->isFPOrFPVectorTy();
}
static inline bool classof(const Value *V) {
return isa<Instruction>(V) && classof(cast<Instruction>(V));
}
};
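
The new FPMathOperator class above gives read access to an instruction's relaxed-accuracy request. A small sketch of querying it (the llvm/Operator.h path matches the include guard shown in this hunk):

```cpp
#include "llvm/Instruction.h"
#include "llvm/Operator.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Report the maximum error, in ULPs, permitted for a floating-point
// instruction; 0.0 means the default (maximal) precision.
float requestedAccuracy(const Instruction *I) {
  if (const FPMathOperator *FPOp = dyn_cast<FPMathOperator>(I))
    return FPOp->getFPAccuracy();
  return 0.0f;   // not a floating-point operation
}
```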
/// ConcreteOperator - A helper template for defining operators for individual

View File

@ -17,6 +17,7 @@
#include "llvm/Instructions.h"
#include "llvm/BasicBlock.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
@ -331,49 +332,63 @@ template<bool preserveNames = true, typename T = ConstantFolder,
typename Inserter = IRBuilderDefaultInserter<preserveNames> >
class IRBuilder : public IRBuilderBase, public Inserter {
T Folder;
MDNode *DefaultFPMathTag;
public:
IRBuilder(LLVMContext &C, const T &F, const Inserter &I = Inserter())
: IRBuilderBase(C), Inserter(I), Folder(F) {
IRBuilder(LLVMContext &C, const T &F, const Inserter &I = Inserter(),
MDNode *FPMathTag = 0)
: IRBuilderBase(C), Inserter(I), Folder(F), DefaultFPMathTag(FPMathTag) {
}
explicit IRBuilder(LLVMContext &C) : IRBuilderBase(C), Folder() {
explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = 0) : IRBuilderBase(C),
Folder(), DefaultFPMathTag(FPMathTag) {
}
explicit IRBuilder(BasicBlock *TheBB, const T &F)
: IRBuilderBase(TheBB->getContext()), Folder(F) {
explicit IRBuilder(BasicBlock *TheBB, const T &F, MDNode *FPMathTag = 0)
: IRBuilderBase(TheBB->getContext()), Folder(F),
DefaultFPMathTag(FPMathTag) {
SetInsertPoint(TheBB);
}
explicit IRBuilder(BasicBlock *TheBB)
: IRBuilderBase(TheBB->getContext()), Folder() {
explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = 0)
: IRBuilderBase(TheBB->getContext()), Folder(),
DefaultFPMathTag(FPMathTag) {
SetInsertPoint(TheBB);
}
explicit IRBuilder(Instruction *IP)
: IRBuilderBase(IP->getContext()), Folder() {
explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = 0)
: IRBuilderBase(IP->getContext()), Folder(), DefaultFPMathTag(FPMathTag) {
SetInsertPoint(IP);
SetCurrentDebugLocation(IP->getDebugLoc());
}
explicit IRBuilder(Use &U)
: IRBuilderBase(U->getContext()), Folder() {
explicit IRBuilder(Use &U, MDNode *FPMathTag = 0)
: IRBuilderBase(U->getContext()), Folder(), DefaultFPMathTag(FPMathTag) {
SetInsertPoint(U);
SetCurrentDebugLocation(cast<Instruction>(U.getUser())->getDebugLoc());
}
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F)
: IRBuilderBase(TheBB->getContext()), Folder(F) {
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F,
MDNode *FPMathTag = 0)
: IRBuilderBase(TheBB->getContext()), Folder(F),
DefaultFPMathTag(FPMathTag) {
SetInsertPoint(TheBB, IP);
}
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP)
: IRBuilderBase(TheBB->getContext()), Folder() {
IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, MDNode *FPMathTag = 0)
: IRBuilderBase(TheBB->getContext()), Folder(),
DefaultFPMathTag(FPMathTag) {
SetInsertPoint(TheBB, IP);
}
/// getFolder - Get the constant folder being used.
const T &getFolder() { return Folder; }
/// getDefaultFPMathTag - Get the floating point math metadata being used.
MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
/// SetDefaultFPMathTag - Set the floating point math metadata to be used.
void SetDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
/// isNamePreserving - Return true if this builder is configured to actually
/// add the requested names to IR created through it.
bool isNamePreserving() const { return preserveNames; }
@ -496,6 +511,14 @@ private:
if (HasNSW) BO->setHasNoSignedWrap();
return BO;
}
Instruction *AddFPMathTag(Instruction *I, MDNode *FPMathTag) const {
if (!FPMathTag)
FPMathTag = DefaultFPMathTag;
if (FPMathTag)
I->setMetadata(LLVMContext::MD_fpmath, FPMathTag);
return I;
}
public:
Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
bool HasNUW = false, bool HasNSW = false) {
@ -511,11 +534,13 @@ public:
Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
return CreateAdd(LHS, RHS, Name, true, false);
}
Value *CreateFAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
Value *CreateFAdd(Value *LHS, Value *RHS, const Twine &Name = "",
MDNode *FPMathTag = 0) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFAdd(LC, RC), Name);
return Insert(BinaryOperator::CreateFAdd(LHS, RHS), Name);
return Insert(AddFPMathTag(BinaryOperator::CreateFAdd(LHS, RHS),
FPMathTag), Name);
}
Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
bool HasNUW = false, bool HasNSW = false) {
@ -531,11 +556,13 @@ public:
Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
return CreateSub(LHS, RHS, Name, true, false);
}
Value *CreateFSub(Value *LHS, Value *RHS, const Twine &Name = "") {
Value *CreateFSub(Value *LHS, Value *RHS, const Twine &Name = "",
MDNode *FPMathTag = 0) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFSub(LC, RC), Name);
return Insert(BinaryOperator::CreateFSub(LHS, RHS), Name);
return Insert(AddFPMathTag(BinaryOperator::CreateFSub(LHS, RHS),
FPMathTag), Name);
}
Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
bool HasNUW = false, bool HasNSW = false) {
@ -551,11 +578,13 @@ public:
Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
return CreateMul(LHS, RHS, Name, true, false);
}
Value *CreateFMul(Value *LHS, Value *RHS, const Twine &Name = "") {
Value *CreateFMul(Value *LHS, Value *RHS, const Twine &Name = "",
MDNode *FPMathTag = 0) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFMul(LC, RC), Name);
return Insert(BinaryOperator::CreateFMul(LHS, RHS), Name);
return Insert(AddFPMathTag(BinaryOperator::CreateFMul(LHS, RHS),
FPMathTag), Name);
}
Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
bool isExact = false) {
@ -581,11 +610,13 @@ public:
Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
return CreateSDiv(LHS, RHS, Name, true);
}
Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
Value *CreateFDiv(Value *LHS, Value *RHS, const Twine &Name = "",
MDNode *FPMathTag = 0) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFDiv(LC, RC), Name);
return Insert(BinaryOperator::CreateFDiv(LHS, RHS), Name);
return Insert(AddFPMathTag(BinaryOperator::CreateFDiv(LHS, RHS),
FPMathTag), Name);
}
Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
@ -599,11 +630,13 @@ public:
return Insert(Folder.CreateSRem(LC, RC), Name);
return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
}
Value *CreateFRem(Value *LHS, Value *RHS, const Twine &Name = "") {
Value *CreateFRem(Value *LHS, Value *RHS, const Twine &Name = "",
MDNode *FPMathTag = 0) {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Insert(Folder.CreateFRem(LC, RC), Name);
return Insert(BinaryOperator::CreateFRem(LHS, RHS), Name);
return Insert(AddFPMathTag(BinaryOperator::CreateFRem(LHS, RHS),
FPMathTag), Name);
}
Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
@ -729,10 +762,10 @@ public:
Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
return CreateNeg(V, Name, true, false);
}
Value *CreateFNeg(Value *V, const Twine &Name = "") {
Value *CreateFNeg(Value *V, const Twine &Name = "", MDNode *FPMathTag = 0) {
if (Constant *VC = dyn_cast<Constant>(V))
return Insert(Folder.CreateFNeg(VC), Name);
return Insert(BinaryOperator::CreateFNeg(V), Name);
return Insert(AddFPMathTag(BinaryOperator::CreateFNeg(V), FPMathTag), Name);
}
Value *CreateNot(Value *V, const Twine &Name = "") {
if (Constant *VC = dyn_cast<Constant>(V))
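
The IRBuilder changes above thread an optional !fpmath tag through every floating-point creation method, with a per-builder default. A hedged sketch of how the new parameters combine with MDBuilder::createFPMath (include paths assumed from the 3.1-era tree; the 2.5-ULP value is only illustrative):

```cpp
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MDBuilder.h"

using namespace llvm;

// Every FP operation created through this builder is tagged with !fpmath,
// either from the constructor's default tag or a per-call FPMathTag argument.
Value *emitRelaxedFAdd(BasicBlock *BB, Value *L, Value *R) {
  MDBuilder MDB(BB->getContext());
  MDNode *FPMath = MDB.createFPMath(2.5f); // permit up to 2.5 ULPs of error
  IRBuilder<> Builder(BB, FPMath);         // default tag for the whole builder
  return Builder.CreateFAdd(L, R, "sum");  // receives the !fpmath metadata
}
```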

View File

@ -1,448 +0,0 @@
//===--- JSONParser.h - Simple JSON parser ----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a JSON parser.
//
// See http://www.json.org/ for an overview.
// See http://www.ietf.org/rfc/rfc4627.txt for the full standard.
//
// FIXME: Currently this supports a subset of JSON. Specifically, support
// for numbers, booleans and null for values is missing.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_JSON_PARSER_H
#define LLVM_SUPPORT_JSON_PARSER_H
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SourceMgr.h"
namespace llvm {
class JSONContainer;
class JSONString;
class JSONValue;
class JSONKeyValuePair;
/// \brief Base class for a parsable JSON atom.
///
/// This class has no semantics other than being a unit of JSON data which can
/// be parsed out of a JSON document.
class JSONAtom {
public:
/// \brief Possible types of JSON objects.
enum Kind { JK_KeyValuePair, JK_Array, JK_Object, JK_String };
/// \brief Returns the type of this value.
Kind getKind() const { return MyKind; }
static bool classof(const JSONAtom *Atom) { return true; }
protected:
JSONAtom(Kind MyKind) : MyKind(MyKind) {}
private:
Kind MyKind;
};
/// \brief A parser for JSON text.
///
/// Use an object of JSONParser to iterate over the values of a JSON text.
/// All objects are parsed during the iteration, so you can only iterate once
/// over the JSON text, but the cost of partial iteration is minimized.
/// Create a new JSONParser if you want to iterate multiple times.
class JSONParser {
public:
/// \brief Create a JSONParser for the given input.
///
/// Parsing is started via parseRoot(). Access to the object returned from
/// parseRoot() will parse the input lazily.
JSONParser(StringRef Input, SourceMgr *SM);
/// \brief Returns the outermost JSON value (either an array or an object).
///
/// Can return NULL if the input does not start with an array or an object.
/// The object is not parsed yet - the caller must iterate over the
/// returned object to trigger parsing.
///
/// A JSONValue can be either a JSONString, JSONObject or JSONArray.
JSONValue *parseRoot();
/// \brief Parses the JSON text and returns whether it is valid JSON.
///
/// In case validate() return false, failed() will return true and
/// getErrorMessage() will return the parsing error.
bool validate();
/// \brief Returns true if an error occurs during parsing.
///
/// If there was an error while parsing an object that was created by
/// iterating over the result of 'parseRoot', 'failed' will return true.
bool failed() const;
private:
/// \brief These methods manage the implementation details of parsing new JSON
/// atoms.
/// @{
JSONString *parseString();
JSONValue *parseValue();
JSONKeyValuePair *parseKeyValuePair();
/// @}
/// \brief Helpers to parse the elements out of both forms of containers.
/// @{
const JSONAtom *parseElement(JSONAtom::Kind ContainerKind);
StringRef::iterator parseFirstElement(JSONAtom::Kind ContainerKind,
char StartChar, char EndChar,
const JSONAtom *&Element);
StringRef::iterator parseNextElement(JSONAtom::Kind ContainerKind,
char EndChar,
const JSONAtom *&Element);
/// @}
/// \brief Whitespace parsing.
/// @{
void nextNonWhitespace();
bool isWhitespace();
/// @}
/// \brief These methods are used for error handling.
/// {
void setExpectedError(StringRef Expected, StringRef Found);
void setExpectedError(StringRef Expected, char Found);
bool errorIfAtEndOfFile(StringRef Message);
bool errorIfNotAt(char C, StringRef Message);
/// }
/// \brief Skips all elements in the given container.
bool skipContainer(const JSONContainer &Container);
/// \brief Skips to the next position behind the given JSON atom.
bool skip(const JSONAtom &Atom);
/// All nodes are allocated by the parser and will be deallocated when the
/// parser is destroyed.
BumpPtrAllocator ValueAllocator;
/// \brief The original input to the parser.
MemoryBuffer *InputBuffer;
/// \brief The source manager used for diagnostics and buffer management.
SourceMgr *SM;
/// \brief The current position in the parse stream.
StringRef::iterator Position;
/// \brief The end position for fast EOF checks without introducing
/// unnecessary dereferences.
StringRef::iterator End;
/// \brief If true, an error has occurred.
bool Failed;
friend class JSONContainer;
};
/// \brief Base class for JSON value objects.
///
/// This object represents an abstract JSON value. It is the root node behind
/// the group of JSON entities that can represent top-level values in a JSON
/// document. It has no API, and is just a placeholder in the type hierarchy of
/// nodes.
class JSONValue : public JSONAtom {
protected:
JSONValue(Kind MyKind) : JSONAtom(MyKind) {}
public:
/// \brief dyn_cast helpers
///@{
static bool classof(const JSONAtom *Atom) {
switch (Atom->getKind()) {
case JK_Array:
case JK_Object:
case JK_String:
return true;
case JK_KeyValuePair:
return false;
}
llvm_unreachable("Invalid JSONAtom kind");
}
static bool classof(const JSONValue *Value) { return true; }
///@}
};
/// \brief Gives access to the text of a JSON string.
///
/// FIXME: Implement a method to return the unescaped text.
class JSONString : public JSONValue {
public:
/// \brief Returns the underlying parsed text of the string.
///
/// This is the unescaped content of the JSON text.
/// See http://www.ietf.org/rfc/rfc4627.txt for details.
StringRef getRawText() const { return RawText; }
private:
JSONString(StringRef RawText) : JSONValue(JK_String), RawText(RawText) {}
StringRef RawText;
friend class JSONParser;
public:
/// \brief dyn_cast helpers
///@{
static bool classof(const JSONAtom *Atom) {
return Atom->getKind() == JK_String;
}
static bool classof(const JSONString *String) { return true; }
///@}
};
/// \brief A (key, value) tuple of type (JSONString *, JSONValue *).
///
/// Note that JSONKeyValuePair is not a JSONValue, it is a bare JSONAtom.
/// JSONKeyValuePairs can be elements of a JSONObject, but not of a JSONArray.
/// They are not viable as top-level values either.
class JSONKeyValuePair : public JSONAtom {
public:
const JSONString * const Key;
const JSONValue * const Value;
private:
JSONKeyValuePair(const JSONString *Key, const JSONValue *Value)
: JSONAtom(JK_KeyValuePair), Key(Key), Value(Value) {}
friend class JSONParser;
public:
/// \brief dyn_cast helpers
///@{
static bool classof(const JSONAtom *Atom) {
return Atom->getKind() == JK_KeyValuePair;
}
static bool classof(const JSONKeyValuePair *KeyValuePair) { return true; }
///@}
};
/// \brief Implementation of JSON containers (arrays and objects).
///
/// JSONContainers drive the lazy parsing of JSON arrays and objects via
/// forward iterators.
class JSONContainer : public JSONValue {
private:
/// \brief An iterator that parses the underlying container during iteration.
///
/// Iterators on the same collection use shared state, so when multiple copies
/// of an iterator exist, only one is allowed to be used for iteration;
/// iterating multiple copies of an iterator of the same collection will lead
/// to undefined behavior.
class AtomIterator {
public:
AtomIterator(const AtomIterator &I) : Container(I.Container) {}
/// \brief Iterator interface.
///@{
bool operator==(const AtomIterator &I) const {
if (isEnd() || I.isEnd())
return isEnd() == I.isEnd();
return Container->Position == I.Container->Position;
}
bool operator!=(const AtomIterator &I) const {
return !(*this == I);
}
AtomIterator &operator++() {
Container->parseNextElement();
return *this;
}
const JSONAtom *operator*() {
return Container->Current;
}
///@}
private:
/// \brief Create an iterator for which 'isEnd' returns true.
AtomIterator() : Container(0) {}
/// \brief Create an iterator for the given container.
AtomIterator(const JSONContainer *Container) : Container(Container) {}
bool isEnd() const {
return Container == 0 || Container->Position == StringRef::iterator();
}
const JSONContainer * const Container;
friend class JSONContainer;
};
protected:
/// \brief An iterator for the specified AtomT.
///
/// Used for the implementation of iterators for JSONArray and JSONObject.
template <typename AtomT>
class IteratorTemplate : public std::iterator<std::forward_iterator_tag,
const AtomT*> {
public:
explicit IteratorTemplate(const AtomIterator& AtomI)
: AtomI(AtomI) {}
bool operator==(const IteratorTemplate &I) const {
return AtomI == I.AtomI;
}
bool operator!=(const IteratorTemplate &I) const { return !(*this == I); }
IteratorTemplate &operator++() {
++AtomI;
return *this;
}
const AtomT *operator*() { return dyn_cast<AtomT>(*AtomI); }
private:
AtomIterator AtomI;
};
JSONContainer(JSONParser *Parser, char StartChar, char EndChar,
JSONAtom::Kind ContainerKind)
: JSONValue(ContainerKind), Parser(Parser),
Position(), Current(0), Started(false),
StartChar(StartChar), EndChar(EndChar) {}
/// \brief Returns a lazy parsing iterator over the container.
///
/// As the iterator drives the parse stream, begin() must only be called
/// once per container.
AtomIterator atom_begin() const {
if (Started)
report_fatal_error("Cannot parse container twice.");
Started = true;
// Set up the position and current element when we begin iterating over the
// container.
Position = Parser->parseFirstElement(getKind(), StartChar, EndChar, Current);
return AtomIterator(this);
}
AtomIterator atom_end() const {
return AtomIterator();
}
private:
AtomIterator atom_current() const {
if (!Started)
return atom_begin();
return AtomIterator(this);
}
/// \brief Parse the next element in the container into the Current element.
///
/// This routine is called as an iterator into this container walks through
/// its elements. It mutates the container's internal current node to point to
/// the next atom of the container.
void parseNextElement() const {
Parser->skip(*Current);
Position = Parser->parseNextElement(getKind(), EndChar, Current);
}
// For parsing, JSONContainers call back into the JSONParser.
JSONParser * const Parser;
// 'Position', 'Current' and 'Started' store the state of the parse stream
// for iterators on the container, they don't change the container's elements
// and are thus marked as mutable.
mutable StringRef::iterator Position;
mutable const JSONAtom *Current;
mutable bool Started;
const char StartChar;
const char EndChar;
friend class JSONParser;
public:
/// \brief dyn_cast helpers
///@{
static bool classof(const JSONAtom *Atom) {
switch (Atom->getKind()) {
case JK_Array:
case JK_Object:
return true;
case JK_KeyValuePair:
case JK_String:
return false;
}
llvm_unreachable("Invalid JSONAtom kind");
}
static bool classof(const JSONContainer *Container) { return true; }
///@}
};
/// \brief A simple JSON array.
class JSONArray : public JSONContainer {
public:
typedef IteratorTemplate<JSONValue> const_iterator;
/// \brief Returns a lazy parsing iterator over the container.
///
/// As the iterator drives the parse stream, begin() must only be called
/// once per container.
const_iterator begin() const { return const_iterator(atom_begin()); }
const_iterator end() const { return const_iterator(atom_end()); }
private:
JSONArray(JSONParser *Parser)
: JSONContainer(Parser, '[', ']', JSONAtom::JK_Array) {}
public:
/// \brief dyn_cast helpers
///@{
static bool classof(const JSONAtom *Atom) {
return Atom->getKind() == JSONAtom::JK_Array;
}
static bool classof(const JSONArray *Array) { return true; }
///@}
friend class JSONParser;
};
/// \brief A JSON object: an iterable list of JSON key-value pairs.
class JSONObject : public JSONContainer {
public:
typedef IteratorTemplate<JSONKeyValuePair> const_iterator;
/// \brief Returns a lazy parsing iterator over the container.
///
/// As the iterator drives the parse stream, begin() must only be called
/// once per container.
const_iterator begin() const { return const_iterator(atom_begin()); }
const_iterator end() const { return const_iterator(atom_end()); }
private:
JSONObject(JSONParser *Parser)
: JSONContainer(Parser, '{', '}', JSONAtom::JK_Object) {}
public:
/// \brief dyn_cast helpers
///@{
static bool classof(const JSONAtom *Atom) {
return Atom->getKind() == JSONAtom::JK_Object;
}
static bool classof(const JSONObject *Object) { return true; }
///@}
friend class JSONParser;
};
} // end namespace llvm
#endif // LLVM_SUPPORT_JSON_PARSER_H

View File

@ -0,0 +1,17 @@
#ifndef LLVM_SUPPORT_LOCALE
#define LLVM_SUPPORT_LOCALE
#include "llvm/ADT/StringRef.h"
namespace llvm {
namespace sys {
namespace locale {
int columnWidth(StringRef s);
bool isPrint(int c);
}
}
}
#endif // LLVM_SUPPORT_LOCALE

View File

@ -0,0 +1,118 @@
//===---- llvm/Support/MDBuilder.h - Builder for LLVM metadata --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the MDBuilder class, which is used as a convenient way to
// create LLVM metadata with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SUPPORT_MDBUILDER_H
#define LLVM_SUPPORT_MDBUILDER_H
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Metadata.h"
#include "llvm/ADT/APInt.h"
namespace llvm {
class MDBuilder {
LLVMContext &Context;
public:
MDBuilder(LLVMContext &context) : Context(context) {}
/// \brief Return the given string as metadata.
MDString *createString(StringRef Str) {
return MDString::get(Context, Str);
}
//===------------------------------------------------------------------===//
// FPMath metadata.
//===------------------------------------------------------------------===//
/// \brief Return metadata with the given settings. The special value 0.0
/// for the Accuracy parameter indicates the default (maximal precision)
/// setting.
MDNode *createFPMath(float Accuracy) {
if (Accuracy == 0.0)
return 0;
assert(Accuracy > 0.0 && "Invalid fpmath accuracy!");
Value *Op = ConstantFP::get(Type::getFloatTy(Context), Accuracy);
return MDNode::get(Context, Op);
}
//===------------------------------------------------------------------===//
// Range metadata.
//===------------------------------------------------------------------===//
/// \brief Return metadata describing the range [Lo, Hi).
MDNode *createRange(const APInt &Lo, const APInt &Hi) {
assert(Lo.getBitWidth() == Hi.getBitWidth() && "Mismatched bitwidths!");
// If the range is everything then it is useless.
if (Hi == Lo)
return 0;
// Return the range [Lo, Hi).
Type *Ty = IntegerType::get(Context, Lo.getBitWidth());
Value *Range[2] = { ConstantInt::get(Ty, Lo), ConstantInt::get(Ty, Hi) };
return MDNode::get(Context, Range);
}
//===------------------------------------------------------------------===//
// TBAA metadata.
//===------------------------------------------------------------------===//
/// \brief Return metadata appropriate for a TBAA root node. Each returned
/// node is distinct from all other metadata and will never be identified
/// (uniqued) with anything else.
MDNode *createAnonymousTBAARoot() {
// To ensure uniqueness the root node is self-referential.
MDNode *Dummy = MDNode::getTemporary(Context, ArrayRef<Value*>());
MDNode *Root = MDNode::get(Context, Dummy);
// At this point we have
// !0 = metadata !{} <- dummy
// !1 = metadata !{metadata !0} <- root
// Replace the dummy operand with the root node itself and delete the dummy.
Root->replaceOperandWith(0, Root);
MDNode::deleteTemporary(Dummy);
// We now have
// !1 = metadata !{metadata !1} <- self-referential root
return Root;
}
/// \brief Return metadata appropriate for a TBAA root node with the given
/// name. This may be identified (uniqued) with other roots with the same
/// name.
MDNode *createTBAARoot(StringRef Name) {
return MDNode::get(Context, createString(Name));
}
/// \brief Return metadata for a non-root TBAA node with the given name,
/// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
MDNode *createTBAANode(StringRef Name, MDNode *Parent,
bool isConstant = false) {
if (isConstant) {
Constant *Flags = ConstantInt::get(Type::getInt64Ty(Context), 1);
Value *Ops[3] = { createString(Name), Parent, Flags };
return MDNode::get(Context, Ops);
} else {
Value *Ops[2] = { createString(Name), Parent };
return MDNode::get(Context, Ops);
}
}
};
} // end namespace llvm
#endif
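
MDBuilder's helpers above pair naturally with the instruction-level metadata kinds (MD_fpmath, MD_range) from the LLVMContext change earlier in this commit. A small sketch attaching the new !range metadata to a load (illustrative bounds; include paths as in the 3.1 tree):

```cpp
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/MDBuilder.h"
#include "llvm/ADT/APInt.h"

using namespace llvm;

// Record that a 32-bit load only ever produces values in [0, 256), so later
// passes can reason about it via the new !range metadata kind.
void markAsByteRange(LoadInst *LI) {
  MDBuilder MDB(LI->getContext());
  MDNode *Range = MDB.createRange(APInt(32, 0), APInt(32, 256));
  LI->setMetadata(LLVMContext::MD_range, Range);
}
```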

View File

@ -136,6 +136,10 @@ namespace sys {
/// Same as OutputColor, but only enables the bold attribute.
static const char *OutputBold(bool bg);
/// This function returns the escape sequence to reverse foreground and
/// background colors.
static const char *OutputReverse();
/// Resets the terminals colors, or returns an escape sequence to do so.
static const char *ResetColor();
/// @}

View File

@ -128,8 +128,11 @@ public:
/// PrintMessage - Emit a message about the specified location with the
/// specified string.
///
/// @param ShowColors - Display colored messages if output is a terminal and
/// the default error handler is used.
void PrintMessage(SMLoc Loc, DiagKind Kind, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) const;
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>(),
bool ShowColors = true) const;
/// GetMessage - Return an SMDiagnostic at the specified location with the
@ -188,7 +191,7 @@ public:
const std::vector<std::pair<unsigned, unsigned> > &getRanges() const {
return Ranges;
}
void print(const char *ProgName, raw_ostream &S) const;
void print(const char *ProgName, raw_ostream &S, bool ShowColors = true) const;
};
} // end llvm namespace
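
The new ShowColors parameter above lets callers suppress the colored output that the default diagnostic handler now produces. A hedged sketch (DK_Error is assumed to be one of SourceMgr's DiagKind values):

```cpp
#include "llvm/Support/SourceMgr.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

// Emit an error diagnostic without ANSI color escapes, e.g. when the output
// is being captured into a log file rather than shown on a terminal.
void reportPlain(const SourceMgr &SM, SMLoc Loc, const Twine &Msg) {
  SM.PrintMessage(Loc, SourceMgr::DK_Error, Msg, ArrayRef<SMRange>(),
                  /*ShowColors=*/false);
}
```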

View File

@ -516,8 +516,11 @@ public:
document_iterator() : Doc(NullDoc) {}
document_iterator(OwningPtr<Document> &D) : Doc(D) {}
bool operator ==(const document_iterator &Other) {
return Doc == Other.Doc;
}
bool operator !=(const document_iterator &Other) {
return Doc != Other.Doc;
return !(*this == Other);
}
document_iterator operator ++() {

View File

@ -222,6 +222,9 @@ public:
/// outputting colored text, or before program exit.
virtual raw_ostream &resetColor() { return *this; }
/// Reverses the foreground and background colors.
virtual raw_ostream &reverseColor() { return *this; }
/// This function determines if this stream is connected to a "tty" or
/// "console" window. That is, the output would be displayed to the user
/// rather than being put on a pipe or stored in a file.
@ -379,6 +382,8 @@ public:
bool bg=false);
virtual raw_ostream &resetColor();
virtual raw_ostream &reverseColor();
virtual bool is_displayed() const;
/// has_error - Return the value of the flag in this raw_fd_ostream indicating

View File

@ -29,6 +29,11 @@ public:
const std::string &getMessage() const { return Message; }
};
void PrintWarning(SMLoc WarningLoc, const Twine &Msg);
void PrintWarning(const char *Loc, const Twine &Msg);
void PrintWarning(const Twine &Msg);
void PrintWarning(const TGError &Warning);
void PrintError(SMLoc ErrorLoc, const Twine &Msg);
void PrintError(const char *Loc, const Twine &Msg);
void PrintError(const Twine &Msg);

View File

@ -454,7 +454,7 @@ public:
/// without adding quote markers. This primarily affects
/// StringInits where we will not surround the string value with
/// quotes.
virtual std::string getAsUnquotedString() const { return getAsString(); }
virtual std::string getAsUnquotedString() const { return getAsString(); }
/// dump - Debugging method that may be called through a debugger, just
/// invokes print on stderr.
@ -1529,7 +1529,7 @@ struct MultiClass {
void dump() const;
MultiClass(const std::string &Name, SMLoc Loc, RecordKeeper &Records) :
MultiClass(const std::string &Name, SMLoc Loc, RecordKeeper &Records) :
Rec(Name, Loc, Records) {}
};

View File

@ -83,7 +83,7 @@ namespace llvm {
/// long double expm1l(long double x);
expm1l,
/// float expm1f(float x);
expl1f,
expm1f,
/// double fabs(double x);
fabs,
/// long double fabsl(long double x);
@ -159,8 +159,14 @@ namespace llvm {
rint,
/// float rintf(float x);
rintf,
/// long dobule rintl(long double x);
/// long double rintl(long double x);
rintl,
/// double round(double x);
round,
/// float roundf(float x);
roundf,
/// long double roundl(long double x);
roundl,
/// double sin(double x);
sin,
/// long double sinl(long double x);

View File

@ -205,7 +205,7 @@ struct TargetRegisterInfoDesc {
/// Each TargetRegisterClass has a per register weight, and weight
/// limit which must be less than the limits of its pressure sets.
struct RegClassWeight {
unsigned RegWeigt;
unsigned RegWeight;
unsigned WeightLimit;
};

View File

@ -110,7 +110,8 @@ bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
///
BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
Pass *P = 0, bool MergeIdenticalEdges = false,
bool DontDeleteUselessPHIs = false);
bool DontDeleteUselessPHIs = false,
bool SplitLandingPads = false);
inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
Pass *P = 0) {

View File

@ -34,6 +34,9 @@ struct VectorizeConfig {
/// @brief Vectorize floating-point values.
bool VectorizeFloats;
/// @brief Vectorize pointer values.
bool VectorizePointers;
/// @brief Vectorize casting (conversion) operations.
bool VectorizeCasts;
@ -43,6 +46,12 @@ struct VectorizeConfig {
/// @brief Vectorize the fused-multiply-add intrinsic.
bool VectorizeFMA;
/// @brief Vectorize select instructions.
bool VectorizeSelect;
/// @brief Vectorize getelementptr instructions.
bool VectorizeGEP;
/// @brief Vectorize loads and stores.
bool VectorizeMemOps;

View File

@ -681,6 +681,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
// This makes it easy to determine if the getelementptr is "inbounds".
// Also, this helps GlobalOpt do SROA on GlobalVariables.
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Forming regular GEP of non-pointer type");
SmallVector<Constant*, 32> NewIdxs;
do {
if (SequentialType *ATy = dyn_cast<SequentialType>(Ty)) {
@ -711,10 +712,17 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
}
Ty = ATy->getElementType();
} else if (StructType *STy = dyn_cast<StructType>(Ty)) {
// Determine which field of the struct the offset points into. The
// getZExtValue is at least as safe as the StructLayout API because we
// know the offset is within the struct at this point.
// If we end up with an offset that isn't valid for this struct type, we
// can't re-form this GEP in a regular form, so bail out. The pointer
// operand likely went through casts that are necessary to make the GEP
// sensible.
const StructLayout &SL = *TD->getStructLayout(STy);
if (Offset.uge(SL.getSizeInBytes()))
break;
// Determine which field of the struct the offset points into. The
// getZExtValue is fine as we've already ensured that the offset is
// within the range representable by the StructLayout API.
unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
ElIdx));

View File

@ -3187,7 +3187,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
// Add the total offset from all the GEP indices to the base.
return getAddExpr(BaseS, TotalOffset,
isInBounds ? SCEV::FlagNUW : SCEV::FlagAnyWrap);
isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is

View File

@ -564,7 +564,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
Depth+1);
// If it's known zero, our sign bit is also zero.
if (LHSKnownZero.isNegative())
KnownZero |= LHSKnownZero;
KnownZero.setBit(BitWidth - 1);
}
break;

View File

@ -36,35 +36,20 @@ const char *DwarfAccelTable::Atom::AtomTypeString(enum AtomType AT) {
llvm_unreachable("invalid AtomType!");
}
// The general case would need to have a less hard coded size for the
// length of the HeaderData, however, if we're constructing based on a
// single Atom then we know it will always be: 4 + 4 + 2 + 2.
DwarfAccelTable::DwarfAccelTable(DwarfAccelTable::Atom atom) :
Header(12),
HeaderData(atom) {
}
// The length of the header data is always going to be 4 + 4 + 4*NumAtoms.
DwarfAccelTable::DwarfAccelTable(std::vector<DwarfAccelTable::Atom> &atomList) :
DwarfAccelTable::DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom> atomList) :
Header(8 + (atomList.size() * 4)),
HeaderData(atomList) {
}
HeaderData(atomList),
Entries(Allocator) { }
DwarfAccelTable::~DwarfAccelTable() {
for (size_t i = 0, e = Data.size(); i < e; ++i)
delete Data[i];
for (StringMap<DataArray>::iterator
EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI)
for (DataArray::iterator DI = EI->second.begin(),
DE = EI->second.end(); DI != DE; ++DI)
delete (*DI);
}
DwarfAccelTable::~DwarfAccelTable() { }
void DwarfAccelTable::AddName(StringRef Name, DIE* die, char Flags) {
assert(Data.empty() && "Already finalized!");
// If the string is in the list already then add this die to the list
// otherwise add a new one.
DataArray &DIEs = Entries[Name];
DIEs.push_back(new HashDataContents(die, Flags));
DIEs.push_back(new (Allocator) HashDataContents(die, Flags));
}
void DwarfAccelTable::ComputeBucketCount(void) {
@ -85,31 +70,23 @@ void DwarfAccelTable::ComputeBucketCount(void) {
Header.hashes_count = num;
}
namespace {
// DIESorter - comparison predicate that sorts DIEs by their offset.
struct DIESorter {
bool operator()(const struct DwarfAccelTable::HashDataContents *A,
const struct DwarfAccelTable::HashDataContents *B) const {
return A->Die->getOffset() < B->Die->getOffset();
}
};
// compareDIEs - comparison predicate that sorts DIEs by their offset.
static bool compareDIEs(const DwarfAccelTable::HashDataContents *A,
const DwarfAccelTable::HashDataContents *B) {
return A->Die->getOffset() < B->Die->getOffset();
}
void DwarfAccelTable::FinalizeTable(AsmPrinter *Asm, const char *Prefix) {
// Create the individual hash data outputs.
for (StringMap<DataArray>::iterator
EI = Entries.begin(), EE = Entries.end(); EI != EE; ++EI) {
struct HashData *Entry = new HashData((*EI).getKeyData());
// Unique the entries.
std::stable_sort(EI->second.begin(), EI->second.end(), DIESorter());
std::stable_sort(EI->second.begin(), EI->second.end(), compareDIEs);
EI->second.erase(std::unique(EI->second.begin(), EI->second.end()),
EI->second.end());
for (DataArray::const_iterator DI = EI->second.begin(),
DE = EI->second.end();
DI != DE; ++DI)
Entry->addData((*DI));
HashData *Entry = new (Allocator) HashData(EI->getKey(), EI->second);
Data.push_back(Entry);
}
@ -216,7 +193,7 @@ void DwarfAccelTable::EmitData(AsmPrinter *Asm, DwarfDebug *D) {
D->getStringPool());
Asm->OutStreamer.AddComment("Num DIEs");
Asm->EmitInt32((*HI)->Data.size());
for (std::vector<struct HashDataContents*>::const_iterator
for (ArrayRef<HashDataContents*>::const_iterator
DI = (*HI)->Data.begin(), DE = (*HI)->Data.end();
DI != DE; ++DI) {
// Emit the DIE offset

View File

@ -15,6 +15,7 @@
#define CODEGEN_ASMPRINTER_DWARFACCELTABLE_H__
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/DataTypes.h"
@ -164,22 +165,12 @@ public:
private:
struct TableHeaderData {
uint32_t die_offset_base;
std::vector<Atom> Atoms;
SmallVector<Atom, 1> Atoms;
TableHeaderData(ArrayRef<Atom> AtomList, uint32_t offset = 0)
: die_offset_base(offset), Atoms(AtomList.begin(), AtomList.end()) { }
TableHeaderData(std::vector<DwarfAccelTable::Atom> &AtomList,
uint32_t offset = 0) :
die_offset_base(offset) {
for (size_t i = 0, e = AtomList.size(); i != e; ++i)
Atoms.push_back(AtomList[i]);
}
TableHeaderData(DwarfAccelTable::Atom Atom, uint32_t offset = 0)
: die_offset_base(offset) {
Atoms.push_back(Atom);
}
#ifndef NDEBUG
void print (raw_ostream &O) {
O << "die_offset_base: " << die_offset_base << "\n";
@ -221,11 +212,11 @@ private:
StringRef Str;
uint32_t HashValue;
MCSymbol *Sym;
std::vector<struct HashDataContents*> Data; // offsets
HashData(StringRef S) : Str(S) {
ArrayRef<HashDataContents*> Data; // offsets
HashData(StringRef S, ArrayRef<HashDataContents*> Data)
: Str(S), Data(Data) {
HashValue = DwarfAccelTable::HashDJB(S);
}
void addData(struct HashDataContents *Datum) { Data.push_back(Datum); }
#ifndef NDEBUG
void print(raw_ostream &O) {
O << "Name: " << Str << "\n";
@ -255,15 +246,18 @@ private:
void EmitHashes(AsmPrinter *);
void EmitOffsets(AsmPrinter *, MCSymbol *);
void EmitData(AsmPrinter *, DwarfDebug *D);
// Allocator for HashData and HashDataContents.
BumpPtrAllocator Allocator;
// Output Variables
TableHeader Header;
TableHeaderData HeaderData;
std::vector<HashData*> Data;
// String Data
typedef std::vector<struct HashDataContents*> DataArray;
typedef StringMap<DataArray> StringEntries;
typedef std::vector<HashDataContents*> DataArray;
typedef StringMap<DataArray, BumpPtrAllocator&> StringEntries;
StringEntries Entries;
// Buckets/Hashes/Offsets
@ -274,8 +268,7 @@ private:
// Public Implementation
public:
DwarfAccelTable(DwarfAccelTable::Atom);
DwarfAccelTable(std::vector<DwarfAccelTable::Atom> &);
DwarfAccelTable(ArrayRef<DwarfAccelTable::Atom>);
~DwarfAccelTable();
void AddName(StringRef, DIE*, char = 0);
void FinalizeTable(AsmPrinter *, const char *);
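A minimal sketch, not from this patch, of the allocation pattern behind the Allocator member above and the placement-new calls in the .cpp: objects are carved out of a BumpPtrAllocator, so the table's destructor no longer deletes them individually. The Node type is made up for illustration; only BumpPtrAllocator and its placement-new overload come from llvm/Support/Allocator.h.
#include "llvm/Support/Allocator.h"
struct Node {                          // stand-in for HashDataContents
  int Value;
  explicit Node(int V) : Value(V) {}
};
void allocatorSketch() {
  llvm::BumpPtrAllocator Allocator;
  Node *N = new (Allocator) Node(42);  // memory is owned by the allocator
  (void)N;
  // No per-object delete: everything is released when the allocator is
  // destroyed (or Reset()), so the owning class needs no cleanup loop.
}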

View File

@ -1032,9 +1032,10 @@ DIE *CompileUnit::getOrCreateSubprogramDIE(DISubprogram SP) {
// Add function template parameters.
addTemplateParams(*SPDie, SP.getTemplateParams());
// Unfortunately this code needs to stay here to work around
// a bug in older gdbs that requires the linkage name to resolve
// multiple template functions.
// Unfortunately this code needs to stay here instead of below the
// AT_specification code in order to work around a bug in older
// gdbs that requires the linkage name to resolve multiple template
// functions.
StringRef LinkageName = SP.getLinkageName();
if (!LinkageName.empty())
addString(SPDie, dwarf::DW_AT_MIPS_linkage_name,

View File

@ -23,10 +23,10 @@
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
using namespace llvm;
@ -100,17 +100,17 @@ void DFAPacketizer::reserveResources(llvm::MachineInstr *MI) {
reserveResources(&MID);
}
namespace llvm {
namespace {
// DefaultVLIWScheduler - This class extends ScheduleDAGInstrs and overrides
// Schedule method to build the dependence graph.
class DefaultVLIWScheduler : public ScheduleDAGInstrs {
public:
DefaultVLIWScheduler(MachineFunction &MF, MachineLoopInfo &MLI,
MachineDominatorTree &MDT, bool IsPostRA);
MachineDominatorTree &MDT, bool IsPostRA);
// Schedule - Actual scheduling work.
void schedule();
};
}
} // end anonymous namespace
DefaultVLIWScheduler::DefaultVLIWScheduler(
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
@ -129,25 +129,49 @@ VLIWPacketizerList::VLIWPacketizerList(
bool IsPostRA) : TM(MF.getTarget()), MF(MF) {
TII = TM.getInstrInfo();
ResourceTracker = TII->CreateTargetScheduleState(&TM, 0);
VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
SchedulerImpl = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
}
// VLIWPacketizerList Dtor
VLIWPacketizerList::~VLIWPacketizerList() {
if (VLIWScheduler)
delete VLIWScheduler;
delete SchedulerImpl;
delete ResourceTracker;
}
if (ResourceTracker)
delete ResourceTracker;
// ignorePseudoInstruction - ignore pseudo instructions.
bool VLIWPacketizerList::ignorePseudoInstruction(MachineInstr *MI,
MachineBasicBlock *MBB) {
if (MI->isDebugValue())
return true;
if (TII->isSchedulingBoundary(MI, MBB, MF))
return true;
return false;
}
// isSoloInstruction - return true if instruction I must end previous
// packet.
bool VLIWPacketizerList::isSoloInstruction(MachineInstr *I) {
if (I->isInlineAsm())
return true;
return false;
}
// addToPacket - Add I to the current packet and reserve resource.
void VLIWPacketizerList::addToPacket(MachineInstr *MI) {
CurrentPacketMIs.push_back(MI);
ResourceTracker->reserveResources(MI);
}
// endPacket - End the current packet, bundle packet instructions and reset
// DFA state.
void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
MachineInstr *MI) {
MachineInstr *I) {
if (CurrentPacketMIs.size() > 1) {
MachineInstr *MIFirst = CurrentPacketMIs.front();
finalizeBundle(*MBB, MIFirst, MI);
finalizeBundle(*MBB, MIFirst, I);
}
CurrentPacketMIs.clear();
ResourceTracker->clearResources();
@ -157,36 +181,31 @@ void VLIWPacketizerList::endPacket(MachineBasicBlock *MBB,
void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
MachineBasicBlock::iterator BeginItr,
MachineBasicBlock::iterator EndItr) {
assert(VLIWScheduler && "VLIW Scheduler is not initialized!");
VLIWScheduler->enterRegion(MBB, BeginItr, EndItr, MBB->size());
VLIWScheduler->schedule();
VLIWScheduler->exitRegion();
assert(MBB->end() == EndItr && "Bad EndIndex");
// Generate MI -> SU map.
//std::map <MachineInstr*, SUnit*> MIToSUnit;
MIToSUnit.clear();
for (unsigned i = 0, e = VLIWScheduler->SUnits.size(); i != e; ++i) {
SUnit *SU = &VLIWScheduler->SUnits[i];
MIToSUnit[SU->getInstr()] = SU;
}
SchedulerImpl->enterRegion(MBB, BeginItr, EndItr, MBB->size());
// Build the DAG without reordering instructions.
SchedulerImpl->schedule();
// Remember scheduling units.
SUnits = SchedulerImpl->SUnits;
// The main packetizer loop.
for (; BeginItr != EndItr; ++BeginItr) {
MachineInstr *MI = BeginItr;
this->initPacketizerState();
// Ignore pseudo instructions.
if (ignorePseudoInstruction(MI, MBB))
continue;
// End the current packet if needed.
if (this->isSoloInstruction(MI)) {
if (isSoloInstruction(MI)) {
endPacket(MBB, MI);
continue;
}
// Ignore pseudo instructions.
if (this->ignorePseudoInstruction(MI, MBB))
continue;
SUnit *SUI = MIToSUnit[MI];
SUnit *SUI = SchedulerImpl->getSUnit(MI);
assert(SUI && "Missing SUnit Info!");
// Ask DFA if machine resource is available for MI.
@ -196,13 +215,13 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
for (std::vector<MachineInstr*>::iterator VI = CurrentPacketMIs.begin(),
VE = CurrentPacketMIs.end(); VI != VE; ++VI) {
MachineInstr *MJ = *VI;
SUnit *SUJ = MIToSUnit[MJ];
SUnit *SUJ = SchedulerImpl->getSUnit(MJ);
assert(SUJ && "Missing SUnit Info!");
// Is it legal to packetize SUI and SUJ together.
if (!this->isLegalToPacketizeTogether(SUI, SUJ)) {
if (!isLegalToPacketizeTogether(SUI, SUJ)) {
// Allow packetization if dependency can be pruned.
if (!this->isLegalToPruneDependencies(SUI, SUJ)) {
if (!isLegalToPruneDependencies(SUI, SUJ)) {
// End the packet if dependency cannot be pruned.
endPacket(MBB, MI);
break;
@ -215,9 +234,11 @@ void VLIWPacketizerList::PacketizeMIs(MachineBasicBlock *MBB,
}
// Add MI to the current packet.
BeginItr = this->addToPacket(MI);
addToPacket(MI);
} // For all instructions in BB.
// End any packet left behind.
endPacket(MBB, EndItr);
SchedulerImpl->exitRegion();
}
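A hedged sketch of how a target might drive this interface; only VLIWPacketizerList, PacketizeMIs and the legality hooks come from the code above, while the MyTarget names and the deliberately conservative hook bodies are hypothetical.
#include "llvm/CodeGen/DFAPacketizer.h"
#include "llvm/CodeGen/MachineFunction.h"
using namespace llvm;
class MyTargetPacketizer : public VLIWPacketizerList {
public:
  MyTargetPacketizer(MachineFunction &MF, MachineLoopInfo &MLI,
                     MachineDominatorTree &MDT)
    : VLIWPacketizerList(MF, MLI, MDT, true /*IsPostRA*/) {}
  // A real target consults its DFA tables and ISA rules here; returning false
  // conservatively leaves every instruction in its own packet.
  virtual bool isLegalToPacketizeTogether(SUnit *SUI, SUnit *SUJ) {
    return false;
  }
  virtual bool isLegalToPruneDependencies(SUnit *SUI, SUnit *SUJ) {
    return false;
  }
};
// Typically called from a target's packetizer pass, once per basic block:
static void packetizeFunction(MachineFunction &MF, MachineLoopInfo &MLI,
                              MachineDominatorTree &MDT) {
  MyTargetPacketizer Packetizer(MF, MLI, MDT);
  for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); MBB != E;
       ++MBB)
    Packetizer.PacketizeMIs(&*MBB, MBB->begin(), MBB->end());
}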

View File

@ -1068,9 +1068,9 @@ public:
#ifndef NDEBUG
LIValidator validator;
std::for_each(Entering.begin(), Entering.end(), validator);
std::for_each(Internal.begin(), Internal.end(), validator);
std::for_each(Exiting.begin(), Exiting.end(), validator);
validator = std::for_each(Entering.begin(), Entering.end(), validator);
validator = std::for_each(Internal.begin(), Internal.end(), validator);
validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness.");
#endif
@ -1115,9 +1115,9 @@ public:
#ifndef NDEBUG
LIValidator validator;
std::for_each(Entering.begin(), Entering.end(), validator);
std::for_each(Internal.begin(), Internal.end(), validator);
std::for_each(Exiting.begin(), Exiting.end(), validator);
validator = std::for_each(Entering.begin(), Entering.end(), validator);
validator = std::for_each(Internal.begin(), Internal.end(), validator);
validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
assert(validator.rangesOk() && "moveAllOperandsInto broke liveness.");
#endif
}

View File

@ -392,22 +392,44 @@ void MachineBasicBlock::updateTerminator() {
TII->InsertBranch(*this, TBB, 0, Cond, dl);
}
} else {
// Walk through the successors and find the successor which is not
// a landing pad and is not the conditional branch destination (in TBB)
// as the fallthrough successor.
MachineBasicBlock *FallthroughBB = 0;
for (succ_iterator SI = succ_begin(), SE = succ_end(); SI != SE; ++SI) {
if ((*SI)->isLandingPad() || *SI == TBB)
continue;
assert(!FallthroughBB && "Found more than one fallthrough successor.");
FallthroughBB = *SI;
}
if (!FallthroughBB && canFallThrough()) {
// We fallthrough to the same basic block as the conditional jump
// targets. Remove the conditional jump, leaving unconditional
// fallthrough.
// FIXME: This does not seem like a reasonable pattern to support, but it
// has been seen in the wild coming out of degenerate ARM test cases.
TII->RemoveBranch(*this);
// Finally update the unconditional successor to be reached via a branch
// if it would not be reached by fallthrough.
if (!isLayoutSuccessor(TBB))
TII->InsertBranch(*this, TBB, 0, Cond, dl);
return;
}
// The block has a fallthrough conditional branch.
MachineBasicBlock *MBBA = *succ_begin();
MachineBasicBlock *MBBB = *llvm::next(succ_begin());
if (MBBA == TBB) std::swap(MBBB, MBBA);
if (isLayoutSuccessor(TBB)) {
if (TII->ReverseBranchCondition(Cond)) {
// We can't reverse the condition, add an unconditional branch.
Cond.clear();
TII->InsertBranch(*this, MBBA, 0, Cond, dl);
TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl);
return;
}
TII->RemoveBranch(*this);
TII->InsertBranch(*this, MBBA, 0, Cond, dl);
} else if (!isLayoutSuccessor(MBBA)) {
TII->InsertBranch(*this, FallthroughBB, 0, Cond, dl);
} else if (!isLayoutSuccessor(FallthroughBB)) {
TII->RemoveBranch(*this);
TII->InsertBranch(*this, TBB, MBBA, Cond, dl);
TII->InsertBranch(*this, TBB, FallthroughBB, Cond, dl);
}
}
}

View File

@ -102,13 +102,13 @@ public:
}
/// \brief Iterator over blocks within the chain.
typedef SmallVectorImpl<MachineBasicBlock *>::const_iterator iterator;
typedef SmallVectorImpl<MachineBasicBlock *>::iterator iterator;
/// \brief Beginning of blocks within the chain.
iterator begin() const { return Blocks.begin(); }
iterator begin() { return Blocks.begin(); }
/// \brief End of blocks within the chain.
iterator end() const { return Blocks.end(); }
iterator end() { return Blocks.end(); }
/// \brief Merge a block chain into this one.
///
@ -211,12 +211,15 @@ class MachineBlockPlacement : public MachineFunctionPass {
void buildChain(MachineBasicBlock *BB, BlockChain &Chain,
SmallVectorImpl<MachineBasicBlock *> &BlockWorkList,
const BlockFilterSet *BlockFilter = 0);
MachineBasicBlock *findBestLoopTop(MachineFunction &F,
MachineLoop &L,
MachineBasicBlock *findBestLoopTop(MachineLoop &L,
const BlockFilterSet &LoopBlockSet);
MachineBasicBlock *findBestLoopExit(MachineFunction &F,
MachineLoop &L,
const BlockFilterSet &LoopBlockSet);
void buildLoopChains(MachineFunction &F, MachineLoop &L);
void rotateLoop(BlockChain &LoopChain, MachineBasicBlock *ExitingBB,
const BlockFilterSet &LoopBlockSet);
void buildCFGChains(MachineFunction &F);
void AlignLoops(MachineFunction &F);
public:
static char ID; // Pass identification, replacement for typeid
@ -540,13 +543,74 @@ void MachineBlockPlacement::buildChain(
/// \brief Find the best loop top block for layout.
///
/// Look for a block which is strictly better than the loop header for laying
/// out at the top of the loop. This looks for one and only one pattern:
/// a latch block with no conditional exit. This block will cause a conditional
/// jump around it or will be the bottom of the loop if we lay it out in place,
/// but if it doesn't end up at the bottom of the loop for any reason,
/// rotation alone won't fix it. Because such a block will always result in an
/// unconditional jump (for the backedge) rotating it in front of the loop
/// header is always profitable.
MachineBasicBlock *
MachineBlockPlacement::findBestLoopTop(MachineLoop &L,
const BlockFilterSet &LoopBlockSet) {
// Check that the header hasn't been fused with a preheader block due to
// crazy branches. If it has, we need to start with the header at the top to
// prevent pulling the preheader into the loop body.
BlockChain &HeaderChain = *BlockToChain[L.getHeader()];
if (!LoopBlockSet.count(*HeaderChain.begin()))
return L.getHeader();
DEBUG(dbgs() << "Finding best loop top for: "
<< getBlockName(L.getHeader()) << "\n");
BlockFrequency BestPredFreq;
MachineBasicBlock *BestPred = 0;
for (MachineBasicBlock::pred_iterator PI = L.getHeader()->pred_begin(),
PE = L.getHeader()->pred_end();
PI != PE; ++PI) {
MachineBasicBlock *Pred = *PI;
if (!LoopBlockSet.count(Pred))
continue;
DEBUG(dbgs() << " header pred: " << getBlockName(Pred) << ", "
<< Pred->succ_size() << " successors, "
<< MBFI->getBlockFreq(Pred) << " freq\n");
if (Pred->succ_size() > 1)
continue;
BlockFrequency PredFreq = MBFI->getBlockFreq(Pred);
if (!BestPred || PredFreq > BestPredFreq ||
(!(PredFreq < BestPredFreq) &&
Pred->isLayoutSuccessor(L.getHeader()))) {
BestPred = Pred;
BestPredFreq = PredFreq;
}
}
// If no direct predecessor is fine, just use the loop header.
if (!BestPred)
return L.getHeader();
// Walk backwards through any straight line of predecessors.
while (BestPred->pred_size() == 1 &&
(*BestPred->pred_begin())->succ_size() == 1 &&
*BestPred->pred_begin() != L.getHeader())
BestPred = *BestPred->pred_begin();
DEBUG(dbgs() << " final top: " << getBlockName(BestPred) << "\n");
return BestPred;
}
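A small worked example of this heuristic with made-up numbers: suppose the loop header H has two in-loop predecessors, P1 with frequency 40 but two successors, and P2 with frequency 25 and a single successor. P1 is skipped because its second successor would still force a conditional jump, so P2 is chosen and laid out above H, turning its backedge into a fallthrough. If P2 in turn had a sole single-successor predecessor inside the loop, the backwards walk at the end would move the chosen top up to that block as well.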
/// \brief Find the best loop exiting block for layout.
///
/// This routine implements the logic to analyze the loop looking for the best
/// block to layout at the top of the loop. Typically this is done to maximize
/// fallthrough opportunities.
MachineBasicBlock *
MachineBlockPlacement::findBestLoopTop(MachineFunction &F,
MachineLoop &L,
const BlockFilterSet &LoopBlockSet) {
MachineBlockPlacement::findBestLoopExit(MachineFunction &F,
MachineLoop &L,
const BlockFilterSet &LoopBlockSet) {
// We don't want to layout the loop linearly in all cases. If the loop header
// is just a normal basic block in the loop, we want to look for what block
// within the loop is the best one to layout at the top. However, if the loop
@ -557,11 +621,11 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F,
// header and only rotate if safe.
BlockChain &HeaderChain = *BlockToChain[L.getHeader()];
if (!LoopBlockSet.count(*HeaderChain.begin()))
return L.getHeader();
return 0;
BlockFrequency BestExitEdgeFreq;
unsigned BestExitLoopDepth = 0;
MachineBasicBlock *ExitingBB = 0;
MachineBasicBlock *LoopingBB = 0;
// If there are exits to outer loops, loop rotation can severely limit
// fallthrough opportunities unless it selects such an exit. Keep a set of
// blocks where rotating to exit with that block will reach an outer loop.
@ -584,15 +648,10 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F,
// successor isn't found.
MachineBasicBlock *OldExitingBB = ExitingBB;
BlockFrequency OldBestExitEdgeFreq = BestExitEdgeFreq;
// We also compute and store the best looping successor for use in layout.
MachineBasicBlock *BestLoopSucc = 0;
bool HasLoopingSucc = false;
// FIXME: Due to the performance of the probability and weight routines in
// the MBPI analysis, we use the internal weights. This is only valid
// because it is purely a ranking function, we don't care about anything
// but the relative values.
uint32_t BestLoopSuccWeight = 0;
// FIXME: We also manually compute the probabilities to avoid quadratic
// behavior.
// the MBPI analysis, we use the internal weights and manually compute the
// probabilities to avoid quadratic behavior.
uint32_t WeightScale = 0;
uint32_t SumWeight = MBPI->getSumForBlock(*I, WeightScale);
for (MachineBasicBlock::succ_iterator SI = (*I)->succ_begin(),
@ -604,10 +663,8 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F,
continue;
BlockChain &SuccChain = *BlockToChain[*SI];
// Don't split chains, either this chain or the successor's chain.
if (&Chain == &SuccChain || *SI != *SuccChain.begin()) {
DEBUG(dbgs() << " " << (LoopBlockSet.count(*SI) ? "looping: "
: "exiting: ")
<< getBlockName(*I) << " -> "
if (&Chain == &SuccChain) {
DEBUG(dbgs() << " exiting: " << getBlockName(*I) << " -> "
<< getBlockName(*SI) << " (chain conflict)\n");
continue;
}
@ -616,60 +673,103 @@ MachineBlockPlacement::findBestLoopTop(MachineFunction &F,
if (LoopBlockSet.count(*SI)) {
DEBUG(dbgs() << " looping: " << getBlockName(*I) << " -> "
<< getBlockName(*SI) << " (" << SuccWeight << ")\n");
if (BestLoopSucc && BestLoopSuccWeight >= SuccWeight)
continue;
BestLoopSucc = *SI;
BestLoopSuccWeight = SuccWeight;
HasLoopingSucc = true;
continue;
}
unsigned SuccLoopDepth = 0;
if (MachineLoop *ExitLoop = MLI->getLoopFor(*SI)) {
SuccLoopDepth = ExitLoop->getLoopDepth();
if (ExitLoop->contains(&L))
BlocksExitingToOuterLoop.insert(*I);
}
BranchProbability SuccProb(SuccWeight / WeightScale, SumWeight);
BlockFrequency ExitEdgeFreq = MBFI->getBlockFreq(*I) * SuccProb;
DEBUG(dbgs() << " exiting: " << getBlockName(*I) << " -> "
<< getBlockName(*SI) << " (" << ExitEdgeFreq << ")\n");
<< getBlockName(*SI) << " [L:" << SuccLoopDepth
<< "] (" << ExitEdgeFreq << ")\n");
// Note that we slightly bias this toward an existing layout successor to
// retain incoming order in the absence of better information.
// FIXME: Should we bias this more strongly? It's pretty weak.
if (!ExitingBB || ExitEdgeFreq > BestExitEdgeFreq ||
if (!ExitingBB || BestExitLoopDepth < SuccLoopDepth ||
ExitEdgeFreq > BestExitEdgeFreq ||
((*I)->isLayoutSuccessor(*SI) &&
!(ExitEdgeFreq < BestExitEdgeFreq))) {
BestExitEdgeFreq = ExitEdgeFreq;
ExitingBB = *I;
}
if (MachineLoop *ExitLoop = MLI->getLoopFor(*SI))
if (ExitLoop->contains(&L))
BlocksExitingToOuterLoop.insert(*I);
}
// Restore the old exiting state, no viable looping successor was found.
if (!BestLoopSucc) {
if (!HasLoopingSucc) {
ExitingBB = OldExitingBB;
BestExitEdgeFreq = OldBestExitEdgeFreq;
continue;
}
// If this was best exiting block thus far, also record the looping block.
if (ExitingBB == *I)
LoopingBB = BestLoopSucc;
}
// Without a candidate exitting block or with only a single block in the
// Without a candidate exiting block or with only a single block in the
// loop, just use the loop header to layout the loop.
if (!ExitingBB || L.getNumBlocks() == 1)
return L.getHeader();
return 0;
// Also, if we have exit blocks which lead to outer loops but didn't select
// one of them as the exiting block we are rotating toward, disable loop
// rotation altogether.
if (!BlocksExitingToOuterLoop.empty() &&
!BlocksExitingToOuterLoop.count(ExitingBB))
return L.getHeader();
return 0;
assert(LoopingBB && "All successors of a loop block are exit blocks!");
DEBUG(dbgs() << " Best exiting block: " << getBlockName(ExitingBB) << "\n");
DEBUG(dbgs() << " Best top block: " << getBlockName(LoopingBB) << "\n");
return LoopingBB;
return ExitingBB;
}
/// \brief Attempt to rotate an exiting block to the bottom of the loop.
///
/// Once we have built a chain, try to rotate it to line up the hot exit block
/// with fallthrough out of the loop if doing so doesn't introduce unnecessary
/// branches. For example, if the loop has fallthrough into its header and out
/// of its bottom already, don't rotate it.
void MachineBlockPlacement::rotateLoop(BlockChain &LoopChain,
MachineBasicBlock *ExitingBB,
const BlockFilterSet &LoopBlockSet) {
if (!ExitingBB)
return;
MachineBasicBlock *Top = *LoopChain.begin();
bool ViableTopFallthrough = false;
for (MachineBasicBlock::pred_iterator PI = Top->pred_begin(),
PE = Top->pred_end();
PI != PE; ++PI) {
BlockChain *PredChain = BlockToChain[*PI];
if (!LoopBlockSet.count(*PI) &&
(!PredChain || *PI == *llvm::prior(PredChain->end()))) {
ViableTopFallthrough = true;
break;
}
}
// If the header has viable fallthrough, check whether the current loop
// bottom is a viable exiting block. If so, bail out as rotating will
// introduce an unnecessary branch.
if (ViableTopFallthrough) {
MachineBasicBlock *Bottom = *llvm::prior(LoopChain.end());
for (MachineBasicBlock::succ_iterator SI = Bottom->succ_begin(),
SE = Bottom->succ_end();
SI != SE; ++SI) {
BlockChain *SuccChain = BlockToChain[*SI];
if (!LoopBlockSet.count(*SI) &&
(!SuccChain || *SI == *SuccChain->begin()))
return;
}
}
BlockChain::iterator ExitIt = std::find(LoopChain.begin(), LoopChain.end(),
ExitingBB);
if (ExitIt == LoopChain.end())
return;
std::rotate(LoopChain.begin(), llvm::next(ExitIt), LoopChain.end());
}
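A tiny standalone illustration, not part of the pass, of what the std::rotate call above does to the chain: the exiting block ends up as the last block laid out and the relative order of everything else is preserved.
#include <algorithm>
#include <iostream>
#include <string>
int main() {
  // Chain before rotation: header H, blocks A and B, exiting block E, then C.
  char Chain[] = {'H', 'A', 'B', 'E', 'C'};
  char *ExitIt = std::find(Chain, Chain + 5, 'E');
  std::rotate(Chain, ExitIt + 1, Chain + 5);           // next(ExitIt) above
  std::cout << std::string(Chain, Chain + 5) << "\n";  // prints CHABE
  return 0;
}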
/// \brief Forms basic block chains from the natural loop structures.
@ -688,8 +788,20 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F,
SmallVector<MachineBasicBlock *, 16> BlockWorkList;
BlockFilterSet LoopBlockSet(L.block_begin(), L.block_end());
MachineBasicBlock *LayoutTop = findBestLoopTop(F, L, LoopBlockSet);
BlockChain &LoopChain = *BlockToChain[LayoutTop];
// First check to see if there is an obviously preferable top block for the
// loop. This will default to the header, but may end up as one of the
// predecessors to the header if there is one which will result in strictly
// fewer branches in the loop body.
MachineBasicBlock *LoopTop = findBestLoopTop(L, LoopBlockSet);
// If we selected just the header for the loop top, look for a potentially
// profitable exit block in the event that rotating the loop can eliminate
// branches by placing an exit edge at the bottom.
MachineBasicBlock *ExitingBB = 0;
if (LoopTop == L.getHeader())
ExitingBB = findBestLoopExit(F, L, LoopBlockSet);
BlockChain &LoopChain = *BlockToChain[LoopTop];
// FIXME: This is a really lame way of walking the chains in the loop: we
// walk the blocks, and use a set to prevent visiting a particular chain
@ -721,7 +833,8 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F,
BlockWorkList.push_back(*Chain.begin());
}
buildChain(LayoutTop, LoopChain, BlockWorkList, &LoopBlockSet);
buildChain(LoopTop, LoopChain, BlockWorkList, &LoopBlockSet);
rotateLoop(LoopChain, ExitingBB, LoopBlockSet);
DEBUG({
// Crash at the end so we get all of the debugging output first.
@ -733,7 +846,8 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F,
<< " Chain header: " << getBlockName(*LoopChain.begin()) << "\n";
}
for (BlockChain::iterator BCI = LoopChain.begin(), BCE = LoopChain.end();
BCI != BCE; ++BCI)
BCI != BCE; ++BCI) {
dbgs() << " ... " << getBlockName(*BCI) << "\n";
if (!LoopBlockSet.erase(*BCI)) {
// We don't mark the loop as bad here because there are real situations
// where this can occur. For example, with an unanalyzable fallthrough
@ -743,6 +857,7 @@ void MachineBlockPlacement::buildLoopChains(MachineFunction &F,
<< " Chain header: " << getBlockName(*LoopChain.begin()) << "\n"
<< " Bad block: " << getBlockName(*BCI) << "\n";
}
}
if (!LoopBlockSet.empty()) {
BadLoop = true;
@ -882,28 +997,33 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
MachineBasicBlock *TBB = 0, *FBB = 0; // For AnalyzeBranch.
if (!TII->AnalyzeBranch(F.back(), TBB, FBB, Cond))
F.back().updateTerminator();
}
/// \brief Recursive helper to align a loop and any nested loops.
static void AlignLoop(MachineFunction &F, MachineLoop *L, unsigned Align) {
// Recurse through nested loops.
for (MachineLoop::iterator I = L->begin(), E = L->end(); I != E; ++I)
AlignLoop(F, *I, Align);
L->getTopBlock()->setAlignment(Align);
}
/// \brief Align loop headers to target preferred alignments.
void MachineBlockPlacement::AlignLoops(MachineFunction &F) {
// Walk through the backedges of the function now that we have fully laid out
// the basic blocks and align the destination of each backedge. We don't rely
// on the loop info here so that we can align backedges in unnatural CFGs and
// backedges that were introduced purely because of the loop rotations done
// during this layout pass.
// FIXME: This isn't quite right, we shouldn't align backedges that result
// from blocks being sunken below the exit block for the function.
if (F.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
return;
unsigned Align = TLI->getPrefLoopAlignment();
if (!Align)
return; // Don't care about loop alignment.
for (MachineLoopInfo::iterator I = MLI->begin(), E = MLI->end(); I != E; ++I)
AlignLoop(F, *I, Align);
SmallPtrSet<MachineBasicBlock *, 16> PreviousBlocks;
for (BlockChain::iterator BI = FunctionChain.begin(),
BE = FunctionChain.end();
BI != BE; ++BI) {
PreviousBlocks.insert(*BI);
// Set alignment on the destination of all the back edges in the new
// ordering.
for (MachineBasicBlock::succ_iterator SI = (*BI)->succ_begin(),
SE = (*BI)->succ_end();
SI != SE; ++SI)
if (PreviousBlocks.count(*SI))
(*SI)->setAlignment(Align);
}
}
bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) {
@ -919,7 +1039,6 @@ bool MachineBlockPlacement::runOnMachineFunction(MachineFunction &F) {
assert(BlockToChain.empty());
buildCFGChains(F);
AlignLoops(F);
BlockToChain.clear();
ChainAllocator.DestroyAll();

View File

@ -37,8 +37,9 @@ static cl::opt<bool> DisableTailDuplicate("disable-tail-duplicate", cl::Hidden,
cl::desc("Disable tail duplication"));
static cl::opt<bool> DisableEarlyTailDup("disable-early-taildup", cl::Hidden,
cl::desc("Disable pre-register allocation tail duplication"));
static cl::opt<bool> EnableBlockPlacement("enable-block-placement",
cl::Hidden, cl::desc("Enable probability-driven block placement"));
static cl::opt<bool> DisableBlockPlacement("disable-block-placement",
cl::Hidden, cl::desc("Disable the probability-driven block placement, and "
"re-enable the old code placement pass"));
static cl::opt<bool> EnableBlockPlacementStats("enable-block-placement-stats",
cl::Hidden, cl::desc("Collect probability-driven block placement stats"));
static cl::opt<bool> DisableCodePlace("disable-code-place", cl::Hidden,
@ -206,7 +207,7 @@ TargetPassConfig::~TargetPassConfig() {
// Out of line constructor provides default values for pass options and
// registers all common codegen passes.
TargetPassConfig::TargetPassConfig(TargetMachine *tm, PassManagerBase &pm)
: ImmutablePass(ID), TM(tm), PM(pm), Impl(0), Initialized(false),
: ImmutablePass(ID), TM(tm), PM(&pm), Impl(0), Initialized(false),
DisableVerify(false),
EnableTailMerge(true) {
@ -233,7 +234,7 @@ TargetPassConfig *LLVMTargetMachine::createPassConfig(PassManagerBase &PM) {
}
TargetPassConfig::TargetPassConfig()
: ImmutablePass(ID), PM(*(PassManagerBase*)0) {
: ImmutablePass(ID), PM(0) {
llvm_unreachable("TargetPassConfig should not be constructed on-the-fly");
}
@ -268,16 +269,16 @@ AnalysisID TargetPassConfig::addPass(char &ID) {
Pass *P = Pass::createPass(FinalID);
if (!P)
llvm_unreachable("Pass ID not registered");
PM.add(P);
PM->add(P);
return FinalID;
}
void TargetPassConfig::printAndVerify(const char *Banner) const {
if (TM->shouldPrintMachineCode())
PM.add(createMachineFunctionPrinterPass(dbgs(), Banner));
PM->add(createMachineFunctionPrinterPass(dbgs(), Banner));
if (VerifyMachineCode)
PM.add(createMachineVerifierPass(Banner));
PM->add(createMachineVerifierPass(Banner));
}
/// Add common target configurable passes that perform LLVM IR to IR transforms
@ -287,46 +288,46 @@ void TargetPassConfig::addIRPasses() {
// Add TypeBasedAliasAnalysis before BasicAliasAnalysis so that
// BasicAliasAnalysis wins if they disagree. This is intended to help
// support "obvious" type-punning idioms.
PM.add(createTypeBasedAliasAnalysisPass());
PM.add(createBasicAliasAnalysisPass());
PM->add(createTypeBasedAliasAnalysisPass());
PM->add(createBasicAliasAnalysisPass());
// Before running any passes, run the verifier to determine if the input
// coming from the front-end and/or optimizer is valid.
if (!DisableVerify)
PM.add(createVerifierPass());
PM->add(createVerifierPass());
// Run loop strength reduction before anything else.
if (getOptLevel() != CodeGenOpt::None && !DisableLSR) {
PM.add(createLoopStrengthReducePass(getTargetLowering()));
PM->add(createLoopStrengthReducePass(getTargetLowering()));
if (PrintLSR)
PM.add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
PM->add(createPrintFunctionPass("\n\n*** Code after LSR ***\n", &dbgs()));
}
PM.add(createGCLoweringPass());
PM->add(createGCLoweringPass());
// Make sure that no unreachable blocks are instruction selected.
PM.add(createUnreachableBlockEliminationPass());
PM->add(createUnreachableBlockEliminationPass());
}
/// Add common passes that perform LLVM IR to IR transforms in preparation for
/// instruction selection.
void TargetPassConfig::addISelPrepare() {
if (getOptLevel() != CodeGenOpt::None && !DisableCGP)
PM.add(createCodeGenPreparePass(getTargetLowering()));
PM->add(createCodeGenPreparePass(getTargetLowering()));
PM.add(createStackProtectorPass(getTargetLowering()));
PM->add(createStackProtectorPass(getTargetLowering()));
addPreISel();
if (PrintISelInput)
PM.add(createPrintFunctionPass("\n\n"
"*** Final LLVM Code input to ISel ***\n",
&dbgs()));
PM->add(createPrintFunctionPass("\n\n"
"*** Final LLVM Code input to ISel ***\n",
&dbgs()));
// All passes which modify the LLVM IR are now complete; run the verifier
// to ensure that the IR is valid.
if (!DisableVerify)
PM.add(createVerifierPass());
PM->add(createVerifierPass());
}
/// Add the complete set of target-independent postISel code generator passes.
@ -404,7 +405,7 @@ void TargetPassConfig::addMachinePasses() {
// GC
addPass(GCMachineCodeAnalysisID);
if (PrintGCInfo)
PM.add(createGCInfoPrinter(dbgs()));
PM->add(createGCInfoPrinter(dbgs()));
// Basic block placement.
if (getOptLevel() != CodeGenOpt::None)
@ -521,7 +522,7 @@ void TargetPassConfig::addFastRegAlloc(FunctionPass *RegAllocPass) {
addPass(PHIEliminationID);
addPass(TwoAddressInstructionPassID);
PM.add(RegAllocPass);
PM->add(RegAllocPass);
printAndVerify("After Register Allocation");
}
@ -563,7 +564,7 @@ void TargetPassConfig::addOptimizedRegAlloc(FunctionPass *RegAllocPass) {
printAndVerify("After Machine Scheduling");
// Add the selected register allocation pass.
PM.add(RegAllocPass);
PM->add(RegAllocPass);
printAndVerify("After Register Allocation");
// FinalizeRegAlloc is convenient until MachineInstrBundles is more mature,
@ -610,10 +611,10 @@ void TargetPassConfig::addMachineLateOptimization() {
/// Add standard basic block placement passes.
void TargetPassConfig::addBlockPlacement() {
AnalysisID ID = &NoPassID;
if (EnableBlockPlacement) {
// MachineBlockPlacement is an experimental pass which is disabled by
// default currently. Eventually it should subsume CodePlacementOpt, so
// when enabled, the other is disabled.
if (!DisableBlockPlacement) {
// MachineBlockPlacement is a new pass which subsumes the functionality of
// CodePlacementOpt. The old code placement pass can be restored by
// disabling block placement, but eventually it will be removed.
ID = addPass(MachineBlockPlacementID);
} else {
ID = addPass(CodePlacementOptID);
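A hedged sketch, deliberately unrelated to any LLVM type, of why the PM member in this file changes from a reference to a pointer: a reference member cannot be left unbound, so the dummy constructor had to bind it through a dereferenced null pointer, which is undefined behavior; a plain pointer member can simply start out as 0.
class Config {
  int *P;                        // was: int &P;
public:
  explicit Config(int &Real) : P(&Real) {}
  Config() : P(0) {}             // was: P(*(int*)0) -- undefined behavior
  void bump() { if (P) ++*P; }   // uses now go through a null check and ->
};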

View File

@ -39,8 +39,8 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
LiveIntervals *lis)
: ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
InstrItins(mf.getTarget().getInstrItineraryData()), LIS(lis),
IsPostRA(IsPostRAFlag), UnitLatencies(false), LoopRegs(MLI, MDT),
FirstDbgValue(0) {
IsPostRA(IsPostRAFlag), UnitLatencies(false), CanHandleTerminators(false),
LoopRegs(MLI, MDT), FirstDbgValue(0) {
assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
DbgValues.clear();
assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
@ -554,7 +554,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA) {
continue;
}
assert(!MI->isTerminator() && !MI->isLabel() &&
assert((!MI->isTerminator() || CanHandleTerminators) && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
SUnit *SU = MISUnitMap[MI];

View File

@ -1080,6 +1080,7 @@ void DAGCombiner::Run(CombineLevel AtLevel) {
// If the root changed (e.g. it was a dead load, update the root).
DAG.setRoot(Dummy.getValue());
DAG.RemoveDeadNodes();
}
SDValue DAGCombiner::visit(SDNode *N) {

View File

@ -417,7 +417,8 @@ SDValue VectorLegalizer::ExpandVSELECT(SDValue Op) {
Op1 = DAG.getNode(ISD::AND, DL, VT, Op1, Mask);
Op2 = DAG.getNode(ISD::AND, DL, VT, Op2, NotMask);
return DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
SDValue Val = DAG.getNode(ISD::OR, DL, VT, Op1, Op2);
return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Val);
}
SDValue VectorLegalizer::ExpandUINT_TO_FLOAT(SDValue Op) {

View File

@ -138,9 +138,11 @@ static void AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
// Don't add glue from a node to itself.
if (GlueDestNode == N) return;
// Don't add glue to something which already has glue.
if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return;
// Don't add glue to something that already has it, either as a use or value.
if (N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue ||
N->getValueType(N->getNumValues() - 1) == MVT::Glue) {
return;
}
for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
VTs.push_back(N->getValueType(I));

View File

@ -5050,7 +5050,7 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
}
case Intrinsic::gcroot:
if (GFI) {
const Value *Alloca = I.getArgOperand(0);
const Value *Alloca = I.getArgOperand(0)->stripPointerCasts();
const Constant *TypeMap = cast<Constant>(I.getArgOperand(1));
FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode());

View File

@ -1367,8 +1367,9 @@ bool TargetLowering::SimplifyDemandedBits(SDValue Op,
// bits on that side are also known to be set on the other side, turn this
// into an AND, as we know the bits will be cleared.
// e.g. (X | C1) ^ C2 --> (X | C1) & ~C2 iff (C1&C2) == C2
if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known
if ((KnownOne & KnownOne2) == KnownOne) {
// NB: it is okay if more bits are known than are requested
if ((NewMask & (KnownZero|KnownOne)) == NewMask) { // all known on one side
if (KnownOne == KnownOne2) { // set bits are the same on both sides
EVT VT = Op.getValueType();
SDValue ANDC = TLO.DAG.getConstant(~KnownOne & NewMask, VT);
return TLO.CombineTo(Op, TLO.DAG.getNode(ISD::AND, dl, VT,
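A small standalone check, not from the patch, of the identity quoted in the comment above: when every set bit of C2 is also set in C1, xor-ing (X | C1) with C2 and and-ing it with ~C2 clear exactly the same bits.
#include <cassert>
int main() {
  const unsigned C1 = 0xF0, C2 = 0x30;              // (C1 & C2) == C2
  for (unsigned X = 0; X != 256; ++X)
    assert(((X | C1) ^ C2) == ((X | C1) & ~C2));    // holds for every X
  return 0;
}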

View File

@ -34,7 +34,8 @@ void SlotIndexes::releaseMemory() {
mi2iMap.clear();
MBBRanges.clear();
idx2MBBMap.clear();
clearList();
indexList.clear();
ileAllocator.Reset();
}
bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
@ -45,17 +46,15 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
// iterator in lock-step (though skipping it over indexes which have
// null pointers in the instruction field).
// At each iteration assert that the instruction pointed to in the index
// is the same one pointed to by the MI iterator. This
// is the same one pointed to by the MI iterator. This
// FIXME: This can be simplified. The mi2iMap_, Idx2MBBMap, etc. should
// only need to be set up once after the first numbering is computed.
mf = &fn;
initList();
// Check that the list contains only the sentinal.
assert(indexListHead->getNext() == 0 &&
"Index list non-empty at initial numbering?");
assert(indexList.empty() && "Index list non-empty at initial numbering?");
assert(idx2MBBMap.empty() &&
"Index -> MBB mapping non-empty at initial numbering?");
assert(MBBRanges.empty() &&
@ -68,7 +67,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
MBBRanges.resize(mf->getNumBlockIDs());
idx2MBBMap.reserve(mf->size());
push_back(createEntry(0, index));
indexList.push_back(createEntry(0, index));
// Iterate over the function.
for (MachineFunction::iterator mbbItr = mf->begin(), mbbEnd = mf->end();
@ -76,7 +75,7 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
MachineBasicBlock *mbb = &*mbbItr;
// Insert an index for the MBB start.
SlotIndex blockStartIndex(back(), SlotIndex::Slot_Block);
SlotIndex blockStartIndex(&indexList.back(), SlotIndex::Slot_Block);
for (MachineBasicBlock::iterator miItr = mbb->begin(), miEnd = mbb->end();
miItr != miEnd; ++miItr) {
@ -85,20 +84,20 @@ bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {
continue;
// Insert a store index for the instr.
push_back(createEntry(mi, index += SlotIndex::InstrDist));
indexList.push_back(createEntry(mi, index += SlotIndex::InstrDist));
// Save this base index in the maps.
mi2iMap.insert(std::make_pair(mi, SlotIndex(back(),
mi2iMap.insert(std::make_pair(mi, SlotIndex(&indexList.back(),
SlotIndex::Slot_Block)));
++functionSize;
}
// We insert one blank instruction between basic blocks.
push_back(createEntry(0, index += SlotIndex::InstrDist));
indexList.push_back(createEntry(0, index += SlotIndex::InstrDist));
MBBRanges[mbb->getNumber()].first = blockStartIndex;
MBBRanges[mbb->getNumber()].second = SlotIndex(back(),
MBBRanges[mbb->getNumber()].second = SlotIndex(&indexList.back(),
SlotIndex::Slot_Block);
idx2MBBMap.push_back(IdxMBBPair(blockStartIndex, mbb));
}
@ -119,38 +118,37 @@ void SlotIndexes::renumberIndexes() {
unsigned index = 0;
for (IndexListEntry *curEntry = front(); curEntry != getTail();
curEntry = curEntry->getNext()) {
curEntry->setIndex(index);
for (IndexList::iterator I = indexList.begin(), E = indexList.end();
I != E; ++I) {
I->setIndex(index);
index += SlotIndex::InstrDist;
}
}
// Renumber indexes locally after curEntry was inserted, but failed to get a new
// Renumber indexes locally after curItr was inserted, but failed to get a new
// index.
void SlotIndexes::renumberIndexes(IndexListEntry *curEntry) {
void SlotIndexes::renumberIndexes(IndexList::iterator curItr) {
// Number indexes with half the default spacing so we can catch up quickly.
const unsigned Space = SlotIndex::InstrDist/2;
assert((Space & 3) == 0 && "InstrDist must be a multiple of 2*NUM");
IndexListEntry *start = curEntry->getPrev();
unsigned index = start->getIndex();
IndexListEntry *tail = getTail();
IndexList::iterator startItr = prior(curItr);
unsigned index = startItr->getIndex();
do {
curEntry->setIndex(index += Space);
curEntry = curEntry->getNext();
curItr->setIndex(index += Space);
++curItr;
// If the next index is bigger, we have caught up.
} while (curEntry != tail && curEntry->getIndex() <= index);
} while (curItr != indexList.end() && curItr->getIndex() <= index);
DEBUG(dbgs() << "\n*** Renumbered SlotIndexes " << start->getIndex() << '-'
DEBUG(dbgs() << "\n*** Renumbered SlotIndexes " << startItr->getIndex() << '-'
<< index << " ***\n");
++NumLocalRenum;
}
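A short numeric walk-through of the local renumbering above, with made-up values and 16 standing in for SlotIndex::InstrDist: if the existing indexes are 0, 16, 32 and a new entry squeezed in after 0 finds no free number, the loop assigns it 0 + 8 = 8; the next entry already has index 16, which is greater than 8, so the loop stops after a single step and only the inserted entry is renumbered instead of the whole function.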
void SlotIndexes::dump() const {
for (const IndexListEntry *itr = front(); itr != getTail();
itr = itr->getNext()) {
for (IndexList::const_iterator itr = indexList.begin();
itr != indexList.end(); ++itr) {
dbgs() << itr->getIndex() << " ";
if (itr->getInstr() != 0) {
@ -168,7 +166,7 @@ void SlotIndexes::dump() const {
// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
if (isValid())
os << entry().getIndex() << "Berd"[getSlot()];
os << listEntry()->getIndex() << "Berd"[getSlot()];
else
os << "invalid";
}

View File

@ -0,0 +1,214 @@
//===-- GDBRegistrar.cpp - Registers objects with GDB ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "JITRegistrar.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/MutexGuard.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Compiler.h"
using namespace llvm;
// This must be kept in sync with gdb/gdb/jit.h .
extern "C" {
typedef enum {
JIT_NOACTION = 0,
JIT_REGISTER_FN,
JIT_UNREGISTER_FN
} jit_actions_t;
struct jit_code_entry {
struct jit_code_entry *next_entry;
struct jit_code_entry *prev_entry;
const char *symfile_addr;
uint64_t symfile_size;
};
struct jit_descriptor {
uint32_t version;
// This should be jit_actions_t, but we want to be specific about the
// bit-width.
uint32_t action_flag;
struct jit_code_entry *relevant_entry;
struct jit_code_entry *first_entry;
};
// We put information about the JITed function in this global, which the
// debugger reads. Make sure to specify the version statically, because the
// debugger checks the version before we can set it during runtime.
static struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
// Debuggers put a breakpoint in this function.
LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() { }
}
namespace {
// Buffer for an in-memory object file in executable memory
typedef llvm::DenseMap< const char*,
std::pair<std::size_t, jit_code_entry*> >
RegisteredObjectBufferMap;
/// Global access point for the JIT debugging interface designed for use with a
/// singleton toolbox. Handles thread-safe registration and deregistration of
/// object files that are in executable memory managed by the client of this
/// class.
class GDBJITRegistrar : public JITRegistrar {
/// A map of in-memory object files that have been registered with the
/// JIT interface.
RegisteredObjectBufferMap ObjectBufferMap;
public:
/// Instantiates the JIT service.
GDBJITRegistrar() : ObjectBufferMap() {}
/// Unregisters each object that was previously registered and releases all
/// internal resources.
virtual ~GDBJITRegistrar();
/// Creates an entry in the JIT registry for the buffer @p Object,
/// which must contain an object file in executable memory with any
/// debug information for the debugger.
void registerObject(const MemoryBuffer &Object);
/// Removes the internal registration of @p Object, and
/// frees associated resources.
/// Returns true if @p Object was found in ObjectBufferMap.
bool deregisterObject(const MemoryBuffer &Object);
private:
/// Deregister the debug info for the given object file from the debugger
/// and delete any temporary copies. This private method does not remove
/// the function from Map so that it can be called while iterating over Map.
void deregisterObjectInternal(RegisteredObjectBufferMap::iterator I);
};
/// Lock used to serialize all jit registration events, since they
/// modify global variables.
llvm::sys::Mutex JITDebugLock;
/// Acquire the lock and do the registration.
void NotifyDebugger(jit_code_entry* JITCodeEntry) {
llvm::MutexGuard locked(JITDebugLock);
__jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
// Insert this entry at the head of the list.
JITCodeEntry->prev_entry = NULL;
jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry;
JITCodeEntry->next_entry = NextEntry;
if (NextEntry != NULL) {
NextEntry->prev_entry = JITCodeEntry;
}
__jit_debug_descriptor.first_entry = JITCodeEntry;
__jit_debug_descriptor.relevant_entry = JITCodeEntry;
__jit_debug_register_code();
}
GDBJITRegistrar::~GDBJITRegistrar() {
// Free all registered object files.
for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(), E = ObjectBufferMap.end();
I != E; ++I) {
// Call the private method that doesn't update the map so our iterator
// doesn't break.
deregisterObjectInternal(I);
}
ObjectBufferMap.clear();
}
void GDBJITRegistrar::registerObject(const MemoryBuffer &Object) {
const char *Buffer = Object.getBufferStart();
size_t Size = Object.getBufferSize();
assert(Buffer && "Attempt to register a null object with a debugger.");
assert(ObjectBufferMap.find(Buffer) == ObjectBufferMap.end() &&
"Second attempt to perform debug registration.");
jit_code_entry* JITCodeEntry = new jit_code_entry();
if (JITCodeEntry == 0) {
llvm::report_fatal_error(
"Allocation failed when registering a JIT entry!\n");
}
else {
JITCodeEntry->symfile_addr = Buffer;
JITCodeEntry->symfile_size = Size;
ObjectBufferMap[Buffer] = std::make_pair(Size, JITCodeEntry);
NotifyDebugger(JITCodeEntry);
}
}
bool GDBJITRegistrar::deregisterObject(const MemoryBuffer& Object) {
const char *Buffer = Object.getBufferStart();
RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(Buffer);
if (I != ObjectBufferMap.end()) {
deregisterObjectInternal(I);
ObjectBufferMap.erase(I);
return true;
}
return false;
}
void GDBJITRegistrar::deregisterObjectInternal(
RegisteredObjectBufferMap::iterator I) {
jit_code_entry*& JITCodeEntry = I->second.second;
// Acquire the lock and do the unregistration.
{
llvm::MutexGuard locked(JITDebugLock);
__jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
// Remove the jit_code_entry from the linked list.
jit_code_entry* PrevEntry = JITCodeEntry->prev_entry;
jit_code_entry* NextEntry = JITCodeEntry->next_entry;
if (NextEntry) {
NextEntry->prev_entry = PrevEntry;
}
if (PrevEntry) {
PrevEntry->next_entry = NextEntry;
}
else {
assert(__jit_debug_descriptor.first_entry == JITCodeEntry);
__jit_debug_descriptor.first_entry = NextEntry;
}
// Tell the debugger which entry we removed, and unregister the code.
__jit_debug_descriptor.relevant_entry = JITCodeEntry;
__jit_debug_register_code();
}
delete JITCodeEntry;
JITCodeEntry = NULL;
}
} // end namespace
namespace llvm {
JITRegistrar& JITRegistrar::getGDBRegistrar() {
static GDBJITRegistrar* sRegistrar = NULL;
if (sRegistrar == NULL) {
// The mutex is here so that it won't slow down access once the registrar
// is instantiated
llvm::MutexGuard locked(JITDebugLock);
// Check again to be sure another thread didn't create this while we waited
if (sRegistrar == NULL) {
sRegistrar = new GDBJITRegistrar;
}
}
return *sRegistrar;
}
} // namespace llvm
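A hedged usage sketch, not part of the patch, of the interface defined here: a JIT client hands the in-memory object file to the singleton registrar so the debugger can pick up its debug info, and deregisters it before the memory goes away.
#include "JITRegistrar.h"
#include "llvm/Support/MemoryBuffer.h"
using namespace llvm;
void debugRegistrationSketch(const MemoryBuffer &ObjectInExecutableMemory) {
  JITRegistrar &Registrar = JITRegistrar::getGDBRegistrar();
  Registrar.registerObject(ObjectInExecutableMemory);
  // ... run the JITed code; GDB can now resolve symbols inside it ...
  Registrar.deregisterObject(ObjectInExecutableMemory);
}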

View File

@ -0,0 +1,43 @@
//===-- JITRegistrar.h - Registers objects with a debugger ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H
#define LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H
#include "llvm/Support/MemoryBuffer.h"
namespace llvm {
/// Global access point for the JIT debugging interface.
class JITRegistrar {
public:
/// Instantiates the JIT service.
JITRegistrar() {}
/// Unregisters each object that was previously registered and releases all
/// internal resources.
virtual ~JITRegistrar() {}
/// Creates an entry in the JIT registry for the buffer @p Object,
/// which must contain an object file in executable memory with any
/// debug information for the debugger.
virtual void registerObject(const MemoryBuffer &Object) = 0;
/// Removes the internal registration of @p Object, and
/// frees associated resources.
/// Returns true if @p Object was previously registered.
virtual bool deregisterObject(const MemoryBuffer &Object) = 0;
/// Returns a reference to a GDB JIT registrar singleton
static JITRegistrar& getGDBRegistrar();
};
} // end namespace llvm
#endif // LLVM_EXECUTION_ENGINE_JIT_REGISTRAR_H

View File

@ -0,0 +1,59 @@
//===---- ObjectImage.h - Format independent executable object image ------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares a file format independent ObjectImage class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H
#define LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H
#include "llvm/Object/ObjectFile.h"
namespace llvm {
class ObjectImage {
ObjectImage(); // = delete
ObjectImage(const ObjectImage &other); // = delete
protected:
object::ObjectFile *ObjFile;
public:
ObjectImage(object::ObjectFile *Obj) { ObjFile = Obj; }
virtual ~ObjectImage() {}
virtual object::symbol_iterator begin_symbols() const
{ return ObjFile->begin_symbols(); }
virtual object::symbol_iterator end_symbols() const
{ return ObjFile->end_symbols(); }
virtual object::section_iterator begin_sections() const
{ return ObjFile->begin_sections(); }
virtual object::section_iterator end_sections() const
{ return ObjFile->end_sections(); }
virtual /* Triple::ArchType */ unsigned getArch() const
{ return ObjFile->getArch(); }
// Subclasses can override these methods to update the image with loaded
// addresses for sections and common symbols
virtual void updateSectionAddress(const object::SectionRef &Sec,
uint64_t Addr) {}
virtual void updateSymbolAddress(const object::SymbolRef &Sym, uint64_t Addr)
{}
// Subclasses can override this method to provide JIT debugging support
virtual void registerWithDebugger() {}
virtual void deregisterWithDebugger() {}
};
} // end namespace llvm
#endif // LLVM_RUNTIMEDYLD_OBJECT_IMAGE_H

View File

@ -59,11 +59,17 @@ void RuntimeDyldImpl::mapSectionAddress(void *LocalAddress,
llvm_unreachable("Attempting to remap address of unknown section!");
}
// Subclasses can implement this method to create specialized image instances
// The caller owns the pointer that is returned.
ObjectImage *RuntimeDyldImpl::createObjectImage(const MemoryBuffer *InputBuffer) {
ObjectFile *ObjFile = ObjectFile::createObjectFile(const_cast<MemoryBuffer*>
(InputBuffer));
ObjectImage *Obj = new ObjectImage(ObjFile);
return Obj;
}
bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
// FIXME: ObjectFile don't modify MemoryBuffer.
// It should use const MemoryBuffer as parameter.
OwningPtr<ObjectFile> obj(ObjectFile::createObjectFile(
const_cast<MemoryBuffer*>(InputBuffer)));
OwningPtr<ObjectImage> obj(createObjectImage(InputBuffer));
if (!obj)
report_fatal_error("Unable to create object image from memory buffer!");
@ -110,7 +116,8 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
(uintptr_t)FileOffset;
uintptr_t SectOffset = (uintptr_t)(SymPtr - (const uint8_t*)sData.begin());
unsigned SectionID =
findOrEmitSection(*si,
findOrEmitSection(*obj,
*si,
SymType == object::SymbolRef::ST_Function,
LocalSections);
bool isGlobal = flags & SymbolRef::SF_Global;
@ -128,7 +135,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
// Allocate common symbols
if (CommonSize != 0)
emitCommonSymbols(CommonSymbols, CommonSize, LocalSymbols);
emitCommonSymbols(*obj, CommonSymbols, CommonSize, LocalSymbols);
// Parse and process relocations
DEBUG(dbgs() << "Parse relocations:\n");
@ -145,7 +152,7 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
// If it's first relocation in this section, find its SectionID
if (isFirstRelocation) {
SectionID = findOrEmitSection(*si, true, LocalSections);
SectionID = findOrEmitSection(*obj, *si, true, LocalSections);
DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
isFirstRelocation = false;
}
@ -164,10 +171,14 @@ bool RuntimeDyldImpl::loadObject(const MemoryBuffer *InputBuffer) {
processRelocationRef(RI, *obj, LocalSections, LocalSymbols, Stubs);
}
}
handleObjectLoaded(obj.take());
return false;
}
unsigned RuntimeDyldImpl::emitCommonSymbols(const CommonSymbolMap &Map,
unsigned RuntimeDyldImpl::emitCommonSymbols(ObjectImage &Obj,
const CommonSymbolMap &Map,
uint64_t TotalSize,
LocalSymbolMap &LocalSymbols) {
// Allocate memory for the section
@ -191,6 +202,7 @@ unsigned RuntimeDyldImpl::emitCommonSymbols(const CommonSymbolMap &Map,
uint64_t Size = it->second;
StringRef Name;
it->first.getName(Name);
Obj.updateSymbolAddress(it->first, (uint64_t)Addr);
LocalSymbols[Name.data()] = SymbolLoc(SectionID, Offset);
Offset += Size;
Addr += Size;
@ -199,7 +211,8 @@ unsigned RuntimeDyldImpl::emitCommonSymbols(const CommonSymbolMap &Map,
return SectionID;
}
unsigned RuntimeDyldImpl::emitSection(const SectionRef &Section,
unsigned RuntimeDyldImpl::emitSection(ObjectImage &Obj,
const SectionRef &Section,
bool IsCode) {
unsigned StubBufSize = 0,
@ -257,6 +270,7 @@ unsigned RuntimeDyldImpl::emitSection(const SectionRef &Section,
<< " StubBufSize: " << StubBufSize
<< " Allocate: " << Allocate
<< "\n");
Obj.updateSectionAddress(Section, (uint64_t)Addr);
}
else {
// Even if we didn't load the section, we need to record an entry for it
@ -277,7 +291,8 @@ unsigned RuntimeDyldImpl::emitSection(const SectionRef &Section,
return SectionID;
}
unsigned RuntimeDyldImpl::findOrEmitSection(const SectionRef &Section,
unsigned RuntimeDyldImpl::findOrEmitSection(ObjectImage &Obj,
const SectionRef &Section,
bool IsCode,
ObjSectionToIDMap &LocalSections) {
@ -286,7 +301,7 @@ unsigned RuntimeDyldImpl::findOrEmitSection(const SectionRef &Section,
if (i != LocalSections.end())
SectionID = i->second;
else {
SectionID = emitSection(Section, IsCode);
SectionID = emitSection(Obj, Section, IsCode);
LocalSections[Section] = SectionID;
}
return SectionID;

View File

@ -20,11 +20,176 @@
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/ELF.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Object/ELF.h"
#include "JITRegistrar.h"
using namespace llvm;
using namespace llvm::object;
namespace {
template<support::endianness target_endianness, bool is64Bits>
class DyldELFObject : public ELFObjectFile<target_endianness, is64Bits> {
LLVM_ELF_IMPORT_TYPES(target_endianness, is64Bits)
typedef Elf_Shdr_Impl<target_endianness, is64Bits> Elf_Shdr;
typedef Elf_Sym_Impl<target_endianness, is64Bits> Elf_Sym;
typedef Elf_Rel_Impl<target_endianness, is64Bits, false> Elf_Rel;
typedef Elf_Rel_Impl<target_endianness, is64Bits, true> Elf_Rela;
typedef typename ELFObjectFile<target_endianness, is64Bits>::
Elf_Ehdr Elf_Ehdr;
typedef typename ELFDataTypeTypedefHelper<
target_endianness, is64Bits>::value_type addr_type;
protected:
// This duplicates the 'Data' member in the 'Binary' base class
// but it is necessary to workaround a bug in gcc 4.2
MemoryBuffer *InputData;
public:
DyldELFObject(MemoryBuffer *Object, error_code &ec);
void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr);
const MemoryBuffer& getBuffer() const { return *InputData; }
// Methods for type inquiry through isa, cast, and dyn_cast
static inline bool classof(const Binary *v) {
return (isa<ELFObjectFile<target_endianness, is64Bits> >(v)
&& classof(cast<ELFObjectFile<target_endianness, is64Bits> >(v)));
}
static inline bool classof(
const ELFObjectFile<target_endianness, is64Bits> *v) {
return v->isDyldType();
}
static inline bool classof(const DyldELFObject *v) {
return true;
}
};
template<support::endianness target_endianness, bool is64Bits>
class ELFObjectImage : public ObjectImage {
protected:
DyldELFObject<target_endianness, is64Bits> *DyldObj;
bool Registered;
public:
ELFObjectImage(DyldELFObject<target_endianness, is64Bits> *Obj)
: ObjectImage(Obj),
DyldObj(Obj),
Registered(false) {}
virtual ~ELFObjectImage() {
if (Registered)
deregisterWithDebugger();
}
// Subclasses can override these methods to update the image with loaded
// addresses for sections and common symbols
virtual void updateSectionAddress(const SectionRef &Sec, uint64_t Addr)
{
DyldObj->updateSectionAddress(Sec, Addr);
}
virtual void updateSymbolAddress(const SymbolRef &Sym, uint64_t Addr)
{
DyldObj->updateSymbolAddress(Sym, Addr);
}
virtual void registerWithDebugger()
{
JITRegistrar::getGDBRegistrar().registerObject(DyldObj->getBuffer());
Registered = true;
}
virtual void deregisterWithDebugger()
{
JITRegistrar::getGDBRegistrar().deregisterObject(DyldObj->getBuffer());
}
};
template<support::endianness target_endianness, bool is64Bits>
DyldELFObject<target_endianness, is64Bits>::DyldELFObject(MemoryBuffer *Object,
error_code &ec)
: ELFObjectFile<target_endianness, is64Bits>(Object, ec),
InputData(Object) {
this->isDyldELFObject = true;
}
template<support::endianness target_endianness, bool is64Bits>
void DyldELFObject<target_endianness, is64Bits>::updateSectionAddress(
const SectionRef &Sec,
uint64_t Addr) {
DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
Elf_Shdr *shdr = const_cast<Elf_Shdr*>(
reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
shdr->sh_addr = static_cast<addr_type>(Addr);
}
template<support::endianness target_endianness, bool is64Bits>
void DyldELFObject<target_endianness, is64Bits>::updateSymbolAddress(
const SymbolRef &SymRef,
uint64_t Addr) {
Elf_Sym *sym = const_cast<Elf_Sym*>(
ELFObjectFile<target_endianness, is64Bits>::
getSymbol(SymRef.getRawDataRefImpl()));
// This assumes the address passed in matches the target address bitness
// The template-based type cast handles everything else.
sym->st_value = static_cast<addr_type>(Addr);
}
} // namespace
namespace llvm {
ObjectImage *RuntimeDyldELF::createObjectImage(
const MemoryBuffer *ConstInputBuffer) {
MemoryBuffer *InputBuffer = const_cast<MemoryBuffer*>(ConstInputBuffer);
std::pair<unsigned char, unsigned char> Ident = getElfArchType(InputBuffer);
error_code ec;
if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2LSB) {
DyldELFObject<support::little, false> *Obj =
new DyldELFObject<support::little, false>(InputBuffer, ec);
return new ELFObjectImage<support::little, false>(Obj);
}
else if (Ident.first == ELF::ELFCLASS32 && Ident.second == ELF::ELFDATA2MSB) {
DyldELFObject<support::big, false> *Obj =
new DyldELFObject<support::big, false>(InputBuffer, ec);
return new ELFObjectImage<support::big, false>(Obj);
}
else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2MSB) {
DyldELFObject<support::big, true> *Obj =
new DyldELFObject<support::big, true>(InputBuffer, ec);
return new ELFObjectImage<support::big, true>(Obj);
}
else if (Ident.first == ELF::ELFCLASS64 && Ident.second == ELF::ELFDATA2LSB) {
DyldELFObject<support::little, true> *Obj =
new DyldELFObject<support::little, true>(InputBuffer, ec);
return new ELFObjectImage<support::little, true>(Obj);
}
else
llvm_unreachable("Unexpected ELF format");
}
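The four-way dispatch above keys off the pair returned by getElfArchType(), which now comes from llvm/Object/ELF.h (the local copy is deleted from ObjectFile.cpp further down in this commit). As a rough sketch of what that helper does, assuming it still reads the two e_ident bytes the way the removed copy did and using the headers already pulled in above:

static std::pair<unsigned char, unsigned char>
sketchElfArchType(llvm::MemoryBuffer *Object) {
  using namespace llvm;
  if (Object->getBufferSize() < ELF::EI_NIDENT)
    return std::make_pair((uint8_t)ELF::ELFCLASSNONE, (uint8_t)ELF::ELFDATANONE);
  // e_ident[EI_CLASS] selects 32- vs 64-bit, e_ident[EI_DATA] the endianness.
  return std::make_pair((uint8_t)Object->getBufferStart()[ELF::EI_CLASS],
                        (uint8_t)Object->getBufferStart()[ELF::EI_DATA]);
}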
void RuntimeDyldELF::handleObjectLoaded(ObjectImage *Obj)
{
Obj->registerWithDebugger();
// Save the loaded object. It will deregister itself when deleted
LoadedObject = Obj;
}
RuntimeDyldELF::~RuntimeDyldELF() {
if (LoadedObject)
delete LoadedObject;
}
void RuntimeDyldELF::resolveX86_64Relocation(uint8_t *LocalAddress,
uint64_t FinalAddress,
@ -167,7 +332,7 @@ void RuntimeDyldELF::resolveRelocation(uint8_t *LocalAddress,
}
void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
const ObjectFile &Obj,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
LocalSymbolMap &Symbols,
StubMap &Stubs) {
@ -206,7 +371,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
if (si == Obj.end_sections())
llvm_unreachable("Symbol section not found, bad object file format!");
DEBUG(dbgs() << "\t\tThis is section symbol\n");
Value.SectionID = findOrEmitSection((*si), true, ObjSectionToID);
Value.SectionID = findOrEmitSection(Obj, (*si), true, ObjSectionToID);
Value.Addend = Addend;
break;
}
@ -236,7 +401,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
// Look up for existing stub.
StubMap::const_iterator i = Stubs.find(Value);
if (i != Stubs.end()) {
resolveRelocation(Target, Section.LoadAddress, (uint64_t)Section.Address +
resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address +
i->second, RelType, 0);
DEBUG(dbgs() << " Stub function found\n");
} else {
@ -247,7 +412,7 @@ void RuntimeDyldELF::processRelocationRef(const ObjRelocationInfo &Rel,
Section.StubOffset);
AddRelocation(Value, Rel.SectionID,
StubTargetAddr - Section.Address, ELF::R_ARM_ABS32);
resolveRelocation(Target, Section.LoadAddress, (uint64_t)Section.Address +
resolveRelocation(Target, (uint64_t)Target, (uint64_t)Section.Address +
Section.StubOffset, RelType, 0);
Section.StubOffset += getMaxStubSize();
}

View File

@ -22,6 +22,8 @@ using namespace llvm;
namespace llvm {
class RuntimeDyldELF : public RuntimeDyldImpl {
protected:
ObjectImage *LoadedObject;
void resolveX86_64Relocation(uint8_t *LocalAddress,
uint64_t FinalAddress,
uint64_t Value,
@ -47,12 +49,18 @@ protected:
int64_t Addend);
virtual void processRelocationRef(const ObjRelocationInfo &Rel,
const ObjectFile &Obj,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
LocalSymbolMap &Symbols, StubMap &Stubs);
virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer);
virtual void handleObjectLoaded(ObjectImage *Obj);
public:
RuntimeDyldELF(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
RuntimeDyldELF(RTDyldMemoryManager *mm)
: RuntimeDyldImpl(mm), LoadedObject(0) {}
virtual ~RuntimeDyldELF();
bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const;
};

View File

@ -29,6 +29,7 @@
#include "llvm/ADT/Triple.h"
#include <map>
#include "llvm/Support/Format.h"
#include "ObjectImage.h"
using namespace llvm;
using namespace llvm::object;
@ -154,7 +155,8 @@ protected:
/// \brief Emits a section containing common symbols.
/// \return SectionID.
unsigned emitCommonSymbols(const CommonSymbolMap &Map,
unsigned emitCommonSymbols(ObjectImage &Obj,
const CommonSymbolMap &Map,
uint64_t TotalSize,
LocalSymbolMap &Symbols);
@ -162,14 +164,18 @@ protected:
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emission, else allocateDataSection() will be used.
/// \return SectionID.
unsigned emitSection(const SectionRef &Section, bool IsCode);
unsigned emitSection(ObjectImage &Obj,
const SectionRef &Section,
bool IsCode);
/// \brief Find Section in LocalSections. If the section is not found, emit
/// it and store it in LocalSections.
/// \param IsCode if it's true then allocateCodeSection() will be
/// used for emission, else allocateDataSection() will be used.
/// \return SectionID.
unsigned findOrEmitSection(const SectionRef &Section, bool IsCode,
unsigned findOrEmitSection(ObjectImage &Obj,
const SectionRef &Section,
bool IsCode,
ObjSectionToIDMap &LocalSections);
/// \brief If Value.SymbolName is NULL then store relocation to the
@ -200,11 +206,18 @@ protected:
/// \brief Parses the object file relocation and stores it to Relocations
/// or SymbolRelocations, depending on the object file type.
virtual void processRelocationRef(const ObjRelocationInfo &Rel,
const ObjectFile &Obj,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
LocalSymbolMap &Symbols, StubMap &Stubs) = 0;
void resolveSymbols();
virtual ObjectImage *createObjectImage(const MemoryBuffer *InputBuffer);
virtual void handleObjectLoaded(ObjectImage *Obj)
{
// Subclasses may choose to retain this image if they have a use for it
delete Obj;
}
public:
RuntimeDyldImpl(RTDyldMemoryManager *mm) : MemMgr(mm), HasError(false) {}

View File

@ -205,7 +205,7 @@ resolveARMRelocation(uint8_t *LocalAddress,
}
void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
const ObjectFile &Obj,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
LocalSymbolMap &Symbols,
StubMap &Stubs) {
@ -246,7 +246,7 @@ void RuntimeDyldMachO::processRelocationRef(const ObjRelocationInfo &Rel,
break;
}
assert(si != se && "No section containing relocation!");
Value.SectionID = findOrEmitSection(*si, true, ObjSectionToID);
Value.SectionID = findOrEmitSection(Obj, *si, true, ObjSectionToID);
Value.Addend = *(const intptr_t *)Target;
if (Value.Addend) {
// The MachO addend is offset from the current section, we need set it

View File

@ -49,7 +49,7 @@ protected:
int64_t Addend);
virtual void processRelocationRef(const ObjRelocationInfo &Rel,
const ObjectFile &Obj,
ObjectImage &Obj,
ObjSectionToIDMap &ObjSectionToID,
LocalSymbolMap &Symbols, StubMap &Stubs);
@ -59,7 +59,7 @@ public:
uint64_t Value,
uint32_t Type,
int64_t Addend);
RuntimeDyldMachO(RTDyldMemoryManager *mm) : RuntimeDyldImpl(mm) {}
bool isCompatibleFormat(const MemoryBuffer *InputBuffer) const;

View File

@ -1527,11 +1527,11 @@ bool AsmParser::HandleMacroEntry(StringRef Name, SMLoc NameLoc,
}
Lex();
}
// If there weren't any arguments, erase the token vector so everything
// else knows that. Leaving around the vestigial empty token list confuses
// things.
if (MacroArguments.size() == 1 && MacroArguments.back().empty())
MacroArguments.clear();
// If the last argument didn't end up with any tokens, it's not a real
// argument and we should remove it from the list. This happens with either
// a trailing comma or an empty argument list.
if (MacroArguments.back().empty())
MacroArguments.pop_back();
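In other words, when the argument lexer reaches the end of the list it may have pushed one final, token-less argument (for example after "MYMACRO a, b,"), and the new check drops it. A toy illustration with hypothetical names, not the parser's real types:

#include <string>
#include <vector>
typedef std::vector<std::string> MacroArgumentTokens;  // tokens of one argument
static void dropEmptyTrailingArgument(std::vector<MacroArgumentTokens> &Args) {
  // "a, b," lexes into {"a"}, {"b"}, {} -- the empty tail is not a real argument.
  if (!Args.empty() && Args.back().empty())
    Args.pop_back();
}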
// Macro instantiation is lexical, unfortunately. We construct a new buffer
// to hold the macro body with substitutions.

View File

@ -17,16 +17,6 @@ namespace llvm {
using namespace object;
namespace {
std::pair<unsigned char, unsigned char>
getElfArchType(MemoryBuffer *Object) {
if (Object->getBufferSize() < ELF::EI_NIDENT)
return std::make_pair((uint8_t)ELF::ELFCLASSNONE,(uint8_t)ELF::ELFDATANONE);
return std::make_pair( (uint8_t)Object->getBufferStart()[ELF::EI_CLASS]
, (uint8_t)Object->getBufferStart()[ELF::EI_DATA]);
}
}
// Creates an in-memory object-file by default: createELFObjectFile(Buffer)
ObjectFile *ObjectFile::createELFObjectFile(MemoryBuffer *Object) {
std::pair<unsigned char, unsigned char> Ident = getElfArchType(Object);

View File

@ -1,302 +0,0 @@
//===--- JSONParser.cpp - Simple JSON parser ------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a JSON parser.
//
//===----------------------------------------------------------------------===//
#include "llvm/Support/JSONParser.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MemoryBuffer.h"
using namespace llvm;
JSONParser::JSONParser(StringRef Input, SourceMgr *SM)
: SM(SM), Failed(false) {
InputBuffer = MemoryBuffer::getMemBuffer(Input, "JSON");
SM->AddNewSourceBuffer(InputBuffer, SMLoc());
End = InputBuffer->getBuffer().end();
Position = InputBuffer->getBuffer().begin();
}
JSONValue *JSONParser::parseRoot() {
if (Position != InputBuffer->getBuffer().begin())
report_fatal_error("Cannot reuse JSONParser.");
if (isWhitespace())
nextNonWhitespace();
if (errorIfAtEndOfFile("'[' or '{' at start of JSON text"))
return 0;
switch (*Position) {
case '[':
return new (ValueAllocator.Allocate<JSONArray>(1)) JSONArray(this);
case '{':
return new (ValueAllocator.Allocate<JSONObject>(1)) JSONObject(this);
default:
setExpectedError("'[' or '{' at start of JSON text", *Position);
return 0;
}
}
bool JSONParser::validate() {
JSONValue *Root = parseRoot();
if (Root == NULL) {
return false;
}
return skip(*Root);
}
bool JSONParser::skip(const JSONAtom &Atom) {
switch(Atom.getKind()) {
case JSONAtom::JK_Array:
case JSONAtom::JK_Object:
return skipContainer(*cast<JSONContainer>(&Atom));
case JSONAtom::JK_String:
return true;
case JSONAtom::JK_KeyValuePair:
return skip(*cast<JSONKeyValuePair>(&Atom)->Value);
}
llvm_unreachable("Impossible enum value.");
}
// Sets the current error to:
// "expected <Expected>, but found <Found>".
void JSONParser::setExpectedError(StringRef Expected, StringRef Found) {
SM->PrintMessage(SMLoc::getFromPointer(Position), SourceMgr::DK_Error,
"expected " + Expected + ", but found " + Found + ".", ArrayRef<SMRange>());
Failed = true;
}
// Sets the current error to:
// "expected <Expected>, but found <Found>".
void JSONParser::setExpectedError(StringRef Expected, char Found) {
setExpectedError(Expected, ("'" + StringRef(&Found, 1) + "'").str());
}
// If there is no character available, returns true and sets the current error
// to: "expected <Expected>, but found EOF.".
bool JSONParser::errorIfAtEndOfFile(StringRef Expected) {
if (Position == End) {
setExpectedError(Expected, "EOF");
return true;
}
return false;
}
// Sets the current error if the current character is not C to:
// "expected 'C', but got <current character>".
bool JSONParser::errorIfNotAt(char C, StringRef Message) {
if (*Position != C) {
std::string Expected =
("'" + StringRef(&C, 1) + "' " + Message).str();
if (Position == End)
setExpectedError(Expected, "EOF");
else
setExpectedError(Expected, *Position);
return true;
}
return false;
}
// Forbidding inlining improves performance by roughly 20%.
// FIXME: Remove once llvm optimizes this to the faster version without hints.
LLVM_ATTRIBUTE_NOINLINE static bool
wasEscaped(StringRef::iterator First, StringRef::iterator Position);
// Returns whether a character at 'Position' was escaped with a leading '\'.
// 'First' specifies the position of the first character in the string.
static bool wasEscaped(StringRef::iterator First,
StringRef::iterator Position) {
assert(Position - 1 >= First);
StringRef::iterator I = Position - 1;
// We calculate the number of consecutive '\'s before the current position
// by iterating backwards through our string.
while (I >= First && *I == '\\') --I;
// (Position - 1 - I) now contains the number of '\'s before the current
// position. If it is odd, the character at 'Position' was escaped.
return (Position - 1 - I) % 2 == 1;
}
// Parses a JSONString, assuming that the current position is on a quote.
JSONString *JSONParser::parseString() {
assert(Position != End);
assert(!isWhitespace());
if (errorIfNotAt('"', "at start of string"))
return 0;
StringRef::iterator First = Position + 1;
// Benchmarking shows that this loop is the hot path of the application with
// about 2/3rd of the runtime cycles. Since escaped quotes are not the common
// case, and multiple escaped backslashes before escaped quotes are very rare,
// we pessimize this case to achieve a smaller inner loop in the common case.
// We're doing that by having a quick inner loop that just scans for the next
// quote. Once we find the quote we check the last character to see whether
// the quote might have been escaped. If the last character is not a '\', we
// know the quote was not escaped and have thus found the end of the string.
// If the immediately preceding character was a '\', we have to scan backwards
// to see whether the previous character was actually an escaped backslash, or
// an escape character for the quote. If we find that the current quote was
// escaped, we continue parsing for the next quote and repeat.
// This optimization brings around 30% performance improvements.
do {
// Step over the current quote.
++Position;
// Find the next quote.
while (Position != End && *Position != '"')
++Position;
if (errorIfAtEndOfFile("'\"' at end of string"))
return 0;
// Repeat until the previous character was not a '\' or was an escaped
// backslash.
} while (*(Position - 1) == '\\' && wasEscaped(First, Position));
return new (ValueAllocator.Allocate<JSONString>())
JSONString(StringRef(First, Position - First));
}
// Advances the position to the next non-whitespace position.
void JSONParser::nextNonWhitespace() {
do {
++Position;
} while (isWhitespace());
}
// Checks if there is a whitespace character at the current position.
bool JSONParser::isWhitespace() {
return *Position == ' ' || *Position == '\t' ||
*Position == '\n' || *Position == '\r';
}
bool JSONParser::failed() const {
return Failed;
}
// Parses a JSONValue, assuming that the current position is at the first
// character of the value.
JSONValue *JSONParser::parseValue() {
assert(Position != End);
assert(!isWhitespace());
switch (*Position) {
case '[':
return new (ValueAllocator.Allocate<JSONArray>(1)) JSONArray(this);
case '{':
return new (ValueAllocator.Allocate<JSONObject>(1)) JSONObject(this);
case '"':
return parseString();
default:
setExpectedError("'[', '{' or '\"' at start of value", *Position);
return 0;
}
}
// Parses a JSONKeyValuePair, assuming that the current position is at the first
// character of the key, value pair.
JSONKeyValuePair *JSONParser::parseKeyValuePair() {
assert(Position != End);
assert(!isWhitespace());
JSONString *Key = parseString();
if (Key == 0)
return 0;
nextNonWhitespace();
if (errorIfNotAt(':', "between key and value"))
return 0;
nextNonWhitespace();
const JSONValue *Value = parseValue();
if (Value == 0)
return 0;
return new (ValueAllocator.Allocate<JSONKeyValuePair>(1))
JSONKeyValuePair(Key, Value);
}
/// \brief Parses the first element of a JSON array or object, or closes the
/// array.
///
/// The method assumes that the current position is before the first character
/// of the element, with possible white space in between. When successful, it
/// returns the new position after parsing the element. Otherwise, if there is
/// no next value, it returns a default constructed StringRef::iterator.
StringRef::iterator JSONParser::parseFirstElement(JSONAtom::Kind ContainerKind,
char StartChar, char EndChar,
const JSONAtom *&Element) {
assert(*Position == StartChar);
Element = 0;
nextNonWhitespace();
if (errorIfAtEndOfFile("value or end of container at start of container"))
return StringRef::iterator();
if (*Position == EndChar)
return StringRef::iterator();
Element = parseElement(ContainerKind);
if (Element == 0)
return StringRef::iterator();
return Position;
}
/// \brief Parses the next element of a JSON array or object, or closes the
/// array.
///
/// The method assumes that the current position is before the ',' which
/// separates the next element from the current element. When successful, it
/// returns the new position after parsing the element. Otherwise, if there is
/// no next value, it returns a default constructed StringRef::iterator.
StringRef::iterator JSONParser::parseNextElement(JSONAtom::Kind ContainerKind,
char EndChar,
const JSONAtom *&Element) {
Element = 0;
nextNonWhitespace();
if (errorIfAtEndOfFile("',' or end of container for next element"))
return 0;
if (*Position == ',') {
nextNonWhitespace();
if (errorIfAtEndOfFile("element in container"))
return StringRef::iterator();
Element = parseElement(ContainerKind);
if (Element == 0)
return StringRef::iterator();
return Position;
} else if (*Position == EndChar) {
return StringRef::iterator();
} else {
setExpectedError("',' or end of container for next element", *Position);
return StringRef::iterator();
}
}
const JSONAtom *JSONParser::parseElement(JSONAtom::Kind ContainerKind) {
switch (ContainerKind) {
case JSONAtom::JK_Array:
return parseValue();
case JSONAtom::JK_Object:
return parseKeyValuePair();
default:
llvm_unreachable("Impossible code path");
}
}
bool JSONParser::skipContainer(const JSONContainer &Container) {
for (JSONContainer::AtomIterator I = Container.atom_current(),
E = Container.atom_end();
I != E; ++I) {
assert(*I != 0);
if (!skip(**I))
return false;
}
return !failed();
}

View File

@ -0,0 +1,10 @@
#include "llvm/Support/Locale.h"
#include "llvm/Config/config.h"
#ifdef __APPLE__
#include "LocaleXlocale.inc"
#elif LLVM_ON_WIN32
#include "LocaleWindows.inc"
#else
#include "LocaleGeneric.inc"
#endif

View File

@ -0,0 +1,17 @@
#include <cwctype>
namespace llvm {
namespace sys {
namespace locale {
int columnWidth(StringRef s) {
return s.size();
}
bool isPrint(int c) {
return iswprint(c);
}
}
}
}

View File

@ -0,0 +1,15 @@
namespace llvm {
namespace sys {
namespace locale {
int columnWidth(StringRef s) {
return s.size();
}
bool isPrint(int c) {
return ' ' <= c && c <= '~';
}
}
}
}

View File

@ -0,0 +1,61 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ManagedStatic.h"
#include <cassert>
#include <xlocale.h>
namespace {
struct locale_holder {
locale_holder()
: l(newlocale(LC_CTYPE_MASK,"en_US.UTF-8",LC_GLOBAL_LOCALE))
{
assert(NULL!=l);
}
~locale_holder() {
freelocale(l);
}
int mbswidth(llvm::SmallString<16> s) const {
// this implementation assumes no '\0' in s
assert(s.size()==strlen(s.c_str()));
size_t size = mbstowcs_l(NULL,s.c_str(),0,l);
assert(size!=(size_t)-1);
if (size==0)
return 0;
llvm::SmallVector<wchar_t,200> ws(size);
size = mbstowcs_l(&ws[0],s.c_str(),ws.size(),l);
assert(ws.size()==size);
return wcswidth_l(&ws[0],ws.size(),l);
}
int isprint(int c) const {
return iswprint_l(c,l);
}
private:
locale_t l;
};
llvm::ManagedStatic<locale_holder> l;
}
namespace llvm {
namespace sys {
namespace locale {
int columnWidth(StringRef s) {
int width = l->mbswidth(s);
assert(width>=0);
return width;
}
bool isPrint(int c) {
return l->isprint(c);
}
}
}
}

View File

@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cstdlib>
@ -102,7 +103,7 @@ bool SmallPtrSetImpl::erase_imp(const void * Ptr) {
}
const void * const *SmallPtrSetImpl::FindBucketFor(const void *Ptr) const {
unsigned Bucket = Hash(Ptr);
unsigned Bucket = DenseMapInfo<void *>::getHashValue(Ptr) & (CurArraySize-1);
unsigned ArraySize = CurArraySize;
unsigned ProbeAmt = 1;
const void *const *Array = CurArray;
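With the private Hash() helper gone (see the SmallPtrSet.h hunk earlier in this commit), bucket selection now reuses DenseMapInfo's pointer hash and masks it into the power-of-two table. A standalone restatement of that one line, with illustrative parameter names:

#include "llvm/ADT/DenseMapInfo.h"
static unsigned bucketFor(const void *Ptr, unsigned TableSize) {
  // getHashValue() mixes the pointer bits; the mask assumes TableSize is a
  // power of two, which SmallPtrSet guarantees for CurArraySize.
  return llvm::DenseMapInfo<void *>::getHashValue(Ptr) & (TableSize - 1);
}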

View File

@ -193,7 +193,8 @@ SMDiagnostic SourceMgr::GetMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
}
void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
const Twine &Msg, ArrayRef<SMRange> Ranges) const {
const Twine &Msg, ArrayRef<SMRange> Ranges,
bool ShowColors) const {
SMDiagnostic Diagnostic = GetMessage(Loc, Kind, Msg, Ranges);
// Report the message with the diagnostic handler if present.
@ -208,7 +209,7 @@ void SourceMgr::PrintMessage(SMLoc Loc, SourceMgr::DiagKind Kind,
assert(CurBuf != -1 && "Invalid or unspecified location!");
PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc, OS);
Diagnostic.print(0, OS);
Diagnostic.print(0, OS, ShowColors);
}
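Callers that do not want the new coloring can pass the extra flag explicitly. A hedged usage sketch; 'SM' and 'Loc' are placeholders, and the default value of ShowColors lives in SourceMgr.h, which this hunk does not show:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/SourceMgr.h"
static void emitPlainWarning(llvm::SourceMgr &SM, llvm::SMLoc Loc) {
  // Same diagnostic path as before, but with colors explicitly disabled.
  SM.PrintMessage(Loc, llvm::SourceMgr::DK_Warning, "example diagnostic",
                  llvm::ArrayRef<llvm::SMRange>(), /*ShowColors=*/false);
}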
//===----------------------------------------------------------------------===//
@ -225,7 +226,14 @@ SMDiagnostic::SMDiagnostic(const SourceMgr &sm, SMLoc L, const std::string &FN,
}
void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const {
void SMDiagnostic::print(const char *ProgName, raw_ostream &S,
bool ShowColors) const {
// Display colors only if OS goes to a tty.
ShowColors &= S.is_displayed();
if (ShowColors)
S.changeColor(raw_ostream::SAVEDCOLOR, true);
if (ProgName && ProgName[0])
S << ProgName << ": ";
@ -244,13 +252,33 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const {
}
switch (Kind) {
case SourceMgr::DK_Error: S << "error: "; break;
case SourceMgr::DK_Warning: S << "warning: "; break;
case SourceMgr::DK_Note: S << "note: "; break;
case SourceMgr::DK_Error:
if (ShowColors)
S.changeColor(raw_ostream::RED, true);
S << "error: ";
break;
case SourceMgr::DK_Warning:
if (ShowColors)
S.changeColor(raw_ostream::MAGENTA, true);
S << "warning: ";
break;
case SourceMgr::DK_Note:
if (ShowColors)
S.changeColor(raw_ostream::BLACK, true);
S << "note: ";
break;
}
if (ShowColors) {
S.resetColor();
S.changeColor(raw_ostream::SAVEDCOLOR, true);
}
S << Message << '\n';
if (ShowColors)
S.resetColor();
if (LineNo == -1 || ColumnNo == -1)
return;
@ -292,6 +320,9 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const {
}
S << '\n';
if (ShowColors)
S.changeColor(raw_ostream::GREEN, true);
// Print out the caret line, matching tabs in the source line.
for (unsigned i = 0, e = CaretLine.size(), OutCol = 0; i != e; ++i) {
if (i >= LineContents.size() || LineContents[i] != '\t') {
@ -306,6 +337,9 @@ void SMDiagnostic::print(const char *ProgName, raw_ostream &S) const {
++OutCol;
} while (OutCol & 7);
}
if (ShowColors)
S.resetColor();
S << '\n';
}

View File

@ -290,6 +290,10 @@ const char *Process::OutputBold(bool bg) {
return "\033[1m";
}
const char *Process::OutputReverse() {
return "\033[7m";
}
const char *Process::ResetColor() {
return "\033[0m";
}

View File

@ -215,6 +215,38 @@ const char *Process::OutputColor(char code, bool bold, bool bg) {
return 0;
}
static WORD GetConsoleTextAttribute(HANDLE hConsoleOutput) {
CONSOLE_SCREEN_BUFFER_INFO info;
GetConsoleScreenBufferInfo(GetStdHandle(STD_OUTPUT_HANDLE), &info);
return info.wAttributes;
}
const char *Process::OutputReverse() {
const WORD attributes
= GetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE));
const WORD foreground_mask = FOREGROUND_BLUE | FOREGROUND_GREEN |
FOREGROUND_RED | FOREGROUND_INTENSITY;
const WORD background_mask = BACKGROUND_BLUE | BACKGROUND_GREEN |
BACKGROUND_RED | BACKGROUND_INTENSITY;
const WORD color_mask = foreground_mask | background_mask;
WORD new_attributes =
((attributes & FOREGROUND_BLUE )?BACKGROUND_BLUE :0) |
((attributes & FOREGROUND_GREEN )?BACKGROUND_GREEN :0) |
((attributes & FOREGROUND_RED )?BACKGROUND_RED :0) |
((attributes & FOREGROUND_INTENSITY)?BACKGROUND_INTENSITY:0) |
((attributes & BACKGROUND_BLUE )?FOREGROUND_BLUE :0) |
((attributes & BACKGROUND_GREEN )?FOREGROUND_GREEN :0) |
((attributes & BACKGROUND_RED )?FOREGROUND_RED :0) |
((attributes & BACKGROUND_INTENSITY)?FOREGROUND_INTENSITY:0) |
0;
new_attributes = (attributes & ~color_mask) | (new_attributes & color_mask);
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), new_attributes);
return 0;
}
const char *Process::ResetColor() {
SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), defaultColors());
return 0;

View File

@ -1732,7 +1732,7 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
if (UnquotedValue.size() < 3)
// TODO: Report error.
break;
unsigned int UnicodeScalarValue;
unsigned int UnicodeScalarValue = 0;
UnquotedValue.substr(1, 2).getAsInteger(16, UnicodeScalarValue);
encodeUTF8(UnicodeScalarValue, Storage);
UnquotedValue = UnquotedValue.substr(2);
@ -1742,7 +1742,7 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
if (UnquotedValue.size() < 5)
// TODO: Report error.
break;
unsigned int UnicodeScalarValue;
unsigned int UnicodeScalarValue = 0;
UnquotedValue.substr(1, 4).getAsInteger(16, UnicodeScalarValue);
encodeUTF8(UnicodeScalarValue, Storage);
UnquotedValue = UnquotedValue.substr(4);
@ -1752,7 +1752,7 @@ StringRef ScalarNode::unescapeDoubleQuoted( StringRef UnquotedValue
if (UnquotedValue.size() < 9)
// TODO: Report error.
break;
unsigned int UnicodeScalarValue;
unsigned int UnicodeScalarValue = 0;
UnquotedValue.substr(1, 8).getAsInteger(16, UnicodeScalarValue);
encodeUTF8(UnicodeScalarValue, Storage);
UnquotedValue = UnquotedValue.substr(8);

View File

@ -633,6 +633,19 @@ raw_ostream &raw_fd_ostream::resetColor() {
return *this;
}
raw_ostream &raw_fd_ostream::reverseColor() {
if (sys::Process::ColorNeedsFlush())
flush();
const char *colorcode = sys::Process::OutputReverse();
if (colorcode) {
size_t len = strlen(colorcode);
write(colorcode, len);
// don't account colors towards output characters
pos -= len;
}
return *this;
}
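A small usage sketch. It assumes raw_ostream declares a matching virtual reverseColor() (the header change is not shown in this diff) and that the terminal honors the escape code:

#include "llvm/Support/raw_ostream.h"
static void printHighlighted(llvm::StringRef Text) {
  llvm::raw_ostream &OS = llvm::outs();
  if (OS.is_displayed()) {          // only emit escape codes on a real terminal
    OS.reverseColor() << Text << '\n';
    OS.resetColor();
  } else {
    OS << Text << '\n';
  }
}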
bool raw_fd_ostream::is_displayed() const {
return sys::Process::FileDescriptorIsDisplayed(FD);
}

View File

@ -20,6 +20,22 @@ namespace llvm {
SourceMgr SrcMgr;
void PrintWarning(SMLoc WarningLoc, const Twine &Msg) {
SrcMgr.PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg);
}
void PrintWarning(const char *Loc, const Twine &Msg) {
SrcMgr.PrintMessage(SMLoc::getFromPointer(Loc), SourceMgr::DK_Warning, Msg);
}
void PrintWarning(const Twine &Msg) {
errs() << "warning:" << Msg << "\n";
}
void PrintWarning(const TGError &Warning) {
PrintWarning(Warning.getLoc(), Warning.getMessage());
}
void PrintError(SMLoc ErrorLoc, const Twine &Msg) {
SrcMgr.PrintMessage(ErrorLoc, SourceMgr::DK_Error, Msg);
}
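A hypothetical backend call site, just to show the shape of the new overloads (the corresponding declarations in llvm/TableGen/Error.h are not part of this hunk):

#include "llvm/TableGen/Error.h"
static void warnExamples(llvm::SMLoc SomeLoc) {
  llvm::PrintWarning(SomeLoc, "record is deprecated");     // located warning
  llvm::PrintWarning("no source location for this one");   // falls back to errs()
}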

View File

@ -9,10 +9,6 @@
// This describes the calling conventions for the ARM architecture.
//===----------------------------------------------------------------------===//
/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>:
CCIf<!strconcat("State.getTarget().getSubtarget<ARMSubtarget>().", F), A>;
/// CCIfAlign - Match of the original alignment of the arg
class CCIfAlign<string Align, CCAction A>:
CCIf<!strconcat("ArgFlags.getOrigAlign() == ", Align), A>;

View File

@ -532,6 +532,7 @@ class AIswp<bit b, dag oops, dag iops, string opc, list<dag> pattern>
let Inst{11-4} = 0b00001001;
let Inst{3-0} = Rt2;
let Unpredictable{11-8} = 0b1111;
let DecoderMethod = "DecodeSwap";
}

View File

@ -219,8 +219,11 @@ def UseFPVMLx : Predicate<"Subtarget->useFPVMLx()">;
// Prefer fused MAC for fp mul + add over fp VMLA / VMLS if they are available.
// But only select them if more precision in FP computation is allowed.
def UseFusedMAC : Predicate<"!TM.Options.NoExcessFPPrecision">;
def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4()">;
// Do not use them for Darwin platforms.
def UseFusedMAC : Predicate<"!TM.Options.NoExcessFPPrecision && "
"!Subtarget->isTargetDarwin()">;
def DontUseFusedMAC : Predicate<"!Subtarget->hasVFP4() || "
"Subtarget->isTargetDarwin()">;
//===----------------------------------------------------------------------===//
// ARM Flag Definitions.
@ -905,6 +908,11 @@ def p_imm : Operand<i32> {
let DecoderMethod = "DecodeCoprocessor";
}
def pf_imm : Operand<i32> {
let PrintMethod = "printPImmediate";
let ParserMatchClass = CoprocNumAsmOperand;
}
def CoprocRegAsmOperand : AsmOperandClass {
let Name = "CoprocReg";
let ParserMethod = "parseCoprocRegOperand";
@ -1184,6 +1192,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc,
let Inst{19-16} = Rn;
let Inst{15-12} = 0b0000;
let Inst{11-0} = imm;
let Unpredictable{15-12} = 0b1111;
}
def rr : AI1<opcod, (outs), (ins GPR:$Rn, GPR:$Rm), DPFrm, iir,
opc, "\t$Rn, $Rm",
@ -1197,6 +1207,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc,
let Inst{15-12} = 0b0000;
let Inst{11-4} = 0b00000000;
let Inst{3-0} = Rm;
let Unpredictable{15-12} = 0b1111;
}
def rsi : AI1<opcod, (outs),
(ins GPR:$Rn, so_reg_imm:$shift), DPSoRegImmFrm, iis,
@ -1211,11 +1223,13 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc,
let Inst{11-5} = shift{11-5};
let Inst{4} = 0;
let Inst{3-0} = shift{3-0};
let Unpredictable{15-12} = 0b1111;
}
def rsr : AI1<opcod, (outs),
(ins GPR:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis,
(ins GPRnopc:$Rn, so_reg_reg:$shift), DPSoRegRegFrm, iis,
opc, "\t$Rn, $shift",
[(opnode GPR:$Rn, so_reg_reg:$shift)]> {
[(opnode GPRnopc:$Rn, so_reg_reg:$shift)]> {
bits<4> Rn;
bits<12> shift;
let Inst{25} = 0;
@ -1227,6 +1241,8 @@ multiclass AI1_cmp_irs<bits<4> opcod, string opc,
let Inst{6-5} = shift{6-5};
let Inst{4} = 1;
let Inst{3-0} = shift{3-0};
let Unpredictable{15-12} = 0b1111;
}
}
@ -4103,7 +4119,7 @@ def ISB : AInoP<(outs), (ins memb_opt:$opt), MiscFrm, NoItinerary,
let Inst{3-0} = opt;
}
// Pseudo isntruction that combines movs + predicated rsbmi
// Pseudo instruction that combines movs + predicated rsbmi
// to implement integer ABS
let usesCustomInserter = 1, Defs = [CPSR] in {
def ABS : ARMPseudoInst<
@ -4264,9 +4280,9 @@ def CLREX : AXI<(outs), (ins), MiscFrm, NoItinerary, "clrex", []>,
// SWP/SWPB are deprecated in V6/V7.
let mayLoad = 1, mayStore = 1 in {
def SWP : AIswp<0, (outs GPR:$Rt), (ins GPR:$Rt2, addr_offset_none:$addr),
def SWP : AIswp<0, (outs GPRnopc:$Rt), (ins GPRnopc:$Rt2, addr_offset_none:$addr),
"swp", []>;
def SWPB: AIswp<1, (outs GPR:$Rt), (ins GPR:$Rt2, addr_offset_none:$addr),
def SWPB: AIswp<1, (outs GPRnopc:$Rt), (ins GPRnopc:$Rt2, addr_offset_none:$addr),
"swpb", []>;
}
@ -4295,7 +4311,7 @@ def CDP : ABI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
let Inst{23-20} = opc1;
}
def CDP2 : ABXI<0b1110, (outs), (ins p_imm:$cop, imm0_15:$opc1,
def CDP2 : ABXI<0b1110, (outs), (ins pf_imm:$cop, imm0_15:$opc1,
c_imm:$CRd, c_imm:$CRn, c_imm:$CRm, imm0_7:$opc2),
NoItinerary, "cdp2\t$cop, $opc1, $CRd, $CRn, $CRm, $opc2",
[(int_arm_cdp2 imm:$cop, imm:$opc1, imm:$CRd, imm:$CRn,
@ -4574,7 +4590,7 @@ def : ARMV5TPat<(int_arm_mrc2 imm:$cop, imm:$opc1, imm:$CRn,
class MovRRCopro<string opc, bit direction, list<dag> pattern = []>
: ABI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
GPR:$Rt, GPR:$Rt2, c_imm:$CRm),
GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm),
NoItinerary, opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm", pattern> {
let Inst{23-21} = 0b010;
let Inst{20} = direction;
@ -4593,13 +4609,13 @@ class MovRRCopro<string opc, bit direction, list<dag> pattern = []>
}
def MCRR : MovRRCopro<"mcrr", 0 /* from ARM core register to coprocessor */,
[(int_arm_mcrr imm:$cop, imm:$opc1, GPR:$Rt, GPR:$Rt2,
[(int_arm_mcrr imm:$cop, imm:$opc1, GPRnopc:$Rt, GPRnopc:$Rt2,
imm:$CRm)]>;
def MRRC : MovRRCopro<"mrrc", 1 /* from coprocessor to ARM core register */>;
class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
: ABXI<0b1100, (outs), (ins p_imm:$cop, imm0_15:$opc1,
GPR:$Rt, GPR:$Rt2, c_imm:$CRm), NoItinerary,
GPRnopc:$Rt, GPRnopc:$Rt2, c_imm:$CRm), NoItinerary,
!strconcat(opc, "\t$cop, $opc1, $Rt, $Rt2, $CRm"), pattern> {
let Inst{31-28} = 0b1111;
let Inst{23-21} = 0b010;
@ -4616,10 +4632,12 @@ class MovRRCopro2<string opc, bit direction, list<dag> pattern = []>
let Inst{11-8} = cop;
let Inst{7-4} = opc1;
let Inst{3-0} = CRm;
let DecoderMethod = "DecodeMRRC2";
}
def MCRR2 : MovRRCopro2<"mcrr2", 0 /* from ARM core register to coprocessor */,
[(int_arm_mcrr2 imm:$cop, imm:$opc1, GPR:$Rt, GPR:$Rt2,
[(int_arm_mcrr2 imm:$cop, imm:$opc1, GPRnopc:$Rt, GPRnopc:$Rt2,
imm:$CRm)]>;
def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>;
@ -4628,22 +4646,32 @@ def MRRC2 : MovRRCopro2<"mrrc2", 1 /* from coprocessor to ARM core register */>;
//
// Move to ARM core register from Special Register
def MRS : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary,
def MRS : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary,
"mrs", "\t$Rd, apsr", []> {
bits<4> Rd;
let Inst{23-16} = 0b00001111;
let Unpredictable{19-17} = 0b111;
let Inst{15-12} = Rd;
let Inst{7-4} = 0b0000;
let Inst{11-0} = 0b000000000000;
let Unpredictable{11-0} = 0b110100001111;
}
def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPR:$Rd, pred:$p)>, Requires<[IsARM]>;
def : InstAlias<"mrs${p} $Rd, cpsr", (MRS GPRnopc:$Rd, pred:$p)>, Requires<[IsARM]>;
def MRSsys : ABI<0b0001, (outs GPR:$Rd), (ins), NoItinerary,
// The MRSsys instruction is the MRS instruction from the ARM ARM,
// section B9.3.9, with the R bit set to 1.
def MRSsys : ABI<0b0001, (outs GPRnopc:$Rd), (ins), NoItinerary,
"mrs", "\t$Rd, spsr", []> {
bits<4> Rd;
let Inst{23-16} = 0b01001111;
let Unpredictable{19-16} = 0b1111;
let Inst{15-12} = Rd;
let Inst{7-4} = 0b0000;
let Inst{11-0} = 0b000000000000;
let Unpredictable{11-0} = 0b110100001111;
}
// Move from ARM core register to Special Register

View File

@ -5634,6 +5634,7 @@ multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy,
// extload, zextload and sextload for a lengthening load followed by another
// lengthening load, to quadruple the initial length.
//
// Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32", qsub_0> =
// Pat<(v4i32 (extloadvi8 addrmode5:$addr))
// (EXTRACT_SUBREG (VMOVLuv4i32
@ -5644,28 +5645,63 @@ multiclass Lengthen_HalfSingle<string DestLanes, string DestTy, string SrcTy,
// qsub_0)>;
multiclass Lengthen_Double<string DestLanes, string DestTy, string SrcTy,
string Insn1Lanes, string Insn1Ty, string Insn2Lanes,
string Insn2Ty, SubRegIndex RegType> {
string Insn2Ty> {
def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
(!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
(!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
(INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
ssub_0)), dsub_0))>;
def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
(!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
(!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
(INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
ssub_0)), dsub_0))>;
def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
(!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
(!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty)
(INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
ssub_0)), dsub_0))>;
}
// extload, zextload and sextload for a lengthening load followed by another
// lengthening load, to quadruple the initial length, but which ends up only
// requiring half the available lanes (a 64-bit outcome instead of a 128-bit).
//
// Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32"> =
// Pat<(v4i32 (extloadvi8 addrmode5:$addr))
// (EXTRACT_SUBREG (VMOVLuv4i32
// (EXTRACT_SUBREG (VMOVLuv8i16 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
// (VLDRS addrmode5:$addr),
// ssub_0)),
// dsub_0)),
// dsub_0)>;
multiclass Lengthen_HalfDouble<string DestLanes, string DestTy, string SrcTy,
string Insn1Lanes, string Insn1Ty, string Insn2Lanes,
string Insn2Ty> {
def _Any : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
(!cast<PatFrag>("extloadv" # SrcTy) addrmode5:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
(INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
ssub_0)), dsub_0)),
RegType)>;
dsub_0)>;
def _Z : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
(!cast<PatFrag>("zextloadv" # SrcTy) addrmode5:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLuv" # Insn1Lanes # Insn1Ty)
(INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
ssub_0)), dsub_0)),
RegType)>;
dsub_0)>;
def _S : Pat<(!cast<ValueType>("v" # DestLanes # DestTy)
(!cast<PatFrag>("sextloadv" # SrcTy) addrmode5:$addr)),
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn2Lanes # Insn2Ty)
(EXTRACT_SUBREG (!cast<Instruction>("VMOVLsv" # Insn1Lanes # Insn1Ty)
(INSERT_SUBREG (f64 (IMPLICIT_DEF)), (VLDRS addrmode5:$addr),
ssub_0)), dsub_0)),
RegType)>;
dsub_0)>;
}
defm : Lengthen_Single<"8", "i16", "i8">; // v8i8 -> v8i16
@ -5676,12 +5712,12 @@ defm : Lengthen_HalfSingle<"4", "i16", "i8", "8", "i16">; // v4i8 -> v4i16
defm : Lengthen_HalfSingle<"2", "i16", "i8", "8", "i16">; // v2i8 -> v2i16
defm : Lengthen_HalfSingle<"2", "i32", "i16", "4", "i32">; // v2i16 -> v2i32
// Double lengthening - v4i8 -> v4i16 -> v4i32
defm : Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32", qsub_0>;
// Double lengthening - v4i8 -> v4i16 -> v4i32
defm : Lengthen_Double<"4", "i32", "i8", "8", "i16", "4", "i32">;
// v2i8 -> v2i16 -> v2i32
defm : Lengthen_Double<"2", "i32", "i8", "8", "i16", "4", "i32", dsub_0>;
defm : Lengthen_HalfDouble<"2", "i32", "i8", "8", "i16", "4", "i32">;
// v2i16 -> v2i32 -> v2i64
defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64", qsub_0>;
defm : Lengthen_Double<"2", "i64", "i16", "4", "i32", "2", "i64">;
// Triple lengthening - v2i8 -> v2i16 -> v2i32 -> v2i64
def : Pat<(v2i64 (extloadvi8 addrmode5:$addr)),
@ -5951,7 +5987,7 @@ def : NEONInstAlias<"vshl${p}.u32 $Vdn, $Vm",
def : NEONInstAlias<"vshl${p}.u64 $Vdn, $Vm",
(VSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
// VSHL (immediate) two-operand aliases.
// VSHR (immediate) two-operand aliases.
def : NEONInstAlias<"vshr${p}.s8 $Vdn, $imm",
(VSHRsv8i8 DPR:$Vdn, DPR:$Vdn, shr_imm8:$imm, pred:$p)>;
def : NEONInstAlias<"vshr${p}.s16 $Vdn, $imm",
@ -5988,6 +6024,41 @@ def : NEONInstAlias<"vshr${p}.u32 $Vdn, $imm",
def : NEONInstAlias<"vshr${p}.u64 $Vdn, $imm",
(VSHRuv2i64 QPR:$Vdn, QPR:$Vdn, shr_imm64:$imm, pred:$p)>;
// VRSHL two-operand aliases.
def : NEONInstAlias<"vrshl${p}.s8 $Vdn, $Vm",
(VRSHLsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.s16 $Vdn, $Vm",
(VRSHLsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.s32 $Vdn, $Vm",
(VRSHLsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.s64 $Vdn, $Vm",
(VRSHLsv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u8 $Vdn, $Vm",
(VRSHLuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u16 $Vdn, $Vm",
(VRSHLuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u32 $Vdn, $Vm",
(VRSHLuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u64 $Vdn, $Vm",
(VRSHLuv1i64 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.s8 $Vdn, $Vm",
(VRSHLsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.s16 $Vdn, $Vm",
(VRSHLsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.s32 $Vdn, $Vm",
(VRSHLsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.s64 $Vdn, $Vm",
(VRSHLsv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u8 $Vdn, $Vm",
(VRSHLuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u16 $Vdn, $Vm",
(VRSHLuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u32 $Vdn, $Vm",
(VRSHLuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vrshl${p}.u64 $Vdn, $Vm",
(VRSHLuv2i64 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
// VLD1 single-lane pseudo-instructions. These need special handling for
// the lane index that an InstAlias can't handle, so we use these instead.
def VLD1LNdAsm_8 : NEONDataTypeAsmPseudoInst<"vld1${p}", ".8", "$list, $addr",
@ -6951,6 +7022,100 @@ def : NEONInstAlias<"vsli${p}.32 $Vdm, $imm",
def : NEONInstAlias<"vsli${p}.64 $Vdm, $imm",
(VSLIv2i64 QPR:$Vdm, QPR:$Vdm, shr_imm64:$imm, pred:$p)>;
// Two-operand variants for VHSUB.
// Signed.
def : NEONInstAlias<"vhsub${p}.s8 $Vdn, $Vm",
(VHSUBsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.s16 $Vdn, $Vm",
(VHSUBsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.s32 $Vdn, $Vm",
(VHSUBsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.s8 $Vdn, $Vm",
(VHSUBsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.s16 $Vdn, $Vm",
(VHSUBsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.s32 $Vdn, $Vm",
(VHSUBsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
// Unsigned.
def : NEONInstAlias<"vhsub${p}.u8 $Vdn, $Vm",
(VHSUBuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.u16 $Vdn, $Vm",
(VHSUBuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.u32 $Vdn, $Vm",
(VHSUBuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.u8 $Vdn, $Vm",
(VHSUBuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.u16 $Vdn, $Vm",
(VHSUBuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhsub${p}.u32 $Vdn, $Vm",
(VHSUBuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
// Two-operand variants for VHADD.
// Signed.
def : NEONInstAlias<"vhadd${p}.s8 $Vdn, $Vm",
(VHADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.s16 $Vdn, $Vm",
(VHADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.s32 $Vdn, $Vm",
(VHADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.s8 $Vdn, $Vm",
(VHADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.s16 $Vdn, $Vm",
(VHADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.s32 $Vdn, $Vm",
(VHADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
// Unsigned.
def : NEONInstAlias<"vhadd${p}.u8 $Vdn, $Vm",
(VHADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.u16 $Vdn, $Vm",
(VHADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.u32 $Vdn, $Vm",
(VHADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.u8 $Vdn, $Vm",
(VHADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.u16 $Vdn, $Vm",
(VHADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
def : NEONInstAlias<"vhadd${p}.u32 $Vdn, $Vm",
(VHADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Vm, pred:$p)>;
// Two-operand variants for VRHADD.
// Signed.
def : NEONInstAlias<"vrhadd${p}.s8 $Vdn, $Rm",
(VRHADDsv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.s16 $Vdn, $Rm",
(VRHADDsv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.s32 $Vdn, $Rm",
(VRHADDsv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.s8 $Vdn, $Rm",
(VRHADDsv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.s16 $Vdn, $Rm",
(VRHADDsv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.s32 $Vdn, $Rm",
(VRHADDsv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
// Unsigned.
def : NEONInstAlias<"vrhadd${p}.u8 $Vdn, $Rm",
(VRHADDuv8i8 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.u16 $Vdn, $Rm",
(VRHADDuv4i16 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.u32 $Vdn, $Rm",
(VRHADDuv2i32 DPR:$Vdn, DPR:$Vdn, DPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.u8 $Vdn, $Rm",
(VRHADDuv16i8 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.u16 $Vdn, $Rm",
(VRHADDuv8i16 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
def : NEONInstAlias<"vrhadd${p}.u32 $Vdn, $Rm",
(VRHADDuv4i32 QPR:$Vdn, QPR:$Vdn, QPR:$Rm, pred:$p)>;
// VSWP allows, but does not require, a type suffix.
defm : NEONDTAnyInstAlias<"vswp${p}", "$Vd, $Vm",
(VSWPd DPR:$Vd, DPR:$Vm, pred:$p)>;

View File

@ -136,22 +136,22 @@ TargetPassConfig *ARMBaseTargetMachine::createPassConfig(PassManagerBase &PM) {
bool ARMPassConfig::addPreISel() {
if (TM->getOptLevel() != CodeGenOpt::None && EnableGlobalMerge)
PM.add(createGlobalMergePass(TM->getTargetLowering()));
PM->add(createGlobalMergePass(TM->getTargetLowering()));
return false;
}
bool ARMPassConfig::addInstSelector() {
PM.add(createARMISelDag(getARMTargetMachine(), getOptLevel()));
PM->add(createARMISelDag(getARMTargetMachine(), getOptLevel()));
return false;
}
bool ARMPassConfig::addPreRegAlloc() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None && !getARMSubtarget().isThumb1Only())
PM.add(createARMLoadStoreOptimizationPass(true));
PM->add(createARMLoadStoreOptimizationPass(true));
if (getOptLevel() != CodeGenOpt::None && getARMSubtarget().isCortexA9())
PM.add(createMLxExpansionPass());
PM->add(createMLxExpansionPass());
return true;
}
@ -159,23 +159,23 @@ bool ARMPassConfig::addPreSched2() {
// FIXME: temporarily disabling load / store optimization pass for Thumb1.
if (getOptLevel() != CodeGenOpt::None) {
if (!getARMSubtarget().isThumb1Only()) {
PM.add(createARMLoadStoreOptimizationPass());
PM->add(createARMLoadStoreOptimizationPass());
printAndVerify("After ARM load / store optimizer");
}
if (getARMSubtarget().hasNEON())
PM.add(createExecutionDependencyFixPass(&ARM::DPRRegClass));
PM->add(createExecutionDependencyFixPass(&ARM::DPRRegClass));
}
// Expand some pseudo instructions into multiple instructions to allow
// proper scheduling.
PM.add(createARMExpandPseudoPass());
PM->add(createARMExpandPseudoPass());
if (getOptLevel() != CodeGenOpt::None) {
if (!getARMSubtarget().isThumb1Only())
addPass(IfConverterID);
}
if (getARMSubtarget().isThumb2())
PM.add(createThumb2ITBlockPass());
PM->add(createThumb2ITBlockPass());
return true;
}
@ -183,13 +183,13 @@ bool ARMPassConfig::addPreSched2() {
bool ARMPassConfig::addPreEmitPass() {
if (getARMSubtarget().isThumb2()) {
if (!getARMSubtarget().prefers32BitThumb())
PM.add(createThumb2SizeReductionPass());
PM->add(createThumb2SizeReductionPass());
// Constant island pass work on unbundled instructions.
addPass(UnpackMachineBundlesID);
}
PM.add(createARMConstantIslandPass());
PM->add(createARMConstantIslandPass());
return true;
}

View File

@ -82,8 +82,14 @@ class ARMAsmParser : public MCTargetAsmParser {
MCAsmParser &getParser() const { return Parser; }
MCAsmLexer &getLexer() const { return Parser.getLexer(); }
void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
bool Warning(SMLoc L, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
return Parser.Warning(L, Msg, Ranges);
}
bool Error(SMLoc L, const Twine &Msg,
ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
return Parser.Error(L, Msg, Ranges);
}
int tryParseRegister();
bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
@ -478,6 +484,8 @@ public:
/// getEndLoc - Get the location of the last token of this operand.
SMLoc getEndLoc() const { return EndLoc; }
SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
ARMCC::CondCodes getCondCode() const {
assert(Kind == k_CondCode && "Invalid access!");
return CC.Val;
@ -4518,22 +4526,26 @@ bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
case AsmToken::Dollar:
case AsmToken::Hash: {
// #42 -> immediate.
// TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
S = Parser.getTok().getLoc();
Parser.Lex();
bool isNegative = Parser.getTok().is(AsmToken::Minus);
const MCExpr *ImmVal;
if (getParser().ParseExpression(ImmVal))
return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
if (CE) {
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
if (Parser.getTok().isNot(AsmToken::Colon)) {
bool isNegative = Parser.getTok().is(AsmToken::Minus);
const MCExpr *ImmVal;
if (getParser().ParseExpression(ImmVal))
return true;
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
if (CE) {
int32_t Val = CE->getValue();
if (isNegative && Val == 0)
ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
}
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
return false;
}
E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
return false;
// w/ a ':' after the '#', it's just like a plain ':'.
// FALLTHROUGH
}
case AsmToken::Colon: {
// ":lower16:" and ":upper16:" expression prefixes
@ -7321,7 +7333,8 @@ MatchAndEmitInstruction(SMLoc IDLoc,
return Error(ErrorLoc, "invalid operand for instruction");
}
case Match_MnemonicFail:
return Error(IDLoc, "invalid instruction");
return Error(IDLoc, "invalid instruction",
((ARMOperand*)Operands[0])->getLocRange());
case Match_ConversionFail:
// The converter function will have already emitted a diagnostic.
return true;

View File

@ -326,6 +326,8 @@ static DecodeStatus DecodeT2ShifterImmOperand(MCInst &Inst, unsigned Val,
static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder);
#include "ARMGenDisassemblerTables.inc"
#include "ARMGenInstrInfo.inc"
#include "ARMGenEDInfo.inc"
@ -2690,7 +2692,6 @@ static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Insn,
unsigned Rm = fieldFromInstruction32(Insn, 0, 4);
unsigned align = fieldFromInstruction32(Insn, 4, 1);
unsigned size = 1 << fieldFromInstruction32(Insn, 6, 2);
unsigned pred = fieldFromInstruction32(Insn, 22, 4);
align *= 2*size;
switch (Inst.getOpcode()) {
@ -2721,16 +2722,11 @@ static DecodeStatus DecodeVLD2DupInstruction(MCInst &Inst, unsigned Insn,
return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(align));
if (Rm == 0xD)
Inst.addOperand(MCOperand::CreateReg(0));
else if (Rm != 0xF) {
if (Rm != 0xD && Rm != 0xF) {
if (!Check(S, DecodeGPRRegisterClass(Inst, Rm, Address, Decoder)))
return MCDisassembler::Fail;
}
if (!Check(S, DecodePredicateOperand(Inst, pred, Address, Decoder)))
return MCDisassembler::Fail;
return S;
}
@ -4314,6 +4310,10 @@ static DecodeStatus DecodeSwap(MCInst &Inst, unsigned Insn,
return DecodeCPSInstruction(Inst, Insn, Address, Decoder);
DecodeStatus S = MCDisassembler::Success;
if (Rt == Rn || Rn == Rt2)
S = MCDisassembler::SoftFail;
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
return MCDisassembler::Fail;
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder)))
@ -4409,3 +4409,31 @@ static DecodeStatus DecodeLDR(MCInst &Inst, unsigned Val,
return S;
}
static DecodeStatus DecodeMRRC2(llvm::MCInst &Inst, unsigned Val,
uint64_t Address, const void *Decoder) {
DecodeStatus S = MCDisassembler::Success;
unsigned CRm = fieldFromInstruction32(Val, 0, 4);
unsigned opc1 = fieldFromInstruction32(Val, 4, 4);
unsigned cop = fieldFromInstruction32(Val, 8, 4);
unsigned Rt = fieldFromInstruction32(Val, 12, 4);
unsigned Rt2 = fieldFromInstruction32(Val, 16, 4);
if ((cop & ~0x1) == 0xa)
return MCDisassembler::Fail;
if (Rt == Rt2)
S = MCDisassembler::SoftFail;
Inst.addOperand(MCOperand::CreateImm(cop));
Inst.addOperand(MCOperand::CreateImm(opc1));
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt, Address, Decoder)))
return MCDisassembler::Fail;
if (!Check(S, DecodeGPRnopcRegisterClass(Inst, Rt2, Address, Decoder)))
return MCDisassembler::Fail;
Inst.addOperand(MCOperand::CreateImm(CRm));
return S;
}
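The decoder above pulls each operand field out of the raw 32-bit word with fieldFromInstruction32(). A standalone restatement of what such a helper does; this sketch is an assumption about its behavior, not the file's actual implementation:

static unsigned extractField(unsigned Insn, unsigned StartBit, unsigned NumBits) {
  // For example, extractField(Insn, 12, 4) returns bits [15:12], the Rt
  // register field used by DecodeMRRC2 above. Assumes NumBits < 32.
  return (Insn >> StartBit) & ((1u << NumBits) - 1);
}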

View File

@ -209,12 +209,12 @@ void ARMInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
} else {
assert(Op.isExpr() && "unknown operand kind in printOperand");
// If a symbolic branch target was added as a constant expression then print
// that address in hex.
// that address in hex. Only the low 32 bits of the address are printed.
const MCConstantExpr *BranchTarget = dyn_cast<MCConstantExpr>(Op.getExpr());
int64_t Address;
if (BranchTarget && BranchTarget->EvaluateAsAbsolute(Address)) {
O << "0x";
O.write_hex(Address);
O.write_hex((uint32_t)Address);
}
else {
// Otherwise, just print the expression.

View File

@ -11,10 +11,6 @@
//
//===----------------------------------------------------------------------===//
/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
: CCIf<!strconcat("State.getTarget().getSubtarget<PPCSubtarget>().", F), A>;
//===----------------------------------------------------------------------===//
// Return Value Calling Convention
//===----------------------------------------------------------------------===//

View File

@ -72,7 +72,7 @@ TargetPassConfig *SPUTargetMachine::createPassConfig(PassManagerBase &PM) {
bool SPUPassConfig::addInstSelector() {
// Install an instruction selector.
PM.add(createSPUISelDag(getSPUTargetMachine()));
PM->add(createSPUISelDag(getSPUTargetMachine()));
return false;
}
@ -85,9 +85,9 @@ bool SPUPassConfig::addPreEmitPass() {
(BuilderFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
"createTCESchedulerPass");
if (schedulerCreator != NULL)
PM.add(schedulerCreator("cellspu"));
PM->add(schedulerCreator("cellspu"));
//align instructions with nops/lnops for dual issue
PM.add(createSPUNopFillerPass(getSPUTargetMachine()));
PM->add(createSPUNopFillerPass(getSPUTargetMachine()));
return true;
}

View File

@ -40,7 +40,6 @@ namespace llvm {
FunctionPass *createHexagonHardwareLoops();
FunctionPass *createHexagonPeephole();
FunctionPass *createHexagonFixupHwLoops();
FunctionPass *createHexagonPacketizer();
/* TODO: object output.
MCCodeEmitter *createHexagonMCCodeEmitter(const Target &,

View File

@ -13,11 +13,11 @@
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "asm-printer"
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonMCInst.h"
#include "HexagonTargetMachine.h"
#include "HexagonSubtarget.h"
#include "InstPrinter/HexagonInstPrinter.h"
@ -54,7 +54,6 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include <map>
using namespace llvm;
@ -78,7 +77,8 @@ void HexagonAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) {
default: llvm_unreachable("<unknown operand type>");
default:
assert(0 && "<unknown operand type>");
case MachineOperand::MO_Register:
O << HexagonInstPrinter::getRegisterName(MO.getReg());
return;
@ -196,45 +196,10 @@ void HexagonAsmPrinter::printPredicateOperand(const MachineInstr *MI,
/// the current output stream.
///
void HexagonAsmPrinter::EmitInstruction(const MachineInstr *MI) {
if (MI->isBundle()) {
std::vector<const MachineInstr*> BundleMIs;
MCInst MCI;
const MachineBasicBlock *MBB = MI->getParent();
MachineBasicBlock::const_instr_iterator MII = MI;
++MII;
unsigned int IgnoreCount = 0;
while (MII != MBB->end() && MII->isInsideBundle()) {
const MachineInstr *MInst = MII;
if (MInst->getOpcode() == TargetOpcode::DBG_VALUE ||
MInst->getOpcode() == TargetOpcode::IMPLICIT_DEF) {
IgnoreCount++;
++MII;
continue;
}
//BundleMIs.push_back(&*MII);
BundleMIs.push_back(MInst);
++MII;
}
unsigned Size = BundleMIs.size();
assert((Size+IgnoreCount) == MI->getBundleSize() && "Corrupt Bundle!");
for (unsigned Index = 0; Index < Size; Index++) {
HexagonMCInst MCI;
MCI.setStartPacket(Index == 0);
MCI.setEndPacket(Index == (Size-1));
HexagonLowerToMC(BundleMIs[Index], MCI, *this);
OutStreamer.EmitInstruction(MCI);
}
}
else {
HexagonMCInst MCI;
if (MI->getOpcode() == Hexagon::ENDLOOP0) {
MCI.setStartPacket(true);
MCI.setEndPacket(true);
}
HexagonLowerToMC(MI, MCI, *this);
OutStreamer.EmitInstruction(MCI);
}
HexagonLowerToMC(MI, MCI, *this);
OutStreamer.EmitInstruction(MCI);
return;
}
@ -277,17 +242,17 @@ void HexagonAsmPrinter::printJumpTable(const MachineInstr *MI, int OpNo,
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
assert( (MO.getType() == MachineOperand::MO_JumpTableIndex) &&
"Expecting jump table index");
"Expecting jump table index");
// Hexagon_TODO: Do we need name mangling?
O << *GetJTISymbol(MO.getIndex());
}
void HexagonAsmPrinter::printConstantPool(const MachineInstr *MI, int OpNo,
raw_ostream &O) {
raw_ostream &O) {
const MachineOperand &MO = MI->getOperand(OpNo);
assert( (MO.getType() == MachineOperand::MO_ConstantPoolIndex) &&
"Expecting constant pool index");
"Expecting constant pool index");
// Hexagon_TODO: Do we need name mangling?
O << *GetCPISymbol(MO.getIndex());

View File

@ -32,9 +32,11 @@
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
const unsigned Hexagon_MAX_RET_SIZE = 64;

View File

@ -13,26 +13,13 @@
// *** Must match HexagonBaseInfo.h ***
//===----------------------------------------------------------------------===//
class Type<bits<5> t> {
bits<5> Value = t;
}
def TypePSEUDO : Type<0>;
def TypeALU32 : Type<1>;
def TypeCR : Type<2>;
def TypeJR : Type<3>;
def TypeJ : Type<4>;
def TypeLD : Type<5>;
def TypeST : Type<6>;
def TypeSYSTEM : Type<7>;
def TypeXTYPE : Type<8>;
def TypeMARKER : Type<31>;
//===----------------------------------------------------------------------===//
// Instruction Class Declaration +
//===----------------------------------------------------------------------===//
class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr, InstrItinClass itin, Type type> : Instruction {
string cstr, InstrItinClass itin> : Instruction {
field bits<32> Inst;
let Namespace = "Hexagon";
@ -44,15 +31,11 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
let Constraints = cstr;
let Itinerary = itin;
// *** Must match HexagonBaseInfo.h ***
Type HexagonType = type;
let TSFlags{4-0} = HexagonType.Value;
bits<1> isHexagonSolo = 0;
let TSFlags{5} = isHexagonSolo;
// *** The code below must match HexagonBaseInfo.h ***
// Predicated instructions.
bits<1> isPredicated = 0;
let TSFlags{6} = isPredicated;
let TSFlags{1} = isPredicated;
// *** The code above must match HexagonBaseInfo.h ***
}
@ -64,40 +47,28 @@ class InstHexagon<dag outs, dag ins, string asmstr, list<dag> pattern,
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class LDInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", LD, TypeLD> {
: InstHexagon<outs, ins, asmstr, pattern, "", LD> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
let mayLoad = 1;
}
// LD Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class LDInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, LD, TypeLD> {
: InstHexagon<outs, ins, asmstr, pattern, cstr, LD> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
bits<13> imm13;
let mayLoad = 1;
}
// ST Instruction Class in V2/V3 can take SLOT0 only.
// ST Instruction Class in V4 can take SLOT0 & SLOT1.
// Definition of the instruction class CHANGED from V2/V3 to V4.
class STInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", ST, TypeST> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
let mayStore = 1;
}
// SYSTEM Instruction Class in V4 can take SLOT0 only
// In V2/V3 we used ST for this but in v4 ST can take SLOT0 or SLOT1.
class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", SYS, TypeSYSTEM> {
: InstHexagon<outs, ins, asmstr, pattern, "", ST> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
@ -108,18 +79,17 @@ class SYSInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class CHANGED from V2/V3 to V4.
class STInstPost<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, ST, TypeST> {
: InstHexagon<outs, ins, asmstr, pattern, cstr, ST> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
bits<13> imm13;
let mayStore = 1;
}
// ALU32 Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", ALU32, TypeALU32> {
: InstHexagon<outs, ins, asmstr, pattern, "", ALU32> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -132,17 +102,7 @@ class ALU32Type<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from ALU64 to XTYPE from V2/V3 to V4.
class ALU64Type<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", ALU64, TypeXTYPE> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
bits<16> imm16;
bits<16> imm16_2;
}
class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, ALU64, TypeXTYPE> {
: InstHexagon<outs, ins, asmstr, pattern, "", ALU64> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -155,7 +115,7 @@ class ALU64_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", M, TypeXTYPE> {
: InstHexagon<outs, ins, asmstr, pattern, "", M> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -166,8 +126,8 @@ class MInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from M to XTYPE from V2/V3 to V4.
class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, M, TypeXTYPE> {
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, M> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -178,7 +138,9 @@ class MInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", S, TypeXTYPE> {
//: InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, M)> {
: InstHexagon<outs, ins, asmstr, pattern, "", S> {
// : InstHexagon<outs, ins, asmstr, pattern, "", S> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -189,8 +151,8 @@ class SInst<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of the instruction class NOT CHANGED.
// Name of the Instruction Class changed from S to XTYPE from V2/V3 to V4.
class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, S, TypeXTYPE> {
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
// : InstHexagon<outs, ins, asmstr, pattern, cstr, S> {
// : InstHexagon<outs, ins, asmstr, pattern, cstr, !if(V4T, XTYPE_V4, S)> {
bits<5> rd;
@ -201,14 +163,14 @@ class SInst_acc<dag outs, dag ins, string asmstr, list<dag> pattern,
// J Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JType<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", J, TypeJ> {
: InstHexagon<outs, ins, asmstr, pattern, "", J> {
bits<16> imm16;
}
// JR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", JR, TypeJR> {
: InstHexagon<outs, ins, asmstr, pattern, "", JR> {
bits<5> rs;
bits<5> pu; // Predicate register
}
@ -216,22 +178,15 @@ class JRType<dag outs, dag ins, string asmstr, list<dag> pattern>
// CR Instruction Class in V2/V3/V4.
// Definition of the instruction class NOT CHANGED.
class CRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", CR, TypeCR> {
: InstHexagon<outs, ins, asmstr, pattern, "", CR> {
bits<5> rs;
bits<10> imm10;
}
class Marker<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", MARKER, TypeMARKER> {
let isCodeGenOnly = 1;
let isPseudo = 1;
}
class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO, TypePSEUDO> {
let isCodeGenOnly = 1;
let isPseudo = 1;
}
: InstHexagon<outs, ins, asmstr, pattern, "", PSEUDO>;
//===----------------------------------------------------------------------===//
// Instruction Classes Definitions -
@ -267,11 +222,6 @@ class ALU64_rr<dag outs, dag ins, string asmstr, list<dag> pattern>
: ALU64Type<outs, ins, asmstr, pattern> {
}
class ALU64_ri<dag outs, dag ins, string asmstr, list<dag> pattern>
: ALU64Type<outs, ins, asmstr, pattern> {
let rt{0-4} = 0;
}
// J Type Instructions.
class JInst<dag outs, dag ins, string asmstr, list<dag> pattern>
: JType<outs, ins, asmstr, pattern> {
@ -287,14 +237,12 @@ class JRInst<dag outs, dag ins, string asmstr, list<dag> pattern>
class STInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
: STInstPost<outs, ins, asmstr, pattern, cstr> {
let rt{0-4} = 0;
let mayStore = 1;
}
// Post increment LD Instruction.
class LDInstPI<dag outs, dag ins, string asmstr, list<dag> pattern, string cstr>
: LDInstPost<outs, ins, asmstr, pattern, cstr> {
let rt{0-4} = 0;
let mayLoad = 1;
}
//===----------------------------------------------------------------------===//

View File

@ -11,25 +11,11 @@
//
//===----------------------------------------------------------------------===//
//----------------------------------------------------------------------------//
// Hexagon Instruction Flags +
//
// *** Must match BaseInfo.h ***
//----------------------------------------------------------------------------//
def TypeMEMOP : Type<9>;
def TypeNV : Type<10>;
def TypePREFIX : Type<30>;
//----------------------------------------------------------------------------//
// Instruction Classes Definitions +
//----------------------------------------------------------------------------//
//
// NV type instructions.
//
class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", NV_V4, TypeNV> {
: InstHexagon<outs, ins, asmstr, pattern, "", NV_V4> {
bits<5> rd;
bits<5> rs;
bits<13> imm13;
@ -38,7 +24,7 @@ class NVInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
// Definition of Post increment new value store.
class NVInstPost_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
string cstr>
: InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4, TypeNV> {
: InstHexagon<outs, ins, asmstr, pattern, cstr, NV_V4> {
bits<5> rd;
bits<5> rs;
bits<5> rt;
@ -53,15 +39,8 @@ class NVInstPI_V4<dag outs, dag ins, string asmstr, list<dag> pattern,
}
class MEMInst_V4<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4, TypeMEMOP> {
: InstHexagon<outs, ins, asmstr, pattern, "", MEM_V4> {
bits<5> rd;
bits<5> rs;
bits<6> imm6;
}
class Immext<dag outs, dag ins, string asmstr, list<dag> pattern>
: InstHexagon<outs, ins, asmstr, pattern, "", PREFIX, TypePREFIX> {
let isCodeGenOnly = 1;
bits<26> imm26;
}

File diff suppressed because it is too large

View File

@ -160,20 +160,10 @@ public:
bool isS8_Immediate(const int value) const;
bool isS6_Immediate(const int value) const;
bool isSaveCalleeSavedRegsCall(const MachineInstr* MI) const;
bool isConditionalTransfer(const MachineInstr* MI) const;
bool isConditionalALU32 (const MachineInstr* MI) const;
bool isConditionalLoad (const MachineInstr* MI) const;
bool isConditionalStore(const MachineInstr* MI) const;
bool isDeallocRet(const MachineInstr *MI) const;
unsigned getInvertedPredicatedOpcode(const int Opc) const;
bool isExtendable(const MachineInstr* MI) const;
bool isExtended(const MachineInstr* MI) const;
bool isPostIncrement(const MachineInstr* MI) const;
bool isNewValueStore(const MachineInstr* MI) const;
bool isNewValueJump(const MachineInstr* MI) const;
unsigned getImmExtForm(const MachineInstr* MI) const;
unsigned getNormalBranchForm(const MachineInstr* MI) const;
private:
int getMatchingCondBranchOpcode(int Opc, bool sense) const;

View File

@ -3046,7 +3046,3 @@ include "HexagonInstrInfoV3.td"
//===----------------------------------------------------------------------===//
include "HexagonInstrInfoV4.td"
//===----------------------------------------------------------------------===//
// V4 Instructions -
//===----------------------------------------------------------------------===//

View File

@ -41,11 +41,10 @@ let isCall = 1, neverHasSideEffects = 1,
}
// Jump to address from register
// if(p?.new) jumpr:t r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if ($src1.new) jumpr:t $src2",
[]>, Requires<[HasV3T]>;
}
@ -53,7 +52,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if (!p?.new) jumpr:t r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnNotPt_V3: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cNotPnewt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if (!$src1.new) jumpr:t $src2",
[]>, Requires<[HasV3T]>;
}
@ -62,7 +61,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if(p?.new) jumpr:nt r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if ($src1.new) jumpr:nt $src2",
[]>, Requires<[HasV3T]>;
}
@ -70,7 +69,7 @@ let isReturn = 1, isTerminator = 1, isBarrier = 1,
// if (!p?.new) jumpr:nt r?
let isReturn = 1, isTerminator = 1, isBarrier = 1,
Defs = [PC], Uses = [R31] in {
def JMPR_cdnNotPnt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
def JMPR_cNotPnewNt: JRInst<(outs), (ins PredRegs:$src1, IntRegs:$src2),
"if (!$src1.new) jumpr:nt $src2",
[]>, Requires<[HasV3T]>;
}
@ -87,22 +86,20 @@ let AddedComplexity = 200 in
def MAXw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = max($src2, $src1)",
[(set (i64 DoubleRegs:$dst),
(i64 (select (i1 (setlt (i64 DoubleRegs:$src2),
(i64 DoubleRegs:$src1))),
(i64 DoubleRegs:$src1),
(i64 DoubleRegs:$src2))))]>,
[(set DoubleRegs:$dst, (select (i1 (setlt DoubleRegs:$src2,
DoubleRegs:$src1)),
DoubleRegs:$src1,
DoubleRegs:$src2))]>,
Requires<[HasV3T]>;
let AddedComplexity = 200 in
def MINw_dd : ALU64_rr<(outs DoubleRegs:$dst), (ins DoubleRegs:$src1,
DoubleRegs:$src2),
"$dst = min($src2, $src1)",
[(set (i64 DoubleRegs:$dst),
(i64 (select (i1 (setgt (i64 DoubleRegs:$src2),
(i64 DoubleRegs:$src1))),
(i64 DoubleRegs:$src1),
(i64 DoubleRegs:$src2))))]>,
[(set DoubleRegs:$dst, (select (i1 (setgt DoubleRegs:$src2,
DoubleRegs:$src1)),
DoubleRegs:$src1,
DoubleRegs:$src2))]>,
Requires<[HasV3T]>;
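Read as an ordinary conditional move, the MAXw_dd pattern above selects src1 when src2 < src1 and src2 otherwise, i.e. a 64-bit max; MINw_dd mirrors it with setgt. A plain C++ restatement of what the selected DAG computes (function names are illustrative, not an LLVM API):

#include <cstdint>
#include <cstdio>

// MAXw_dd pattern: select(setlt(src2, src1), src1, src2) == max(src1, src2).
static int64_t maxw_dd(int64_t Src1, int64_t Src2) {
  return (Src2 < Src1) ? Src1 : Src2;
}

// MINw_dd pattern: select(setgt(src2, src1), src1, src2) == min(src1, src2).
static int64_t minw_dd(int64_t Src1, int64_t Src2) {
  return (Src2 > Src1) ? Src1 : Src2;
}

int main() {
  std::printf("%lld %lld\n", (long long)maxw_dd(7, -3), (long long)minw_dd(7, -3));
  return 0;  // prints "7 -3"
}
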
//===----------------------------------------------------------------------===//
@ -112,25 +109,25 @@ Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (seteq (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegEzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (seteq IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegEzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setne (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegNzt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setne IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegNzt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setle (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegLezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setle IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegLezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setge (i32 IntRegs:$src1), 0)), bb:$offset),
// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setge IntRegs:$src1, 0)), bb:$offset),
// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setgt (i32 IntRegs:$src1), -1)), bb:$offset),
// (JMP_RegGezt (i32 IntRegs:$src1), bb:$offset)>, Requires<[HasV3T]>;
//def : Pat <(brcond (i1 (setgt IntRegs:$src1, -1)), bb:$offset),
// (JMP_RegGezt IntRegs:$src1, bb:$offset)>, Requires<[HasV3T]>;
// Map call instruction
def : Pat<(call (i32 IntRegs:$dst)),
(CALLRv3 (i32 IntRegs:$dst))>, Requires<[HasV3T]>;
def : Pat<(call IntRegs:$dst),
(CALLRv3 IntRegs:$dst)>, Requires<[HasV3T]>;
def : Pat<(call tglobaladdr:$dst),
(CALLv3 tglobaladdr:$dst)>, Requires<[HasV3T]>;
def : Pat<(call texternalsym:$dst),

File diff suppressed because it is too large

View File

@ -1,41 +0,0 @@
//===- HexagonMCInst.h - Hexagon sub-class of MCInst ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This class extends MCInst to allow some VLIW annotation.
//
//===----------------------------------------------------------------------===//
#ifndef HEXAGONMCINST_H
#define HEXAGONMCINST_H
#include "llvm/MC/MCInst.h"
#include "llvm/CodeGen/MachineInstr.h"
namespace llvm {
class HexagonMCInst: public MCInst {
// Packet start and end markers
unsigned startPacket: 1, endPacket: 1;
const MachineInstr *MachineI;
public:
explicit HexagonMCInst(): MCInst(),
startPacket(0), endPacket(0) {}
const MachineInstr* getMI() const { return MachineI; };
void setMI(const MachineInstr *MI) { MachineI = MI; };
bool isStartPacket() const { return (startPacket); };
bool isEndPacket() const { return (endPacket); };
void setStartPacket(bool yes) { startPacket = yes; };
void setEndPacket(bool yes) { endPacket = yes; };
};
}
#endif
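The removed class above carries just two 1-bit markers on top of MCInst so the printer knows where a VLIW packet opens and closes. A self-contained sketch of the same marking pattern, without the LLVM headers (the struct, names, and tiny driver are illustrative only):

#include <cstdio>

// Simplified stand-in for HexagonMCInst's packet markers.
struct Insn {
  const char *Name;
  bool StartPacket;
  bool EndPacket;
};

int main() {
  // Mark the first and last member of a three-instruction bundle, as the
  // old HexagonAsmPrinter::EmitInstruction loop did before emitting each one.
  Insn Bundle[] = { {"r1 = add(r2, r3)", false, false},
                    {"memw(r4) = r1",    false, false},
                    {"jump target",      false, false} };
  const unsigned Size = sizeof(Bundle) / sizeof(Bundle[0]);
  for (unsigned I = 0; I < Size; ++I) {
    Bundle[I].StartPacket = (I == 0);
    Bundle[I].EndPacket   = (I == Size - 1);
    std::printf("%s %s %s\n", Bundle[I].StartPacket ? "{" : " ",
                Bundle[I].Name, Bundle[I].EndPacket ? "}" : " ");
  }
  return 0;
}
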

View File

@ -49,7 +49,7 @@ void llvm::HexagonLowerToMC(const MachineInstr* MI, MCInst& MCI,
switch (MO.getType()) {
default:
MI->dump();
llvm_unreachable("unknown operand type");
assert(0 && "unknown operand type");
case MachineOperand::MO_Register:
// Ignore all implicit register operands.
if (MO.isImplicit()) continue;

View File

@ -13,6 +13,7 @@ def LSUNIT : FuncUnit;
def MUNIT : FuncUnit;
def SUNIT : FuncUnit;
// Itinerary classes
def ALU32 : InstrItinClass;
def ALU64 : InstrItinClass;
@ -23,25 +24,23 @@ def LD : InstrItinClass;
def M : InstrItinClass;
def ST : InstrItinClass;
def S : InstrItinClass;
def SYS : InstrItinClass;
def MARKER : InstrItinClass;
def PSEUDO : InstrItinClass;
def HexagonItineraries :
ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<SYS , [InstrStage<1, [LSUNIT]>]>,
InstrItinData<MARKER , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
]>;
ProcessorItineraries<[LUNIT, LSUNIT, MUNIT, SUNIT], [], [
InstrItinData<ALU32 , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>,
InstrItinData<ALU64 , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<CR , [InstrStage<1, [SUNIT]>]>,
InstrItinData<J , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<JR , [InstrStage<1, [MUNIT]>]>,
InstrItinData<LD , [InstrStage<1, [LUNIT, LSUNIT]>]>,
InstrItinData<M , [InstrStage<1, [MUNIT, SUNIT]>]>,
InstrItinData<ST , [InstrStage<1, [LSUNIT]>]>,
InstrItinData<S , [InstrStage<1, [SUNIT, MUNIT]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [LUNIT, LSUNIT, MUNIT, SUNIT]>]>
]>;
//===----------------------------------------------------------------------===//
// V4 Machine Info +

View File

@ -23,6 +23,7 @@
// | SLOT3 | XTYPE ALU32 J CR |
// |===========|==================================================|
// Functional Units.
def SLOT0 : FuncUnit;
def SLOT1 : FuncUnit;
@ -33,26 +34,22 @@ def SLOT3 : FuncUnit;
def NV_V4 : InstrItinClass;
def MEM_V4 : InstrItinClass;
// ALU64/M/S Instruction classes of V2 are collectively known as XTYPE in V4.
def PREFIX : InstrItinClass;
def HexagonItinerariesV4 :
ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3], [], [
InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<SYS , [InstrStage<1, [SLOT0]>]>,
InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<MARKER , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<PREFIX , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>
]>;
def HexagonItinerariesV4 : ProcessorItineraries<
[SLOT0, SLOT1, SLOT2, SLOT3], [], [
InstrItinData<LD , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<ST , [InstrStage<1, [SLOT0, SLOT1]>]>,
InstrItinData<ALU32 , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<NV_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<MEM_V4 , [InstrStage<1, [SLOT0]>]>,
InstrItinData<J , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<JR , [InstrStage<1, [SLOT2]>]>,
InstrItinData<CR , [InstrStage<1, [SLOT3]>]>,
InstrItinData<PSEUDO , [InstrStage<1, [SLOT0, SLOT1, SLOT2, SLOT3]>]>,
InstrItinData<ALU64 , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<M , [InstrStage<1, [SLOT2, SLOT3]>]>,
InstrItinData<S , [InstrStage<1, [SLOT2, SLOT3]>]>
]>;
//===----------------------------------------------------------------------===//
// Hexagon V4 Resource Definitions -

View File

@ -100,23 +100,23 @@ TargetPassConfig *HexagonTargetMachine::createPassConfig(PassManagerBase &PM) {
}
bool HexagonPassConfig::addInstSelector() {
PM.add(createHexagonRemoveExtendOps(getHexagonTargetMachine()));
PM.add(createHexagonISelDag(getHexagonTargetMachine()));
PM.add(createHexagonPeephole());
PM->add(createHexagonRemoveExtendOps(getHexagonTargetMachine()));
PM->add(createHexagonISelDag(getHexagonTargetMachine()));
PM->add(createHexagonPeephole());
return false;
}
bool HexagonPassConfig::addPreRegAlloc() {
if (!DisableHardwareLoops) {
PM.add(createHexagonHardwareLoops());
PM->add(createHexagonHardwareLoops());
}
return false;
}
bool HexagonPassConfig::addPostRegAlloc() {
PM.add(createHexagonCFGOptimizer(getHexagonTargetMachine()));
PM->add(createHexagonCFGOptimizer(getHexagonTargetMachine()));
return true;
}
@ -129,17 +129,14 @@ bool HexagonPassConfig::addPreSched2() {
bool HexagonPassConfig::addPreEmitPass() {
if (!DisableHardwareLoops) {
PM.add(createHexagonFixupHwLoops());
PM->add(createHexagonFixupHwLoops());
}
// Expand Spill code for predicate registers.
PM.add(createHexagonExpandPredSpillCode(getHexagonTargetMachine()));
PM->add(createHexagonExpandPredSpillCode(getHexagonTargetMachine()));
// Split up TFRcondsets into conditional transfers.
PM.add(createHexagonSplitTFRCondSets(getHexagonTargetMachine()));
// Create Packets.
PM.add(createHexagonPacketizer());
PM->add(createHexagonSplitTFRCondSets(getHexagonTargetMachine()));
return false;
}

File diff suppressed because it is too large

View File

@ -15,7 +15,6 @@
#include "Hexagon.h"
#include "HexagonAsmPrinter.h"
#include "HexagonInstPrinter.h"
#include "HexagonMCInst.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h"
@ -38,50 +37,20 @@ StringRef HexagonInstPrinter::getRegName(unsigned RegNo) const {
void HexagonInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
StringRef Annot) {
printInst((const HexagonMCInst*)(MI), O, Annot);
}
void HexagonInstPrinter::printInst(const HexagonMCInst *MI, raw_ostream &O,
StringRef Annot) {
const char packetPadding[] = " ";
const char startPacket = '{',
endPacket = '}';
// TODO: add outer HW loop when it's supported too.
if (MI->getOpcode() == Hexagon::ENDLOOP0) {
// Ending a hardware loop is different from ending a regular packet.
assert(MI->isEndPacket() && "Loop end must also end the packet");
MCInst Nop;
if (MI->isStartPacket()) {
// There must be a packet to end a loop.
// FIXME: when shuffling is always run, this shouldn't be needed.
HexagonMCInst Nop;
StringRef NoAnnot;
Nop.setOpcode (Hexagon::NOP);
Nop.setStartPacket (MI->isStartPacket());
printInst (&Nop, O, NoAnnot);
}
// Close the packet.
if (MI->isEndPacket())
O << packetPadding << endPacket;
printInstruction(MI, O);
}
else {
// Prefix the insn opening the packet.
if (MI->isStartPacket())
O << packetPadding << startPacket << '\n';
printInstruction(MI, O);
// Suffix the insn closing the packet.
if (MI->isEndPacket())
// Suffix the packet in a new line always, since the GNU assembler has
// issues with a closing brace on the same line as CONST{32,64}.
O << '\n' << packetPadding << endPacket;
O << packetPadding << startPacket << '\n';
Nop.setOpcode(Hexagon::NOP);
printInstruction(&Nop, O);
O << packetPadding << endPacket;
}
printInstruction(MI, O);
printAnnotation(O, Annot);
}
@ -100,18 +69,18 @@ void HexagonInstPrinter::printOperand(const MCInst *MI, unsigned OpNo,
}
}
void HexagonInstPrinter::printImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printImmOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printExtOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printUnsignedImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printUnsignedImmOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
O << MI->getOperand(OpNo).getImm();
}
@ -120,13 +89,13 @@ void HexagonInstPrinter::printNegImmOperand(const MCInst *MI, unsigned OpNo,
O << -MI->getOperand(OpNo).getImm();
}
void HexagonInstPrinter::printNOneImmOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printNOneImmOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
O << -1;
}
void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printMEMriOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
const MCOperand& MO0 = MI->getOperand(OpNo);
const MCOperand& MO1 = MI->getOperand(OpNo + 1);
@ -134,8 +103,8 @@ void HexagonInstPrinter::printMEMriOperand(const MCInst *MI, unsigned OpNo,
O << " + #" << MO1.getImm();
}
void HexagonInstPrinter::printFrameIndexOperand(const MCInst *MI, unsigned OpNo,
raw_ostream &O) const {
void HexagonInstPrinter::printFrameIndexOperand
(const MCInst *MI, unsigned OpNo, raw_ostream &O) const {
const MCOperand& MO0 = MI->getOperand(OpNo);
const MCOperand& MO1 = MI->getOperand(OpNo + 1);

View File

@ -14,7 +14,6 @@
#ifndef HEXAGONINSTPRINTER_H
#define HEXAGONINSTPRINTER_H
#include "HexagonMCInst.h"
#include "llvm/MC/MCInstPrinter.h"
namespace llvm {
@ -26,7 +25,6 @@ namespace llvm {
: MCInstPrinter(MAI, MII, MRI) {}
virtual void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot);
void printInst(const HexagonMCInst *MI, raw_ostream &O, StringRef Annot);
virtual StringRef getOpcodeName(unsigned Opcode) const;
void printInstruction(const MCInst *MI, raw_ostream &O);
StringRef getRegName(unsigned RegNo) const;

View File

@ -23,41 +23,14 @@ namespace llvm {
/// instruction info tracks.
///
namespace HexagonII {
// *** The code below must match HexagonInstrFormat*.td *** //
// Insn types.
// *** Must match HexagonInstrFormat*.td ***
enum Type {
TypePSEUDO = 0,
TypeALU32 = 1,
TypeCR = 2,
TypeJR = 3,
TypeJ = 4,
TypeLD = 5,
TypeST = 6,
TypeSYSTEM = 7,
TypeXTYPE = 8,
TypeMEMOP = 9,
TypeNV = 10,
TypePREFIX = 30, // Such as extenders.
TypeMARKER = 31 // Such as end of a HW loop.
};
// MCInstrDesc TSFlags
// *** Must match HexagonInstrFormat*.td ***
enum {
// This 5-bit field describes the insn type.
TypePos = 0,
TypeMask = 0x1f,
// Solo instructions.
SoloPos = 5,
SoloMask = 0x1,
// Predicated instructions.
PredicatedPos = 6,
PredicatedPos = 1,
PredicatedMask = 0x1
};
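These positions and masks are what the target reads back out of MCInstrDesc::TSFlags at run time. A minimal decoding sketch with a local copy of the wider, pre-change layout shown above; the helper names are assumptions for illustration, not an existing LLVM API:

#include <cstdint>
#include <cstdio>

namespace HexagonII {
  enum { TypePos = 0, TypeMask = 0x1f,      // 5-bit insn type
         SoloPos = 5, SoloMask = 0x1,       // solo instructions
         PredicatedPos = 6, PredicatedMask = 0x1 };
}

static unsigned getInsnType(uint64_t TSFlags) {
  return (TSFlags >> HexagonII::TypePos) & HexagonII::TypeMask;
}

static bool isPredicated(uint64_t TSFlags) {
  return (TSFlags >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
}

int main() {
  uint64_t Flags = (1u << HexagonII::TypePos) |        // TypeALU32
                   (1u << HexagonII::PredicatedPos);   // predicated bit set
  std::printf("type=%u predicated=%d\n", getInsnType(Flags), (int)isPredicated(Flags));
  return 0;  // prints "type=1 predicated=1"
}
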

View File

@ -9,10 +9,6 @@
// This describes the calling conventions for MBlaze architecture.
//===----------------------------------------------------------------------===//
/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>:
CCIf<!strconcat("State.getTarget().getSubtarget<MBlazeSubtarget>().", F), A>;
//===----------------------------------------------------------------------===//
// MBlaze ABI Calling Convention
//===----------------------------------------------------------------------===//

View File

@ -68,7 +68,7 @@ TargetPassConfig *MBlazeTargetMachine::createPassConfig(PassManagerBase &PM) {
// Install an instruction selector pass using
// the ISelDag to gen MBlaze code.
bool MBlazePassConfig::addInstSelector() {
PM.add(createMBlazeISelDag(getMBlazeTargetMachine()));
PM->add(createMBlazeISelDag(getMBlazeTargetMachine()));
return false;
}
@ -76,6 +76,6 @@ bool MBlazePassConfig::addInstSelector() {
// machine code is emitted. return true if -print-machineinstrs should
// print out the code after the passes.
bool MBlazePassConfig::addPreEmitPass() {
PM.add(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine()));
PM->add(createMBlazeDelaySlotFillerPass(getMBlazeTargetMachine()));
return true;
}

View File

@ -60,12 +60,12 @@ TargetPassConfig *MSP430TargetMachine::createPassConfig(PassManagerBase &PM) {
bool MSP430PassConfig::addInstSelector() {
// Install an instruction selector.
PM.add(createMSP430ISelDag(getMSP430TargetMachine(), getOptLevel()));
PM->add(createMSP430ISelDag(getMSP430TargetMachine(), getOptLevel()));
return false;
}
bool MSP430PassConfig::addPreEmitPass() {
// Must run branch selection immediately preceding the asm printer.
PM.add(createMSP430BranchSelectionPass());
PM->add(createMSP430BranchSelectionPass());
return false;
}

Some files were not shown because too many files have changed in this diff