Vendor import of llvm trunk r302418:

https://llvm.org/svn/llvm-project/llvm/trunk@302418
Dimitry Andric 2017-05-08 17:12:57 +00:00
parent 148779df30
commit c46e6a5940
398 changed files with 33171 additions and 20895 deletions

@ -38,6 +38,13 @@ B
**BB Vectorization**
Basic-Block Vectorization
**BDCE**
Bit-tracking dead code elimination. Some bit-wise instructions (shifts,
ands, ors, etc.) "kill" some of their input bits -- that is, they make it
such that those bits can be either zero or one without affecting control or
data flow of a program. The BDCE pass removes instructions that only
compute these dead bits.
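As a rough illustration (plain C++, not part of this commit, similar in spirit to the in-tree BDCE tests): ``CTZ`` below can only be in [0, 31], so bit 6 of it is known zero; the ``and`` demands only that dead bit, which makes the whole count computation removable.

    unsigned Demo(unsigned Val) {
      unsigned CTZ = __builtin_ctz(Val | 1); // always in [0, 31]
      return CTZ & 64;                       // demands only bit 6: dead
    }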
**BURS**
Bottom Up Rewriting System --- A method of instruction selection for code
generation. An example is the `BURG

@ -78,6 +78,8 @@ Simplifying MIR files
The MIR code coming out of ``-stop-after``/``-stop-before`` is very verbose;
tests are more accessible and future-proof when simplified:
- Use the ``-simplify-mir`` option with llc.
- Machine function attributes often have default values or the test works just
as well with default values. Typical candidates for this are: `alignment:`,
`exposesReturnsTwice`, `legalized`, `regBankSelected`, `selected`.
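As an example of the first tip, a run producing simplified MIR might look like ``llc -stop-after=irtranslator -simplify-mir foo.ll -o foo.mir`` (the pass and file names here are hypothetical).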

@ -1092,7 +1092,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
BinopPrecedence.erase(Proto->getOperatorName());
BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}

@ -1092,7 +1092,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
BinopPrecedence.erase(Proto->getOperatorName());
BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}

@ -1092,7 +1092,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
BinopPrecedence.erase(Proto->getOperatorName());
BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}

@ -932,7 +932,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
BinopPrecedence.erase(Proto->getOperatorName());
BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}

@ -1099,7 +1099,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
BinopPrecedence.erase(Proto->getOperatorName());
BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}

@ -1097,7 +1097,7 @@ Function *FunctionAST::codegen() {
TheFunction->eraseFromParent();
if (P.isBinaryOp())
BinopPrecedence.erase(Proto->getOperatorName());
BinopPrecedence.erase(P.getOperatorName());
return nullptr;
}

@ -842,6 +842,7 @@ public:
///
/// \returns *this
APInt &operator*=(const APInt &RHS);
APInt &operator*=(uint64_t RHS);
/// \brief Addition assignment operator.
///
@ -2043,6 +2044,16 @@ inline APInt operator-(uint64_t LHS, APInt b) {
return b;
}
inline APInt operator*(APInt a, uint64_t RHS) {
a *= RHS;
return a;
}
inline APInt operator*(uint64_t LHS, APInt b) {
b *= LHS;
return b;
}
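The commuted scalar overloads can be exercised as below (a usage sketch, not part of the patch); they avoid materializing a temporary APInt for the ``uint64_t`` operand.

    llvm::APInt X(64, 10);    // 64-bit value 10
    llvm::APInt Y = X * 3ULL; // operator*(APInt, uint64_t)
    llvm::APInt Z = 3ULL * X; // operator*(uint64_t, APInt)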
namespace APIntOps {

@ -217,7 +217,7 @@ public:
unsigned BitPos = Prev % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
// Mask off previous bits.
Copy &= ~0UL << BitPos;
Copy &= maskTrailingZeros<BitWord>(BitPos);
if (Copy != 0)
return WordPos * BITWORD_SIZE + countTrailingZeros(Copy);
@ -229,7 +229,7 @@ public:
return -1;
}
/// find_next_unset - Returns the index of the next usnet bit following the
/// find_next_unset - Returns the index of the next unset bit following the
/// "Prev" bit. Returns -1 if all remaining bits are set.
int find_next_unset(unsigned Prev) const {
++Prev;
@ -253,7 +253,34 @@ public:
return -1;
}
/// clear - Clear all bits.
/// find_prev - Returns the index of the first set bit that precedes
/// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
int find_prev(unsigned PriorTo) {
if (PriorTo == 0)
return -1;
--PriorTo;
unsigned WordPos = PriorTo / BITWORD_SIZE;
unsigned BitPos = PriorTo % BITWORD_SIZE;
BitWord Copy = Bits[WordPos];
// Mask off next bits.
Copy &= maskTrailingOnes<BitWord>(BitPos + 1);
if (Copy != 0)
return (WordPos + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1;
// Check previous words.
for (unsigned i = 1; i <= WordPos; ++i) {
unsigned Index = WordPos - i;
if (Bits[Index] == 0)
continue;
return (Index + 1) * BITWORD_SIZE - countLeadingZeros(Bits[Index]) - 1;
}
return -1;
}
/// clear - Removes all bits from the bitvector. Does not change capacity.
void clear() {
Size = 0;
}
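A usage sketch for the new ``find_prev`` (values hypothetical):

    llvm::BitVector BV(64);
    BV.set(5);
    BV.set(40);
    int I = BV.find_prev(40); // 5: the first set bit before index 40
    int J = BV.find_prev(5);  // -1: nothing is set below index 5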

@ -278,6 +278,24 @@ public:
return getPointer()->find_next_unset(Prev);
}
/// find_prev - Returns the index of the first set bit that precedes
/// the bit at \p PriorTo. Returns -1 if all previous bits are unset.
int find_prev(unsigned PriorTo) const {
if (isSmall()) {
if (PriorTo == 0)
return -1;
--PriorTo;
uintptr_t Bits = getSmallBits();
Bits &= maskTrailingOnes<uintptr_t>(PriorTo + 1);
if (Bits == 0)
return -1;
return NumBaseBits - countLeadingZeros(Bits) - 1;
}
return getPointer()->find_prev(PriorTo);
}
/// Clear all bits.
void clear() {
if (!isSmall())

@ -220,8 +220,8 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
BI = df_ext_begin(getHeader(), VisitSet),
BE = df_ext_end(getHeader(), VisitSet);
// Keep track of the number of BBs visited.
unsigned NumVisited = 0;
// Keep track of the BBs visited.
SmallPtrSet<BlockT*, 8> VisitedBBs;
// Check the individual blocks.
for ( ; BI != BE; ++BI) {
@ -259,10 +259,18 @@ void LoopBase<BlockT, LoopT>::verifyLoop() const {
assert(BB != &getHeader()->getParent()->front() &&
"Loop contains function entry block!");
NumVisited++;
VisitedBBs.insert(BB);
}
assert(NumVisited == getNumBlocks() && "Unreachable block in loop");
if (VisitedBBs.size() != getNumBlocks()) {
dbgs() << "The following blocks are unreachable in the loop: ";
for (auto BB : Blocks) {
if (!VisitedBBs.count(BB)) {
dbgs() << *BB << "\n";
}
}
assert(false && "Unreachable block in loop");
}
// Check the subloops.
for (iterator I = begin(), E = end(); I != E; ++I)

@ -54,6 +54,18 @@ public:
ProfileSummaryInfo(Module &M) : M(M) {}
ProfileSummaryInfo(ProfileSummaryInfo &&Arg)
: M(Arg.M), Summary(std::move(Arg.Summary)) {}
/// Handle the invalidation of this information.
///
/// When used as a result of \c ProfileSummaryAnalysis this method will be
/// called when the module this was computed for changes. Since profile
/// summary is immutable after it is annotated on the module, we return false
/// here.
bool invalidate(Module &, const PreservedAnalyses &,
ModuleAnalysisManager::Invalidator &) {
return false;
}
/// Returns the profile count for \p CallInst.
static Optional<uint64_t> getProfileCount(const Instruction *CallInst,
BlockFrequencyInfo *BFI);

@ -782,13 +782,13 @@ private:
/// Set the memoized range for the given SCEV.
const ConstantRange &setRange(const SCEV *S, RangeSignHint Hint,
const ConstantRange &CR) {
ConstantRange &&CR) {
DenseMap<const SCEV *, ConstantRange> &Cache =
Hint == HINT_RANGE_UNSIGNED ? UnsignedRanges : SignedRanges;
auto Pair = Cache.insert({S, CR});
auto Pair = Cache.try_emplace(S, std::move(CR));
if (!Pair.second)
Pair.first->second = CR;
Pair.first->second = std::move(CR);
return Pair.first->second;
}
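For context: like ``std::map::try_emplace``, ``DenseMap::try_emplace`` constructs the mapped value only when the key is absent and leaves its arguments unmoved on failure, so ``CR`` can still be moved into the existing entry on the following line. A small sketch with a hypothetical map:

    llvm::DenseMap<int, std::string> M;
    auto R = M.try_emplace(1, "a"); // R.second == true: value constructed
    auto S = M.try_emplace(1, "b"); // S.second == false: "a" is retained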
@ -816,6 +816,10 @@ private:
/// Helper function called from createNodeForPHI.
const SCEV *createAddRecFromPHI(PHINode *PN);
/// A helper function for createAddRecFromPHI to handle simple cases.
const SCEV *createSimpleAffineAddRec(PHINode *PN, Value *BEValueV,
Value *StartValueV);
/// Helper function called from createNodeForPHI.
const SCEV *createNodeFromSelectLikePHI(PHINode *PN);
@ -1565,7 +1569,7 @@ public:
/// delinearization).
void findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
const SCEV *ElementSize) const;
const SCEV *ElementSize);
void print(raw_ostream &OS) const;
void verify() const;

@ -1115,6 +1115,9 @@ TLI_DEFINE_STRING_INTERNAL("vsprintf")
/// int vsscanf(const char *s, const char *format, va_list arg);
TLI_DEFINE_ENUM_INTERNAL(vsscanf)
TLI_DEFINE_STRING_INTERNAL("vsscanf")
/// size_t wcslen (const wchar_t* wcs);
TLI_DEFINE_ENUM_INTERNAL(wcslen)
TLI_DEFINE_STRING_INTERNAL("wcslen")
/// ssize_t write(int fildes, const void *buf, size_t nbyte);
TLI_DEFINE_ENUM_INTERNAL(write)
TLI_DEFINE_STRING_INTERNAL("write")

@ -226,6 +226,7 @@ public:
FUNCTION_EXIT = 1,
TAIL_CALL = 2,
LOG_ARGS_ENTER = 3,
CUSTOM_EVENT = 4,
};
// The table will contain these structs that point to the sled, the function
@ -242,7 +243,7 @@ public:
};
// All the sleds to be emitted.
std::vector<XRayFunctionEntry> Sleds;
SmallVector<XRayFunctionEntry, 4> Sleds;
// Helper function to record a given XRay sled.
void recordSled(MCSymbol *Sled, const MachineInstr &MI, SledKind Kind);

@ -506,6 +506,7 @@ protected:
bool selectCast(const User *I, unsigned Opcode);
bool selectExtractValue(const User *I);
bool selectInsertValue(const User *I);
bool selectXRayCustomEvent(const CallInst *II);
private:
/// \brief Handle PHI nodes in successor blocks.

@ -249,7 +249,7 @@ public:
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
const KnownBits &Known) {
// Only install this information if it tells us something.
if (NumSignBits == 1 && Known.Zero == 0 && Known.One == 0)
if (NumSignBits == 1 && Known.isUnknown())
return;
LiveOutRegInfo.grow(Reg);

@ -78,7 +78,7 @@ private:
/// this function.
DenseMap<const AllocaInst *, int> FrameIndices;
/// Methods for translating from LLVM IR to MachineInstr.
/// \name Methods for translating from LLVM IR to MachineInstr.
/// \see ::translate for general information on the translate methods.
/// @{

@ -45,7 +45,7 @@ class MachineIRBuilder {
/// Debug location to be set to any instruction we create.
DebugLoc DL;
/// Fields describing the insertion point.
/// \name Fields describing the insertion point.
/// @{
MachineBasicBlock *MBB;
MachineBasicBlock::iterator II;
@ -84,7 +84,7 @@ public:
void setInsertPt(MachineBasicBlock &MBB, MachineBasicBlock::iterator II);
/// @}
/// Setters for the insertion point.
/// \name Setters for the insertion point.
/// @{
/// Set the MachineFunction where to build instructions.
void setMF(MachineFunction &);
@ -98,7 +98,7 @@ public:
void setInstr(MachineInstr &MI);
/// @}
/// Control where instructions we create are recorded (typically for
/// \name Control where instructions we create are recorded (typically for
/// visiting again later during legalization).
/// @{
void recordInsertions(std::function<void(MachineInstr *)> InsertedInstr);

@ -309,7 +309,7 @@ public:
Impossible
};
/// Convenient types for a list of insertion points.
/// \name Convenient types for a list of insertion points.
/// @{
typedef SmallVector<std::unique_ptr<InsertPoint>, 2> InsertionPoints;
typedef InsertionPoints::iterator insertpt_iterator;
@ -341,7 +341,7 @@ public:
const TargetRegisterInfo &TRI, Pass &P,
RepairingKind Kind = RepairingKind::Insert);
/// Getters.
/// \name Getters.
/// @{
RepairingKind getKind() const { return Kind; }
unsigned getOpIdx() const { return OpIdx; }
@ -349,7 +349,7 @@ public:
bool hasSplit() { return HasSplit; }
/// @}
/// Overloaded methods to add an insertion point.
/// \name Overloaded methods to add an insertion point.
/// @{
/// Add a MBBInsertionPoint to the list of InsertPoints.
void addInsertPoint(MachineBasicBlock &MBB, bool Beginning);
@ -362,7 +362,7 @@ public:
void addInsertPoint(InsertPoint &Point);
/// @}
/// Accessors related to the insertion points.
/// \name Accessors related to the insertion points.
/// @{
insertpt_iterator begin() { return InsertPoints.begin(); }
insertpt_iterator end() { return InsertPoints.end(); }
@ -561,7 +561,7 @@ private:
/// Find the best mapping for \p MI from \p PossibleMappings.
/// \return a reference on the best mapping in \p PossibleMappings.
RegisterBankInfo::InstructionMapping &
const RegisterBankInfo::InstructionMapping &
findBestMapping(MachineInstr &MI,
RegisterBankInfo::InstructionMappings &PossibleMappings,
SmallVectorImpl<RepairingPlacement> &RepairPts);

@ -264,7 +264,7 @@ public:
/// Convenient type to represent the alternatives for mapping an
/// instruction.
/// \todo When we move to TableGen this should be an array ref.
typedef SmallVector<InstructionMapping, 4> InstructionMappings;
typedef SmallVector<const InstructionMapping *, 4> InstructionMappings;
/// Helper class used to get/create the virtual registers that will be used
/// to replace the MachineOperand when applying a mapping.
@ -310,7 +310,7 @@ public:
OperandsMapper(MachineInstr &MI, const InstructionMapping &InstrMapping,
MachineRegisterInfo &MRI);
/// Getters.
/// \name Getters.
/// @{
/// The MachineInstr being remapped.
MachineInstr &getMI() const { return MI; }
@ -378,15 +378,23 @@ protected:
/// Keep dynamically allocated PartialMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>> MapOfPartialMappings;
mutable DenseMap<unsigned, std::unique_ptr<const PartialMapping>>
MapOfPartialMappings;
/// Keep dynamically allocated ValueMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping> > MapOfValueMappings;
mutable DenseMap<unsigned, std::unique_ptr<const ValueMapping>>
MapOfValueMappings;
/// Keep dynamically allocated array of ValueMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>> MapOfOperandsMappings;
mutable DenseMap<unsigned, std::unique_ptr<ValueMapping[]>>
MapOfOperandsMappings;
/// Keep dynamically allocated InstructionMapping in a separate map.
/// This shouldn't be needed when everything gets TableGen'ed.
mutable DenseMap<unsigned, std::unique_ptr<const InstructionMapping>>
MapOfInstructionMappings;
/// Create a RegisterBankInfo that can accommodate up to \p NumRegBanks
/// RegisterBank instances.
@ -425,14 +433,14 @@ protected:
/// register, a register class, or a register bank.
/// In other words, this method will likely fail to find a mapping for
/// any generic opcode that has not been lowered by target specific code.
InstructionMapping getInstrMappingImpl(const MachineInstr &MI) const;
const InstructionMapping &getInstrMappingImpl(const MachineInstr &MI) const;
/// Get the uniquely generated PartialMapping for the
/// given arguments.
const PartialMapping &getPartialMapping(unsigned StartIdx, unsigned Length,
const RegisterBank &RegBank) const;
/// Methods to get a uniquely generated ValueMapping.
/// \name Methods to get a uniquely generated ValueMapping.
/// @{
/// The most common ValueMapping consists of a single PartialMapping.
@ -445,7 +453,7 @@ protected:
unsigned NumBreakDowns) const;
/// @}
/// Methods to get a uniquely generated array of ValueMapping.
/// \name Methods to get a uniquely generated array of ValueMapping.
/// @{
/// Get the uniquely generated array of ValueMapping for the
@ -478,6 +486,33 @@ protected:
std::initializer_list<const ValueMapping *> OpdsMapping) const;
/// @}
/// \name Methods to get a uniquely generated InstructionMapping.
/// @{
private:
/// Method to get a uniquely generated InstructionMapping.
const InstructionMapping &
getInstructionMappingImpl(bool IsInvalid, unsigned ID = InvalidMappingID,
unsigned Cost = 0,
const ValueMapping *OperandsMapping = nullptr,
unsigned NumOperands = 0) const;
public:
/// Method to get a uniquely generated InstructionMapping.
const InstructionMapping &
getInstructionMapping(unsigned ID, unsigned Cost,
const ValueMapping *OperandsMapping,
unsigned NumOperands) const {
return getInstructionMappingImpl(/*IsInvalid*/ false, ID, Cost,
OperandsMapping, NumOperands);
}
/// Method to get a uniquely generated invalid InstructionMapping.
const InstructionMapping &getInvalidInstructionMapping() const {
return getInstructionMappingImpl(/*IsInvalid*/ true);
}
/// @}
/// Get the register bank for the \p OpIdx-th operand of \p MI from
/// the encoding constraints, if any.
///
@ -603,7 +638,8 @@ public:
///
/// \note If returnedVal does not verify MI, this would probably mean
/// that the target does not support that instruction.
virtual InstructionMapping getInstrMapping(const MachineInstr &MI) const;
virtual const InstructionMapping &
getInstrMapping(const MachineInstr &MI) const;
/// Get the alternative mappings for \p MI.
/// Alternative in the sense different from getInstrMapping.
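Under the uniqued-mapping scheme introduced here, a target override might look roughly like this (a sketch with hypothetical target names, not from the patch):

    const RegisterBankInfo::InstructionMapping &
    MyTargetRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
      const InstructionMapping &Mapping = getInstrMappingImpl(MI);
      if (Mapping.isValid()) // the generic heuristic sufficed
        return Mapping;
      return getInvalidInstructionMapping(); // uniqued invalid mapping
    }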

@ -17,9 +17,11 @@
namespace llvm {
class MachineBasicBlock;
class MachineFunction;
class Module;
class raw_ostream;
template <typename T> class SmallVectorImpl;
/// Print LLVM IR using the MIR serialization format to the given output stream.
void printMIR(raw_ostream &OS, const Module &M);
@ -28,6 +30,17 @@ void printMIR(raw_ostream &OS, const Module &M);
/// output stream.
void printMIR(raw_ostream &OS, const MachineFunction &MF);
/// Determine a possible list of successors of a basic block based on the
/// basic block machine operand being used inside the block. This should give
/// you the correct list of successor blocks in most cases except for things
/// like jump tables where the basic block references can't easily be found.
/// The MIRPrinter will skip printing successors if they match the result of
/// this function, and the parser will use this function to construct a list if
/// it is missing.
void guessSuccessors(const MachineBasicBlock &MBB,
SmallVectorImpl<MachineBasicBlock*> &Successors,
bool &IsFallthrough);
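A calling sketch (``MBB`` hypothetical), comparing the guessed list against a block's recorded successors:

    SmallVector<MachineBasicBlock *, 4> Guessed;
    bool IsFallthrough = false;
    guessSuccessors(MBB, Guessed, IsFallthrough);
    // Printing successors is redundant when Guessed matches MBB.successors().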
} // end namespace llvm
#endif

@ -520,6 +520,14 @@ public:
bool hasTailCall() const { return HasTailCall; }
void setHasTailCall() { HasTailCall = true; }
/// Computes the maximum size of a callframe and the AdjustsStack property.
/// This only works for targets defining
/// TargetInstrInfo::getCallFrameSetupOpcode(), getCallFrameDestroyOpcode(),
/// and getFrameSize().
/// This is usually computed by the prologue epilogue inserter but some
/// targets may call this to compute it earlier.
void computeMaxCallFrameSize(const MachineFunction &MF);
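A sketch of the early-computation case mentioned above (hypothetical target code):

    MachineFrameInfo &MFI = MF.getFrameInfo();
    MFI.computeMaxCallFrameSize(MF); // normally done by prologue/epilogue insertion
    unsigned MaxSize = MFI.getMaxCallFrameSize();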
/// Return the maximum size of a call frame that must be
/// allocated for an outgoing function call. This is only available if
/// CallFrameSetup/Destroy pseudo instructions are used by the target, and

@ -116,7 +116,7 @@ class MachineModuleInfo : public ImmutablePass {
// TODO: Ideally, what we'd like is to have a switch that allows emitting
// synchronous (precise at call-sites only) CFA into .eh_frame. However,
// even under this switch, we'd like .debug_frame to be precise when using.
// even under this switch, we'd like .debug_frame to be precise when using
// -g. At this moment, there's no way to specify that some CFI directives
// go into .eh_frame only, while others go into .debug_frame only.

@ -21,7 +21,7 @@ namespace llvm {
namespace codeview {
class TypeDatabase {
public:
TypeDatabase() : TypeNameStorage(Allocator) {}
explicit TypeDatabase(uint32_t ExpectedSize);
/// Gets the type index for the next type record.
TypeIndex getNextTypeIndex() const;

@ -310,6 +310,11 @@ class DWARFContextInMemory : public DWARFContext {
StringRef *MapSectionToMember(StringRef Name);
/// If Sec is compressed section, decompresses and updates its contents
/// provided by Data. Otherwise leaves it unchanged.
Error maybeDecompress(const object::SectionRef &Sec, StringRef Name,
StringRef &Data);
public:
DWARFContextInMemory(const object::ObjectFile &Obj,
const LoadedObjectInfo *L = nullptr);

@ -39,20 +39,18 @@ public:
private:
struct ValueType {
ValueType() {
uval = 0;
}
ValueType() { uval = 0; }
union {
uint64_t uval;
int64_t sval;
const char* cstr;
const char *cstr;
};
const uint8_t* data = nullptr;
const uint8_t *data = nullptr;
};
dwarf::Form Form; // Form for this value.
ValueType Value; // Contains all data for the form.
dwarf::Form Form; // Form for this value.
ValueType Value; // Contains all data for the form.
const DWARFUnit *U = nullptr; // Remember the DWARFUnit at extract time.
public:
@ -84,7 +82,7 @@ public:
const DWARFUnit *U);
bool isInlinedCStr() const {
return Value.data != nullptr && Value.data == (const uint8_t*)Value.cstr;
return Value.data != nullptr && Value.data == (const uint8_t *)Value.cstr;
}
/// getAsFoo functions below return the extracted value as Foo if only
@ -135,45 +133,45 @@ public:
uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format);
/// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
/// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
/// \param debug_info_data the .debug_info data to use to skip the value.
/// \param offset_ptr a reference to the offset that will be updated.
/// \param DebugInfoData the .debug_info data to use to skip the value.
/// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \returns true on success, false if the form was not skipped.
bool skipValue(DataExtractor debug_info_data, uint32_t *offset_ptr,
bool skipValue(DataExtractor DebugInfoData, uint32_t *OffsetPtr,
const DWARFUnit *U) const;
/// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
/// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
/// \param form the DW_FORM enumeration that indicates the form to skip.
/// \param debug_info_data the .debug_info data to use to skip the value.
/// \param offset_ptr a reference to the offset that will be updated.
/// \param Form the DW_FORM enumeration that indicates the form to skip.
/// \param DebugInfoData the .debug_info data to use to skip the value.
/// \param OffsetPtr a reference to the offset that will be updated.
/// \param U the DWARFUnit to use when skipping the form in case the form
/// size differs according to data in the DWARFUnit.
/// \returns true on success, false if the form was not skipped.
static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
uint32_t *offset_ptr, const DWARFUnit *U);
static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
uint32_t *OffsetPtr, const DWARFUnit *U);
/// Skip a form in \p debug_info_data at offset specified by \p offset_ptr.
/// Skip a form in \p DebugInfoData at offset specified by \p OffsetPtr.
///
/// Skips the bytes for this form in the debug info and updates the offset.
///
/// \param form the DW_FORM enumeration that indicates the form to skip.
/// \param debug_info_data the .debug_info data to use to skip the value.
/// \param offset_ptr a reference to the offset that will be updated.
/// \param Form the DW_FORM enumeration that indicates the form to skip.
/// \param DebugInfoData the .debug_info data to use to skip the value.
/// \param OffsetPtr a reference to the offset that will be updated.
/// \param Version DWARF version number.
/// \param AddrSize size of an address in bytes.
/// \param Format enum value from llvm::dwarf::DwarfFormat.
/// \returns true on success, false if the form was not skipped.
static bool skipValue(dwarf::Form form, DataExtractor debug_info_data,
uint32_t *offset_ptr, uint16_t Version,
uint8_t AddrSize, llvm::dwarf::DwarfFormat Format);
static bool skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
uint32_t *OffsetPtr, uint16_t Version, uint8_t AddrSize,
llvm::dwarf::DwarfFormat Format);
private:
void dumpString(raw_ostream &OS) const;
@ -181,149 +179,146 @@ private:
namespace dwarf {
/// Take an optional DWARFFormValue and try to extract a string value from it.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and was a string.
inline Optional<const char*> toString(const Optional<DWARFFormValue>& V) {
if (V)
return V->getAsCString();
return None;
}
/// Take an optional DWARFFormValue and extract a string value from it.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the string value or Default if the V doesn't have a value or the
/// form value's encoding wasn't a string.
inline const char*
toString(const Optional<DWARFFormValue>& V, const char *Default) {
return toString(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract a string value from it.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and was a string.
inline Optional<const char *> toString(const Optional<DWARFFormValue> &V) {
if (V)
return V->getAsCString();
return None;
}
/// Take an optional DWARFFormValue and try to extract an unsigned constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has an unsigned constant form.
inline Optional<uint64_t> toUnsigned(const Optional<DWARFFormValue>& V) {
if (V)
return V->getAsUnsignedConstant();
return None;
}
/// Take an optional DWARFFormValue and extract an unsigned constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted unsigned value or Default if the V doesn't have a
/// value or the form value's encoding wasn't an unsigned constant form.
inline uint64_t
toUnsigned(const Optional<DWARFFormValue>& V, uint64_t Default) {
return toUnsigned(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract a reference.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a reference form.
inline Optional<uint64_t> toReference(const Optional<DWARFFormValue>& V) {
if (V)
return V->getAsReference();
return None;
}
/// Take an optional DWARFFormValue and extract a reference.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted reference value or Default if the V doesn't have a
/// value or the form value's encoding wasn't a reference form.
inline uint64_t
toReference(const Optional<DWARFFormValue>& V, uint64_t Default) {
return toReference(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract a signed constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a signed constant form.
inline Optional<int64_t> toSigned(const Optional<DWARFFormValue>& V) {
if (V)
return V->getAsSignedConstant();
return None;
}
/// Take an optional DWARFFormValue and extract a string value from it.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the string value or Default if the V doesn't have a value or the
/// form value's encoding wasn't a string.
inline const char *toString(const Optional<DWARFFormValue> &V,
const char *Default) {
return toString(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and extract a signed integer.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted signed integer value or Default if the V doesn't
/// have a value or the form value's encoding wasn't a signed integer form.
inline int64_t
toSigned(const Optional<DWARFFormValue>& V, int64_t Default) {
return toSigned(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract an unsigned constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has an unsigned constant form.
inline Optional<uint64_t> toUnsigned(const Optional<DWARFFormValue> &V) {
if (V)
return V->getAsUnsignedConstant();
return None;
}
/// Take an optional DWARFFormValue and try to extract an address.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has an address form.
inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue>& V) {
if (V)
return V->getAsAddress();
return None;
}
/// Take an optional DWARFFormValue and extract an unsigned constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted unsigned value or Default if the V doesn't have a
/// value or the form value's encoding wasn't an unsigned constant form.
inline uint64_t toUnsigned(const Optional<DWARFFormValue> &V,
uint64_t Default) {
return toUnsigned(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and extract an address.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted address value or Default if the V doesn't have a
/// value or the form value's encoding wasn't an address form.
inline uint64_t
toAddress(const Optional<DWARFFormValue>& V, uint64_t Default) {
return toAddress(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract a reference.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a reference form.
inline Optional<uint64_t> toReference(const Optional<DWARFFormValue> &V) {
if (V)
return V->getAsReference();
return None;
}
/// Take an optional DWARFFormValue and try to extract a section offset.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a section offset form.
inline Optional<uint64_t> toSectionOffset(const Optional<DWARFFormValue>& V) {
if (V)
return V->getAsSectionOffset();
return None;
}
/// Take an optional DWARFFormValue and extract a reference.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted reference value or Default if the V doesn't have a
/// value or the form value's encoding wasn't a reference form.
inline uint64_t toReference(const Optional<DWARFFormValue> &V,
uint64_t Default) {
return toReference(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and extract a section offset.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted section offset value or Default if the V doesn't
/// have a value or the form value's encoding wasn't a section offset form.
inline uint64_t
toSectionOffset(const Optional<DWARFFormValue>& V, uint64_t Default) {
return toSectionOffset(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract a signed constant.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a signed constant form.
inline Optional<int64_t> toSigned(const Optional<DWARFFormValue> &V) {
if (V)
return V->getAsSignedConstant();
return None;
}
/// Take an optional DWARFFormValue and try to extract block data.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a block form.
inline Optional<ArrayRef<uint8_t>>
toBlock(const Optional<DWARFFormValue>& V) {
if (V)
return V->getAsBlock();
return None;
}
/// Take an optional DWARFFormValue and extract a signed integer.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted signed integer value or Default if the V doesn't
/// have a value or the form value's encoding wasn't a signed integer form.
inline int64_t toSigned(const Optional<DWARFFormValue> &V, int64_t Default) {
return toSigned(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract an address.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has an address form.
inline Optional<uint64_t> toAddress(const Optional<DWARFFormValue> &V) {
if (V)
return V->getAsAddress();
return None;
}
/// Take an optional DWARFFormValue and extract an address.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted address value or Default if the V doesn't have a
/// value or the form value's encoding wasn't an address form.
inline uint64_t toAddress(const Optional<DWARFFormValue> &V, uint64_t Default) {
return toAddress(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract an section offset.
///
/// \param V and optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a section offset form.
inline Optional<uint64_t> toSectionOffset(const Optional<DWARFFormValue> &V) {
if (V)
return V->getAsSectionOffset();
return None;
}
/// Take an optional DWARFFormValue and extract a section offset.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \param Default the default value to return in case of failure.
/// \returns the extracted section offset value or Default if the V doesn't
/// have a value or the form value's encoding wasn't a section offset form.
inline uint64_t toSectionOffset(const Optional<DWARFFormValue> &V,
uint64_t Default) {
return toSectionOffset(V).getValueOr(Default);
}
/// Take an optional DWARFFormValue and try to extract block data.
///
/// \param V an optional DWARFFormValue to attempt to extract the value from.
/// \returns an optional value that contains a value if the form value
/// was valid and has a block form.
inline Optional<ArrayRef<uint8_t>> toBlock(const Optional<DWARFFormValue> &V) {
if (V)
return V->getAsBlock();
return None;
}
} // end namespace dwarf
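A usage sketch for the helpers above (the attribute lookup producing ``V`` is elided):

    Optional<DWARFFormValue> V = ...;                   // e.g. from a DIE attribute
    const char *Name = dwarf::toString(V, "<unknown>"); // default on failure
    uint64_t Addr = dwarf::toAddress(V, 0);             // 0 if not an address form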

@ -53,14 +53,6 @@ private:
const ModuleInfoHeader *Layout = nullptr;
};
struct ModuleInfoEx {
ModuleInfoEx(const DbiModuleDescriptor &Info) : Info(Info) {}
ModuleInfoEx(const ModuleInfoEx &Ex) = default;
DbiModuleDescriptor Info;
std::vector<StringRef> SourceFiles;
};
} // end namespace pdb
template <> struct VarStreamArrayExtractor<pdb::DbiModuleDescriptor> {

@ -0,0 +1,114 @@
//===- DbiModuleList.h - PDB module information list ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H
#define LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/Error.h"
#include <cstdint>
#include <vector>
namespace llvm {
namespace pdb {
class DbiModuleList;
struct FileInfoSubstreamHeader;
class DbiModuleSourceFilesIterator
: public iterator_facade_base<DbiModuleSourceFilesIterator,
std::random_access_iterator_tag, StringRef> {
typedef iterator_facade_base<DbiModuleSourceFilesIterator,
std::random_access_iterator_tag, StringRef>
BaseType;
public:
DbiModuleSourceFilesIterator(const DbiModuleList &Modules, uint32_t Modi,
uint16_t Filei);
DbiModuleSourceFilesIterator() = default;
DbiModuleSourceFilesIterator &
operator=(const DbiModuleSourceFilesIterator &R) = default;
bool operator==(const DbiModuleSourceFilesIterator &R) const;
const StringRef &operator*() const { return ThisValue; }
StringRef &operator*() { return ThisValue; }
bool operator<(const DbiModuleSourceFilesIterator &RHS) const;
std::ptrdiff_t operator-(const DbiModuleSourceFilesIterator &R) const;
DbiModuleSourceFilesIterator &operator+=(std::ptrdiff_t N);
DbiModuleSourceFilesIterator &operator-=(std::ptrdiff_t N);
private:
void setValue();
bool isEnd() const;
bool isCompatible(const DbiModuleSourceFilesIterator &R) const;
bool isUniversalEnd() const;
StringRef ThisValue;
const DbiModuleList *Modules{nullptr};
uint32_t Modi{0};
uint16_t Filei{0};
};
class DbiModuleList {
friend DbiModuleSourceFilesIterator;
public:
Error initialize(BinaryStreamRef ModInfo, BinaryStreamRef FileInfo);
Expected<StringRef> getFileName(uint32_t Index) const;
uint32_t getModuleCount() const;
uint32_t getSourceFileCount() const;
uint16_t getSourceFileCount(uint32_t Modi) const;
iterator_range<DbiModuleSourceFilesIterator>
source_files(uint32_t Modi) const;
DbiModuleDescriptor getModuleDescriptor(uint32_t Modi) const;
private:
Error initializeModInfo(BinaryStreamRef ModInfo);
Error initializeFileInfo(BinaryStreamRef FileInfo);
VarStreamArray<DbiModuleDescriptor> Descriptors;
FixedStreamArray<support::little32_t> FileNameOffsets;
FixedStreamArray<support::ulittle16_t> ModFileCountArray;
// For each module, there are multiple filenames, which can be obtained by
// knowing the index of the file. Given the index of the file, one can use
// that as an offset into the FileNameOffsets array, which contains the
// absolute offset of the file name in NamesBuffer. Thus, for each module
// we store the first index in the FileNameOffsets array for this module.
// The number of files for the corresponding module is stored in
// ModFileCountArray.
std::vector<uint32_t> ModuleInitialFileIndex;
// In order to provide random access into the Descriptors array, we iterate it
// once up front to find the offsets of the individual items and store them in
// this array.
std::vector<uint32_t> ModuleDescriptorOffsets;
const FileInfoSubstreamHeader *FileInfoHeader = nullptr;
BinaryStreamRef ModInfoSubstream;
BinaryStreamRef FileInfoSubstream;
BinaryStreamRef NamesBuffer;
};
}
}
#endif // LLVM_DEBUGINFO_PDB_RAW_DBIMODULELIST_H
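A usage sketch for the new class (stream construction elided; ``Consume`` is a hypothetical callback):

    DbiModuleList Modules;
    if (Error E = Modules.initialize(ModInfoRef, FileInfoRef))
      return E;
    for (uint32_t I = 0, N = Modules.getModuleCount(); I < N; ++I)
      for (StringRef File : Modules.source_files(I))
        Consume(File);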

@ -13,6 +13,7 @@
#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleList.h"
#include "llvm/DebugInfo/PDB/Native/PDBStringTable.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
@ -68,9 +69,7 @@ public:
/// not present, returns InvalidStreamIndex.
uint32_t getDebugStreamIndex(DbgHeaderType Type) const;
ArrayRef<ModuleInfoEx> modules() const;
Expected<StringRef> getFileNameForIndex(uint32_t Index) const;
const DbiModuleList &modules() const;
FixedStreamArray<object::coff_section> getSectionHeaders();
@ -80,27 +79,22 @@ public:
void visitSectionContributions(ISectionContribVisitor &Visitor) const;
private:
Error initializeModInfoArray();
Error initializeSectionContributionData();
Error initializeSectionHeadersData();
Error initializeSectionMapData();
Error initializeFileInfo();
Error initializeFpoRecords();
PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;
std::vector<ModuleInfoEx> ModuleInfos;
PDBStringTable ECNames;
BinaryStreamRef ModInfoSubstream;
BinaryStreamRef SecContrSubstream;
BinaryStreamRef SecMapSubstream;
BinaryStreamRef FileInfoSubstream;
BinaryStreamRef TypeServerMapSubstream;
BinaryStreamRef ECSubstream;
BinaryStreamRef NamesBuffer;
DbiModuleList Modules;
FixedStreamArray<support::ulittle16_t> DbgStreams;
@ -108,7 +102,6 @@ private:
FixedStreamArray<SectionContrib> SectionContribs;
FixedStreamArray<SectionContrib2> SectionContribs2;
FixedStreamArray<SecMapEntry> SectionMap;
FixedStreamArray<support::little32_t> FileNameOffsets;
std::unique_ptr<msf::MappedBlockStream> SectionHeaderStream;
FixedStreamArray<object::coff_section> SectionHeaders;

@ -18,7 +18,7 @@ namespace pdb {
class NativeCompilandSymbol : public NativeRawSymbol {
public:
NativeCompilandSymbol(NativeSession &Session, const ModuleInfoEx &MI);
NativeCompilandSymbol(NativeSession &Session, DbiModuleDescriptor MI);
PDB_SymType getSymTag() const override;
bool isEditAndContinueEnabled() const override;
uint32_t getLexicalParentId() const override;
@ -26,7 +26,7 @@ public:
std::string getName() const override;
private:
ModuleInfoEx Module;
DbiModuleDescriptor Module;
};
} // namespace pdb

@ -16,13 +16,13 @@
namespace llvm {
namespace pdb {
class DbiModuleList;
class NativeSession;
class NativeEnumModules : public IPDBEnumChildren<PDBSymbol> {
public:
explicit NativeEnumModules(NativeSession &Session,
ArrayRef<ModuleInfoEx> Modules,
uint32_t Index = 0);
NativeEnumModules(NativeSession &Session, const DbiModuleList &Modules,
uint32_t Index = 0);
uint32_t getChildCount() const override;
std::unique_ptr<PDBSymbol> getChildAtIndex(uint32_t Index) const override;
@ -32,7 +32,7 @@ public:
private:
NativeSession &Session;
ArrayRef<ModuleInfoEx> Modules;
const DbiModuleList &Modules;
uint32_t Index;
};
}

@ -211,7 +211,7 @@ struct ModInfoFlags {
};
/// The header preceding each entry in the Module Info substream of the DBI
/// stream.
/// stream. Corresponds to the type MODI in the reference implementation.
struct ModuleInfoHeader {
/// Currently opened module. This field is a pointer in the reference
/// implementation, but that won't work on 64-bit systems, and anyway it
@ -243,9 +243,12 @@ struct ModuleInfoHeader {
/// Padding so the next field is 4-byte aligned.
char Padding1[2];
/// Array of [0..NumFiles) DBI name buffer offsets. This field is a pointer
/// in the reference implementation, but as with `Mod`, we ignore it for now
/// since it is unused.
/// Array of [0..NumFiles) DBI name buffer offsets. In the reference
/// implementation this field is a pointer. But since you can't portably
/// serialize a pointer, on 64-bit platforms they copy all the values except
/// this one into the 32-bit version of the struct and use that for
/// serialization. Regardless, this field is unused; it is only there to
/// store a pointer that can be accessed at runtime.
support::ulittle32_t FileNameOffs;
/// Name Index for src file name

@ -40,12 +40,12 @@ public:
uint32_t TypeIndexBegin() const;
uint32_t TypeIndexEnd() const;
uint32_t NumTypeRecords() const;
uint32_t getNumTypeRecords() const;
uint16_t getTypeHashStreamIndex() const;
uint16_t getTypeHashStreamAuxIndex() const;
uint32_t getHashKeySize() const;
uint32_t NumHashBuckets() const;
uint32_t getNumHashBuckets() const;
FixedStreamArray<support::ulittle32_t> getHashValues() const;
FixedStreamArray<TypeIndexOffset> getTypeIndexOffsets() const;
HashTable &getHashAdjusters();
@ -55,8 +55,6 @@ public:
Error commit();
private:
Error verifyHashValues();
const PDBFile &Pdb;
std::unique_ptr<msf::MappedBlockStream> Stream;

@ -355,7 +355,7 @@ public:
std::move(Deserialize)));
KeyName = &I->first;
}
{
assert(KeyName != nullptr && "No keyname pointer");
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);
@ -370,7 +370,7 @@ public:
};
}
}
static Error serialize(ChannelT &C, Error &&Err) {
std::lock_guard<std::recursive_mutex> Lock(SerializersMutex);

@ -10,6 +10,8 @@
#ifndef LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
#define LLVM_EXECUTIONENGINE_RUNTIMEDYLDCHECKER_H
#include "llvm/ADT/Optional.h"
#include <cstdint>
#include <memory>
#include <string>
@ -97,6 +99,10 @@ public:
StringRef SectionName,
bool LocalAddress);
/// \brief If there is a section at the given local address, return its load
/// address, otherwise return none.
Optional<uint64_t> getSectionLoadAddress(void *LocalAddress) const;
private:
std::unique_ptr<RuntimeDyldCheckerImpl> Impl;
};

@ -244,7 +244,8 @@ public:
std::pair<unsigned, Optional<unsigned>> getAllocSizeArgs() const;
std::string getAsString(bool InAttrGrp = false) const;
typedef const Attribute *iterator;
using iterator = const Attribute *;
iterator begin() const;
iterator end() const;
};
@ -479,7 +480,7 @@ public:
/// \brief Return the attributes at the index as a string.
std::string getAsString(unsigned Index, bool InAttrGrp = false) const;
typedef ArrayRef<Attribute>::iterator iterator;
using iterator = ArrayRef<Attribute>::iterator;
iterator begin(unsigned Slot) const;
iterator end(unsigned Slot) const;
@ -662,11 +663,11 @@ public:
bool empty() const { return Attrs.none(); }
// Iterators for target-dependent attributes.
typedef std::pair<std::string, std::string> td_type;
typedef std::map<std::string, std::string>::iterator td_iterator;
typedef std::map<std::string, std::string>::const_iterator td_const_iterator;
typedef iterator_range<td_iterator> td_range;
typedef iterator_range<td_const_iterator> td_const_range;
using td_type = std::pair<std::string, std::string>;
using td_iterator = std::map<std::string, std::string>::iterator;
using td_const_iterator = std::map<std::string, std::string>::const_iterator;
using td_range = iterator_range<td_iterator>;
using td_const_range = iterator_range<td_const_iterator>;
td_iterator td_begin() { return TargetDepAttrs.begin(); }
td_iterator td_end() { return TargetDepAttrs.end(); }

@ -21,6 +21,7 @@
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Compiler.h"
#include "llvm-c/Types.h"
#include <cassert>
#include <cstddef>
@ -31,7 +32,9 @@ class CallInst;
class Function;
class LandingPadInst;
class LLVMContext;
class Module;
class TerminatorInst;
class ValueSymbolTable;
/// \brief LLVM Basic Block Representation
///
@ -51,7 +54,7 @@ class TerminatorInst;
class BasicBlock : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
public:
typedef SymbolTableList<Instruction> InstListType;
using InstListType = SymbolTableList<Instruction>;
private:
friend class BlockAddress;
@ -80,10 +83,10 @@ public:
LLVMContext &getContext() const;
/// Instruction iterators...
typedef InstListType::iterator iterator;
typedef InstListType::const_iterator const_iterator;
typedef InstListType::reverse_iterator reverse_iterator;
typedef InstListType::const_reverse_iterator const_reverse_iterator;
using iterator = InstListType::iterator;
using const_iterator = InstListType::const_iterator;
using reverse_iterator = InstListType::reverse_iterator;
using const_reverse_iterator = InstListType::const_reverse_iterator;
/// \brief Creates a new BasicBlock.
///

@ -37,9 +37,9 @@ namespace llvm {
template <class Ptr, class USE_iterator> // Predecessor Iterator
class PredIterator : public std::iterator<std::forward_iterator_tag,
Ptr, ptrdiff_t, Ptr*, Ptr*> {
typedef std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*,
Ptr*> super;
typedef PredIterator<Ptr, USE_iterator> Self;
using super =
std::iterator<std::forward_iterator_tag, Ptr, ptrdiff_t, Ptr*, Ptr*>;
using Self = PredIterator<Ptr, USE_iterator>;
USE_iterator It;
inline void advancePastNonTerminators() {
@ -49,8 +49,8 @@ class PredIterator : public std::iterator<std::forward_iterator_tag,
}
public:
typedef typename super::pointer pointer;
typedef typename super::reference reference;
using pointer = typename super::pointer;
using reference = typename super::reference;
PredIterator() = default;
explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
@ -90,11 +90,11 @@ public:
}
};
typedef PredIterator<BasicBlock, Value::user_iterator> pred_iterator;
typedef PredIterator<const BasicBlock,
Value::const_user_iterator> const_pred_iterator;
typedef iterator_range<pred_iterator> pred_range;
typedef iterator_range<const_pred_iterator> pred_const_range;
using pred_iterator = PredIterator<BasicBlock, Value::user_iterator>;
using const_pred_iterator =
PredIterator<const BasicBlock, Value::const_user_iterator>;
using pred_range = iterator_range<pred_iterator>;
using pred_const_range = iterator_range<const_pred_iterator>;
inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
@ -118,12 +118,12 @@ inline pred_const_range predecessors(const BasicBlock *BB) {
// BasicBlock succ_iterator helpers
//===----------------------------------------------------------------------===//
typedef TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>
succ_iterator;
typedef TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>
succ_const_iterator;
typedef iterator_range<succ_iterator> succ_range;
typedef iterator_range<succ_const_iterator> succ_const_range;
using succ_iterator =
TerminatorInst::SuccIterator<TerminatorInst *, BasicBlock>;
using succ_const_iterator =
TerminatorInst::SuccIterator<const TerminatorInst *, const BasicBlock>;
using succ_range = iterator_range<succ_iterator>;
using succ_const_range = iterator_range<succ_const_iterator>;
inline succ_iterator succ_begin(BasicBlock *BB) {
return succ_iterator(BB->getTerminator());
@ -160,8 +160,8 @@ struct isPodLike<TerminatorInst::SuccIterator<T, U>> {
// graph of basic blocks...
template <> struct GraphTraits<BasicBlock*> {
typedef BasicBlock *NodeRef;
typedef succ_iterator ChildIteratorType;
using NodeRef = BasicBlock *;
using ChildIteratorType = succ_iterator;
static NodeRef getEntryNode(BasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
@ -169,8 +169,8 @@ template <> struct GraphTraits<BasicBlock*> {
};
template <> struct GraphTraits<const BasicBlock*> {
typedef const BasicBlock *NodeRef;
typedef succ_const_iterator ChildIteratorType;
using NodeRef = const BasicBlock *;
using ChildIteratorType = succ_const_iterator;
static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }
@ -184,16 +184,18 @@ template <> struct GraphTraits<const BasicBlock*> {
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<BasicBlock*>> {
typedef BasicBlock *NodeRef;
typedef pred_iterator ChildIteratorType;
using NodeRef = BasicBlock *;
using ChildIteratorType = pred_iterator;
static NodeRef getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
template <> struct GraphTraits<Inverse<const BasicBlock*>> {
typedef const BasicBlock *NodeRef;
typedef const_pred_iterator ChildIteratorType;
using NodeRef = const BasicBlock *;
using ChildIteratorType = const_pred_iterator;
static NodeRef getEntryNode(Inverse<const BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
@ -211,7 +213,7 @@ template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
static NodeRef getEntryNode(Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
typedef pointer_iterator<Function::iterator> nodes_iterator;
using nodes_iterator = pointer_iterator<Function::iterator>;
static nodes_iterator nodes_begin(Function *F) {
return nodes_iterator(F->begin());
@ -228,7 +230,7 @@ template <> struct GraphTraits<const Function*> :
static NodeRef getEntryNode(const Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
typedef pointer_iterator<Function::const_iterator> nodes_iterator;
using nodes_iterator = pointer_iterator<Function::const_iterator>;
static nodes_iterator nodes_begin(const Function *F) {
return nodes_iterator(F->begin());

@ -207,7 +207,7 @@ public:
/// The type of iterator to use when looping over actual arguments at this
/// call site.
typedef IterTy arg_iterator;
using arg_iterator = IterTy;
iterator_range<IterTy> args() const {
return make_range(arg_begin(), arg_end());
@ -231,7 +231,7 @@ public:
/// Type of iterator to use when looping over data operands at this call site
/// (see below).
typedef IterTy data_operand_iterator;
using data_operand_iterator = IterTy;
/// data_operands_begin/data_operands_end - Return iterators iterating over
/// the call / invoke argument list and bundle operands. For invokes, this is

@ -1,4 +1,4 @@
//===-- llvm/CallingConv.h - LLVM Calling Conventions -----------*- C++ -*-===//
//===- llvm/CallingConv.h - LLVM Calling Conventions ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -20,8 +20,9 @@ namespace llvm {
/// the well-known calling conventions.
///
namespace CallingConv {
/// LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
typedef unsigned ID;
using ID = unsigned;
/// A set of enums which specify the assigned numeric values for known llvm
/// calling conventions.
@ -203,8 +204,9 @@ namespace CallingConv {
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
} // End CallingConv namespace
} // End llvm namespace
} // end namespace CallingConv
#endif
} // end namespace llvm
#endif // LLVM_IR_CALLINGCONV_H

@ -41,7 +41,7 @@ namespace llvm {
class MDNode;
/// This class represents a range of values.
class ConstantRange {
class LLVM_NODISCARD ConstantRange {
APInt Lower, Upper;
public:
@ -167,7 +167,10 @@ public:
APInt getSetSize() const;
/// Compare set size of this range with the range CR.
bool isSizeStrictlySmallerThanOf(const ConstantRange &CR) const;
bool isSizeStrictlySmallerThan(const ConstantRange &CR) const;
// Compare set size of this range with Value.
bool isSizeLargerThan(uint64_t MaxSize) const;
/// Return the largest unsigned value contained in the ConstantRange.
APInt getUnsignedMax() const;

@ -1,4 +1,4 @@
//===--------- llvm/DataLayout.h - Data size & alignment info ---*- C++ -*-===//
//===- llvm/DataLayout.h - Data size & alignment info -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -20,27 +20,32 @@
#ifndef LLVM_IR_DATALAYOUT_H
#define LLVM_IR_DATALAYOUT_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <string>
// This needs to be outside of the namespace, to avoid conflict with llvm-c
// decl.
typedef struct LLVMOpaqueTargetData *LLVMTargetDataRef;
using LLVMTargetDataRef = struct LLVMOpaqueTargetData *;
namespace llvm {
class Value;
class StructType;
class StructLayout;
class Triple;
class GlobalVariable;
class LLVMContext;
template<typename T>
class ArrayRef;
class Module;
class StructLayout;
class Triple;
class Value;
/// Enum used to categorize the alignment types stored by LayoutAlignElem
enum AlignTypeEnum {
@ -72,6 +77,7 @@ struct LayoutAlignElem {
static LayoutAlignElem get(AlignTypeEnum align_type, unsigned abi_align,
unsigned pref_align, uint32_t bit_width);
bool operator==(const LayoutAlignElem &rhs) const;
};
@ -90,6 +96,7 @@ struct PointerAlignElem {
/// Initializer
static PointerAlignElem get(uint32_t AddressSpace, unsigned ABIAlign,
unsigned PrefAlign, uint32_t TypeByteWidth);
bool operator==(const PointerAlignElem &rhs) const;
};
@ -121,7 +128,7 @@ private:
/// \brief Primitive type alignment data. This is sorted by type and bit
/// width during construction.
typedef SmallVector<LayoutAlignElem, 16> AlignmentsTy;
using AlignmentsTy = SmallVector<LayoutAlignElem, 16>;
AlignmentsTy Alignments;
AlignmentsTy::const_iterator
@ -136,7 +143,7 @@ private:
/// \brief The string representation used to create this DataLayout
std::string StringRepresentation;
typedef SmallVector<PointerAlignElem, 8> PointersTy;
using PointersTy = SmallVector<PointerAlignElem, 8>;
PointersTy Pointers;
PointersTy::const_iterator
@ -147,7 +154,7 @@ private:
PointersTy::iterator findPointerLowerBound(uint32_t AddressSpace);
// The StructType -> StructLayout map.
mutable void *LayoutMap;
mutable void *LayoutMap = nullptr;
/// Pointers in these address spaces are non-integral, and don't have a
/// well-defined bitwise representation.
@ -172,16 +179,16 @@ private:
public:
/// Constructs a DataLayout from a specification string. See reset().
explicit DataLayout(StringRef LayoutDescription) : LayoutMap(nullptr) {
explicit DataLayout(StringRef LayoutDescription) {
reset(LayoutDescription);
}
/// Initialize target data from properties stored in the module.
explicit DataLayout(const Module *M);
void init(const Module *M);
DataLayout(const DataLayout &DL) { *this = DL; }
DataLayout(const DataLayout &DL) : LayoutMap(nullptr) { *this = DL; }
~DataLayout(); // Not virtual, do not subclass this class
DataLayout &operator=(const DataLayout &DL) {
clear();
@ -200,7 +207,7 @@ public:
bool operator==(const DataLayout &Other) const;
bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
~DataLayout(); // Not virtual, do not subclass this class
void init(const Module *M);
/// Parse a data layout string (with fallback to default values).
void reset(StringRef LayoutDescription);
@ -489,6 +496,7 @@ class StructLayout {
unsigned IsPadded : 1;
unsigned NumElements : 31;
uint64_t MemberOffsets[1]; // variable sized array!
public:
uint64_t getSizeInBytes() const { return StructSize; }
@ -515,6 +523,7 @@ public:
private:
friend class DataLayout; // Only DataLayout can create this class
StructLayout(StructType *ST, const DataLayout &DL);
};
@ -560,6 +569,6 @@ inline uint64_t DataLayout::getTypeSizeInBits(Type *Ty) const {
}
}
} // End llvm namespace
} // end namespace llvm
#endif
#endif // LLVM_IR_DATALAYOUT_H
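A sketch of the constructor path shown above; the layout string is an assumed x86-64 spec, used only for illustration.

#include "llvm/IR/DataLayout.h"

// Sketch: the StringRef constructor parses the spec via reset(); with this
// change LayoutMap starts out null instead of being set in the initializer.
static unsigned pointerBits() {
  llvm::DataLayout DL("e-m:e-i64:64-f80:128-n8:16:32:64-S128");
  return DL.getPointerSizeInBits();
}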


@ -21,17 +21,12 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/ErrorHandling.h"
#include <iterator>
namespace llvm {
class Module;
class DbgDeclareInst;
class DbgValueInst;
template <typename K, typename V, typename KeyInfoT, typename BucketT>
class DenseMap;
class Module;
/// \brief Find subprogram that is enclosing this scope.
DISubprogram *getDISubprogram(const MDNode *Scope);
@ -95,13 +90,13 @@ private:
bool addScope(DIScope *Scope);
public:
typedef SmallVectorImpl<DICompileUnit *>::const_iterator
compile_unit_iterator;
typedef SmallVectorImpl<DISubprogram *>::const_iterator subprogram_iterator;
typedef SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator
global_variable_expression_iterator;
typedef SmallVectorImpl<DIType *>::const_iterator type_iterator;
typedef SmallVectorImpl<DIScope *>::const_iterator scope_iterator;
using compile_unit_iterator =
SmallVectorImpl<DICompileUnit *>::const_iterator;
using subprogram_iterator = SmallVectorImpl<DISubprogram *>::const_iterator;
using global_variable_expression_iterator =
SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator;
using type_iterator = SmallVectorImpl<DIType *>::const_iterator;
using scope_iterator = SmallVectorImpl<DIScope *>::const_iterator;
iterator_range<compile_unit_iterator> compile_units() const {
return make_range(CUs.begin(), CUs.end());
@ -140,4 +135,4 @@ private:
} // end namespace llvm
#endif
#endif // LLVM_IR_DEBUGINFO_H
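The using-declarations above back the existing range accessors; a minimal consumption sketch (illustrative only):

#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Module.h"

// Sketch: compile_units() is an iterator_range over compile_unit_iterator.
static unsigned countCompileUnits(const llvm::Module &M) {
  llvm::DebugInfoFinder Finder;
  Finder.processModule(M);
  unsigned N = 0;
  for (llvm::DICompileUnit *CU : Finder.compile_units())
    if (CU)
      ++N;
  return N;
}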


@ -42,7 +42,7 @@ extern template void Calculate<Function, Inverse<BasicBlock *>>(
DominatorTreeBaseByGraphTraits<GraphTraits<Inverse<BasicBlock *>>> &DT,
Function &F);
typedef DomTreeNodeBase<BasicBlock> DomTreeNode;
using DomTreeNode = DomTreeNodeBase<BasicBlock>;
class BasicBlockEdge {
const BasicBlock *Start;
@ -70,7 +70,7 @@ public:
};
template <> struct DenseMapInfo<BasicBlockEdge> {
typedef DenseMapInfo<const BasicBlock *> BBInfo;
using BBInfo = DenseMapInfo<const BasicBlock *>;
static unsigned getHashValue(const BasicBlockEdge *V);
@ -113,7 +113,7 @@ template <> struct DenseMapInfo<BasicBlockEdge> {
/// preceding statements; this is stated only to assist human understanding.
class DominatorTree : public DominatorTreeBase<BasicBlock> {
public:
typedef DominatorTreeBase<BasicBlock> Base;
using Base = DominatorTreeBase<BasicBlock>;
DominatorTree() : DominatorTreeBase<BasicBlock>(false) {}
explicit DominatorTree(Function &F) : DominatorTreeBase<BasicBlock>(false) {
@ -168,9 +168,9 @@ public:
// iterable by generic graph iterators.
template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
typedef Node *NodeRef;
typedef ChildIterator ChildIteratorType;
typedef df_iterator<Node *, df_iterator_default_set<Node*>> nodes_iterator;
using NodeRef = Node *;
using ChildIteratorType = ChildIterator;
using nodes_iterator = df_iterator<Node *, df_iterator_default_set<Node*>>;
static NodeRef getEntryNode(NodeRef N) { return N; }
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
@ -212,7 +212,7 @@ class DominatorTreeAnalysis : public AnalysisInfoMixin<DominatorTreeAnalysis> {
public:
/// \brief Provide the result typedef for this analysis pass.
typedef DominatorTree Result;
using Result = DominatorTree;
/// \brief Run the analysis pass over a function and produce a dominator tree.
DominatorTree run(Function &F, FunctionAnalysisManager &);
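A sketch tying the pieces above together (illustrative): DominatorTreeAnalysis::Result is the DominatorTree, and the DomTreeGraphTraitsBase wiring makes its nodes depth-first iterable.

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PassManager.h"

// Sketch: fetch the tree from the new pass manager, then walk it.
static void visitDomTree(llvm::Function &F,
                         llvm::FunctionAnalysisManager &FAM) {
  llvm::DominatorTree &DT = FAM.getResult<llvm::DominatorTreeAnalysis>(F);
  for (llvm::DomTreeNode *N : llvm::depth_first(DT.getRootNode()))
    (void)N->getBlock();
}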


@ -466,7 +466,6 @@ public:
/// @brief Determine if the parameter or return value is marked with NoAlias
/// attribute.
/// @param n The parameter to check. 1 is the first parameter, 0 is the return
bool returnDoesNotAlias() const {
return AttributeSets.hasAttribute(AttributeList::ReturnIndex,
Attribute::NoAlias);


@ -95,7 +95,7 @@ public:
isClobber // '~x'
};
typedef std::vector<std::string> ConstraintCodeVector;
using ConstraintCodeVector = std::vector<std::string>;
struct SubConstraintInfo {
/// MatchingInput - If this is not -1, this is an output constraint where an
@ -112,9 +112,9 @@ public:
SubConstraintInfo() = default;
};
typedef std::vector<SubConstraintInfo> SubConstraintInfoVector;
using SubConstraintInfoVector = std::vector<SubConstraintInfo>;
struct ConstraintInfo;
typedef std::vector<ConstraintInfo> ConstraintInfoVector;
using ConstraintInfoVector = std::vector<ConstraintInfo>;
struct ConstraintInfo {
/// Type - The basic type of the constraint: input/output/clobber


@ -31,20 +31,20 @@ namespace llvm {
// inst_iterator and const_inst_iterators.
//
template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
typedef BB_t BBty;
typedef BB_i_t BBIty;
typedef BI_t BIty;
typedef II_t IIty;
using BBty = BB_t;
using BBIty = BB_i_t;
using BIty = BI_t;
using IIty = II_t;
BB_t *BBs; // BasicBlocksType
BB_i_t BB; // BasicBlocksType::iterator
BI_t BI; // BasicBlock::iterator
public:
typedef std::bidirectional_iterator_tag iterator_category;
typedef IIty value_type;
typedef signed difference_type;
typedef IIty* pointer;
typedef IIty& reference;
using iterator_category = std::bidirectional_iterator_tag;
using value_type = IIty;
using difference_type = signed;
using pointer = IIty *;
using reference = IIty &;
// Default constructor
InstIterator() = default;
@ -119,13 +119,15 @@ private:
}
};
typedef InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
BasicBlock::iterator, Instruction> inst_iterator;
typedef InstIterator<const SymbolTableList<BasicBlock>,
Function::const_iterator, BasicBlock::const_iterator,
const Instruction> const_inst_iterator;
typedef iterator_range<inst_iterator> inst_range;
typedef iterator_range<const_inst_iterator> const_inst_range;
using inst_iterator =
InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
BasicBlock::iterator, Instruction>;
using const_inst_iterator =
InstIterator<const SymbolTableList<BasicBlock>,
Function::const_iterator, BasicBlock::const_iterator,
const Instruction>;
using inst_range = iterator_range<inst_iterator>;
using const_inst_range = iterator_range<const_inst_iterator>;
inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); }
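The aliases above feed inst_begin/inst_end; a minimal sketch of flat iteration over a function (illustrative):

#include "llvm/IR/InstIterator.h"

// Sketch: inst_iterator visits every Instruction without an explicit
// BasicBlock loop.
static unsigned countInstructions(llvm::Function *F) {
  unsigned N = 0;
  for (llvm::inst_iterator I = llvm::inst_begin(F), E = llvm::inst_end(F);
       I != E; ++I)
    ++N;
  return N;
}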


@ -1,4 +1,4 @@
//===-- llvm/InstrTypes.h - Important Instruction subclasses ----*- C++ -*-===//
//===- llvm/InstrTypes.h - Important Instruction subclasses -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -29,7 +29,9 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@ -114,17 +116,17 @@ public:
template <class Term, class BB> // Successor Iterator
class SuccIterator : public std::iterator<std::random_access_iterator_tag, BB,
int, BB *, BB *> {
typedef std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>
super;
using super =
std::iterator<std::random_access_iterator_tag, BB, int, BB *, BB *>;
public:
typedef typename super::pointer pointer;
typedef typename super::reference reference;
using pointer = typename super::pointer;
using reference = typename super::reference;
private:
Term TermInst;
unsigned idx;
typedef SuccIterator<Term, BB> Self;
using Self = SuccIterator<Term, BB>;
inline bool index_is_valid(unsigned idx) {
return idx < TermInst->getNumSuccessors();
@ -260,11 +262,11 @@ public:
}
};
typedef SuccIterator<TerminatorInst *, BasicBlock> succ_iterator;
typedef SuccIterator<const TerminatorInst *, const BasicBlock>
succ_const_iterator;
typedef iterator_range<succ_iterator> succ_range;
typedef iterator_range<succ_const_iterator> succ_const_range;
using succ_iterator = SuccIterator<TerminatorInst *, BasicBlock>;
using succ_const_iterator =
SuccIterator<const TerminatorInst *, const BasicBlock>;
using succ_range = iterator_range<succ_iterator>;
using succ_const_range = iterator_range<succ_const_iterator>;
private:
inline succ_iterator succ_begin() { return succ_iterator(this); }
@ -341,14 +343,16 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(UnaryInstruction, Value)
class BinaryOperator : public Instruction {
protected:
void init(BinaryOps iType);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, Instruction *InsertBefore);
BinaryOperator(BinaryOps iType, Value *S1, Value *S2, Type *Ty,
const Twine &Name, BasicBlock *InsertAtEnd);
void init(BinaryOps iType);
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
BinaryOperator *cloneImpl() const;
public:
@ -1125,8 +1129,6 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CmpInst, Value)
//===----------------------------------------------------------------------===//
class FuncletPadInst : public Instruction {
private:
void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
FuncletPadInst(const FuncletPadInst &CPI);
explicit FuncletPadInst(Instruction::FuncletPadOps Op, Value *ParentPad,
@ -1136,11 +1138,14 @@ private:
ArrayRef<Value *> Args, unsigned Values,
const Twine &NameStr, BasicBlock *InsertAtEnd);
void init(Value *ParentPad, ArrayRef<Value *> Args, const Twine &NameStr);
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
friend class CatchPadInst;
friend class CleanupPadInst;
FuncletPadInst *cloneImpl() const;
public:
@ -1261,7 +1266,8 @@ public:
ArrayRef<InputTy> inputs() const { return Inputs; }
typedef typename std::vector<InputTy>::const_iterator input_iterator;
using input_iterator = typename std::vector<InputTy>::const_iterator;
size_t input_size() const { return Inputs.size(); }
input_iterator input_begin() const { return Inputs.begin(); }
input_iterator input_end() const { return Inputs.end(); }
@ -1269,8 +1275,8 @@ public:
StringRef getTag() const { return Tag; }
};
typedef OperandBundleDefT<Value *> OperandBundleDef;
typedef OperandBundleDefT<const Value *> ConstOperandBundleDef;
using OperandBundleDef = OperandBundleDefT<Value *>;
using ConstOperandBundleDef = OperandBundleDefT<const Value *>;
/// \brief A mixin to add operand bundle functionality to llvm instruction
/// classes.
@ -1553,8 +1559,8 @@ protected:
return OperandBundleUse(BOI.Tag, Inputs);
}
typedef BundleOpInfo *bundle_op_iterator;
typedef const BundleOpInfo *const_bundle_op_iterator;
using bundle_op_iterator = BundleOpInfo *;
using const_bundle_op_iterator = const BundleOpInfo *;
/// \brief Return the start of the list of BundleOpInfo instances associated
/// with this OperandBundleUser.
@ -1654,6 +1660,6 @@ protected:
}
};
} // end llvm namespace
} // end namespace llvm
#endif // LLVM_IR_INSTRTYPES_H
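The succ_range alias above is what the successors() helper in llvm/IR/CFG.h hands back; a minimal sketch (illustrative):

#include "llvm/IR/CFG.h"

// Sketch: succ_range lets range-for replace manual succ_begin/succ_end.
static bool hasSuccessor(llvm::BasicBlock *BB, llvm::BasicBlock *Target) {
  for (llvm::BasicBlock *Succ : llvm::successors(BB))
    if (Succ == Target)
      return true;
  return false;
}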


@ -795,6 +795,14 @@ def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
// XRay intrinsics
//===----------------------------------------------------------------------===//
// Custom event logging for XRay.
// Takes a pointer to a string and the length of the string.
def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
[NoCapture<0>, ReadOnly<0>, IntrWriteMem]>;
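A hedged sketch of emitting the new intrinsic from C++; Str and Len are assumed to be existing i8* and i32 values in the caller.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

// Sketch: llvm.xray.customevent takes the string pointer and its length.
static void emitXRayEvent(llvm::IRBuilder<> &B, llvm::Value *Str,
                          llvm::Value *Len) {
  llvm::Module *M = B.GetInsertBlock()->getModule();
  llvm::Function *Ev =
      llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::xray_customevent);
  B.CreateCall(Ev, {Str, Len});
}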
//===----------------------------------------------------------------------===//
//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
//


@ -22,12 +22,26 @@ let TargetPrefix = "arm" in { // All intrinsics start with "llvm.arm.".
// and return value are essentially chains, used to force ordering during ISel.
def int_arm_space : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// 16-bit multiplications
def int_arm_smulbb : GCCBuiltin<"__builtin_arm_smulbb">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smulbt : GCCBuiltin<"__builtin_arm_smulbt">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smultb : GCCBuiltin<"__builtin_arm_smultb">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smultt : GCCBuiltin<"__builtin_arm_smultt">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smulwb : GCCBuiltin<"__builtin_arm_smulwb">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smulwt : GCCBuiltin<"__builtin_arm_smulwt">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Saturating Arithmetic
def int_arm_qadd : GCCBuiltin<"__builtin_arm_qadd">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, Commutative]>;
[Commutative, IntrNoMem]>;
def int_arm_qsub : GCCBuiltin<"__builtin_arm_qsub">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
@ -35,6 +49,176 @@ def int_arm_ssat : GCCBuiltin<"__builtin_arm_ssat">,
def int_arm_usat : GCCBuiltin<"__builtin_arm_usat">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Accumulating multiplications
def int_arm_smlabb : GCCBuiltin<"__builtin_arm_smlabb">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlabt : GCCBuiltin<"__builtin_arm_smlabt">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlatb : GCCBuiltin<"__builtin_arm_smlatb">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlatt : GCCBuiltin<"__builtin_arm_smlatt">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlawb : GCCBuiltin<"__builtin_arm_smlawb">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlawt : GCCBuiltin<"__builtin_arm_smlawt">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
// Parallel 16-bit saturation
def int_arm_ssat16 : GCCBuiltin<"__builtin_arm_ssat16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_usat16 : GCCBuiltin<"__builtin_arm_usat16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Packing and unpacking
def int_arm_sxtab16 : GCCBuiltin<"__builtin_arm_sxtab16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_sxtb16 : GCCBuiltin<"__builtin_arm_sxtb16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
def int_arm_uxtab16 : GCCBuiltin<"__builtin_arm_uxtab16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uxtb16 : GCCBuiltin<"__builtin_arm_uxtb16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem]>;
// Parallel selection, reads the GE flags.
def int_arm_sel : GCCBuiltin<"__builtin_arm_sel">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrReadMem]>;
// Parallel 8-bit addition and subtraction
def int_arm_qadd8 : GCCBuiltin<"__builtin_arm_qadd8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_qsub8 : GCCBuiltin<"__builtin_arm_qsub8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_sadd8 : GCCBuiltin<"__builtin_arm_sadd8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_shadd8 : GCCBuiltin<"__builtin_arm_shadd8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_shsub8 : GCCBuiltin<"__builtin_arm_shsub8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_ssub8 : GCCBuiltin<"__builtin_arm_ssub8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_uadd8 : GCCBuiltin<"__builtin_arm_uadd8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_uhadd8 : GCCBuiltin<"__builtin_arm_uhadd8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uhsub8 : GCCBuiltin<"__builtin_arm_uhsub8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uqadd8 : GCCBuiltin<"__builtin_arm_uqadd8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uqsub8 : GCCBuiltin<"__builtin_arm_uqsub8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_usub8 : GCCBuiltin<"__builtin_arm_usub8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Sum of 8-bit absolute differences
def int_arm_usad8 : GCCBuiltin<"__builtin_arm_usad8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_usada8 : GCCBuiltin<"__builtin_arm_usada8">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
// Parallel 16-bit addition and subtraction
def int_arm_qadd16 : GCCBuiltin<"__builtin_arm_qadd16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_qasx : GCCBuiltin<"__builtin_arm_qasx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_qsax : GCCBuiltin<"__builtin_arm_qsax">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_qsub16 : GCCBuiltin<"__builtin_arm_qsub16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_sadd16 : GCCBuiltin<"__builtin_arm_sadd16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_sasx : GCCBuiltin<"__builtin_arm_sasx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_shadd16 : GCCBuiltin<"__builtin_arm_shadd16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_shasx : GCCBuiltin<"__builtin_arm_shasx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_shsax : GCCBuiltin<"__builtin_arm_shsax">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_shsub16 : GCCBuiltin<"__builtin_arm_shsub16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_ssax : GCCBuiltin<"__builtin_arm_ssax">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_ssub16 : GCCBuiltin<"__builtin_arm_ssub16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_uadd16 : GCCBuiltin<"__builtin_arm_uadd16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_uasx : GCCBuiltin<"__builtin_arm_uasx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
def int_arm_uhadd16 : GCCBuiltin<"__builtin_arm_uhadd16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uhasx : GCCBuiltin<"__builtin_arm_uhasx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uhsax : GCCBuiltin<"__builtin_arm_uhsax">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uhsub16 : GCCBuiltin<"__builtin_arm_uhsub16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uqadd16 : GCCBuiltin<"__builtin_arm_uqadd16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uqasx : GCCBuiltin<"__builtin_arm_uqasx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uqsax : GCCBuiltin<"__builtin_arm_uqsax">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_uqsub16 : GCCBuiltin<"__builtin_arm_uqsub16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
// Writes to the GE bits.
def int_arm_usax : GCCBuiltin<"__builtin_arm_usax">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Writes to the GE bits.
def int_arm_usub16 : GCCBuiltin<"__builtin_arm_usub16">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], []>;
// Parallel 16-bit multiplication
def int_arm_smlad : GCCBuiltin<"__builtin_arm_smlad">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smladx : GCCBuiltin<"__builtin_arm_smladx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlald : GCCBuiltin<"__builtin_arm_smlald">,
Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_arm_smlaldx : GCCBuiltin<"__builtin_arm_smlaldx">,
Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_arm_smlsd : GCCBuiltin<"__builtin_arm_smlsd">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlsdx : GCCBuiltin<"__builtin_arm_smlsdx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_arm_smlsld : GCCBuiltin<"__builtin_arm_smlsld">,
Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_arm_smlsldx : GCCBuiltin<"__builtin_arm_smlsldx">,
Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_arm_smuad : GCCBuiltin<"__builtin_arm_smuad">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smuadx : GCCBuiltin<"__builtin_arm_smuadx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smusd : GCCBuiltin<"__builtin_arm_smusd">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
def int_arm_smusdx : GCCBuiltin<"__builtin_arm_smusdx">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
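Each GCCBuiltin mapping above lets Clang lower the matching __builtin_arm_* call directly to the intrinsic; a hedged sketch (assumes compiling with Clang for an ARM target with the DSP extensions):

// Sketch: __builtin_arm_qadd lowers to the llvm.arm.qadd intrinsic above.
int saturating_add(int a, int b) {
  return __builtin_arm_qadd(a, b);
}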
//===----------------------------------------------------------------------===//
// Load, Store and Clear exclusive


@ -45,58 +45,54 @@ struct CalleeInfo {
}
};
/// Struct to hold value either by GUID or GlobalValue*. Values in combined
/// indexes as well as indirect calls are GUIDs, all others are GlobalValues.
class GlobalValueSummary;
typedef std::vector<std::unique_ptr<GlobalValueSummary>> GlobalValueSummaryList;
struct GlobalValueSummaryInfo {
/// The GlobalValue corresponding to this summary. This is only used in
/// per-module summaries.
const GlobalValue *GV = nullptr;
/// List of global value summary structures for a particular value held
/// in the GlobalValueMap. Requires a vector in the case of multiple
/// COMDAT values of the same name.
GlobalValueSummaryList SummaryList;
};
/// Map from global value GUID to the corresponding summary structures. Use a
/// std::map rather than a DenseMap so that pointers to the map's value_type
/// (which are used by ValueInfo) are not invalidated by insertion. A std::map
/// is also likely to incur less overhead here: the value type is not very
/// small and the final size of the map is unknown, so a DenseMap would suffer
/// repeated insertions and resizing.
typedef std::map<GlobalValue::GUID, GlobalValueSummaryInfo>
GlobalValueSummaryMapTy;
/// Struct that holds a reference to a particular GUID in a global value
/// summary.
struct ValueInfo {
/// The value representation used in this instance.
enum ValueInfoKind {
VI_GUID,
VI_Value,
};
const GlobalValueSummaryMapTy::value_type *Ref = nullptr;
ValueInfo() = default;
ValueInfo(const GlobalValueSummaryMapTy::value_type *Ref) : Ref(Ref) {}
operator bool() const { return Ref; }
/// Union of the two possible value types.
union ValueUnion {
GlobalValue::GUID Id;
const GlobalValue *GV;
ValueUnion(GlobalValue::GUID Id) : Id(Id) {}
ValueUnion(const GlobalValue *GV) : GV(GV) {}
};
/// The value being represented.
ValueUnion TheValue;
/// The value representation.
ValueInfoKind Kind;
/// Constructor for a GUID value
ValueInfo(GlobalValue::GUID Id = 0) : TheValue(Id), Kind(VI_GUID) {}
/// Constructor for a GlobalValue* value
ValueInfo(const GlobalValue *V) : TheValue(V), Kind(VI_Value) {}
/// Accessor for GUID value
GlobalValue::GUID getGUID() const {
assert(Kind == VI_GUID && "Not a GUID type");
return TheValue.Id;
GlobalValue::GUID getGUID() const { return Ref->first; }
const GlobalValue *getValue() const { return Ref->second.GV; }
ArrayRef<std::unique_ptr<GlobalValueSummary>> getSummaryList() const {
return Ref->second.SummaryList;
}
/// Accessor for GlobalValue* value
const GlobalValue *getValue() const {
assert(Kind == VI_Value && "Not a Value type");
return TheValue.GV;
}
bool isGUID() const { return Kind == VI_GUID; }
};
template <> struct DenseMapInfo<ValueInfo> {
static inline ValueInfo getEmptyKey() { return ValueInfo((GlobalValue *)-1); }
static inline ValueInfo getEmptyKey() {
return ValueInfo((GlobalValueSummaryMapTy::value_type *)-1);
}
static inline ValueInfo getTombstoneKey() {
return ValueInfo((GlobalValue *)-2);
}
static bool isEqual(ValueInfo L, ValueInfo R) {
if (L.isGUID() != R.isGUID())
return false;
return L.isGUID() ? (L.getGUID() == R.getGUID())
: (L.getValue() == R.getValue());
}
static unsigned getHashValue(ValueInfo I) {
return I.isGUID() ? I.getGUID() : (uintptr_t)I.getValue();
return ValueInfo((GlobalValueSummaryMapTy::value_type *)-2);
}
static bool isEqual(ValueInfo L, ValueInfo R) { return L.Ref == R.Ref; }
static unsigned getHashValue(ValueInfo I) { return (uintptr_t)I.Ref; }
};
/// \brief Function and variable summary information to aid decisions and
@ -483,19 +479,6 @@ struct TypeIdSummary {
/// 160 bits SHA1
typedef std::array<uint32_t, 5> ModuleHash;
/// List of global value summary structures for a particular value held
/// in the GlobalValueMap. Requires a vector in the case of multiple
/// COMDAT values of the same name.
typedef std::vector<std::unique_ptr<GlobalValueSummary>> GlobalValueSummaryList;
/// Map from global value GUID to corresponding summary structures.
/// Use a std::map rather than a DenseMap since it will likely incur
/// less overhead, as the value type is not very small and the size
/// of the map is unknown, resulting in inefficiencies due to repeated
/// insertions and resizing.
typedef std::map<GlobalValue::GUID, GlobalValueSummaryList>
GlobalValueSummaryMapTy;
/// Type used for iterating through the global value summary map.
typedef GlobalValueSummaryMapTy::const_iterator const_gvsummary_iterator;
typedef GlobalValueSummaryMapTy::iterator gvsummary_iterator;
@ -532,6 +515,11 @@ private:
// YAML I/O support.
friend yaml::MappingTraits<ModuleSummaryIndex>;
GlobalValueSummaryMapTy::value_type *
getOrInsertValuePtr(GlobalValue::GUID GUID) {
return &*GlobalValueMap.emplace(GUID, GlobalValueSummaryInfo{}).first;
}
public:
gvsummary_iterator begin() { return GlobalValueMap.begin(); }
const_gvsummary_iterator begin() const { return GlobalValueMap.begin(); }
@ -539,21 +527,22 @@ public:
const_gvsummary_iterator end() const { return GlobalValueMap.end(); }
size_t size() const { return GlobalValueMap.size(); }
/// Get the list of global value summary objects for a given value name.
const GlobalValueSummaryList &getGlobalValueSummaryList(StringRef ValueName) {
return GlobalValueMap[GlobalValue::getGUID(ValueName)];
/// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
auto I = GlobalValueMap.find(GUID);
return ValueInfo(I == GlobalValueMap.end() ? nullptr : &*I);
}
/// Get the list of global value summary objects for a given value name.
const const_gvsummary_iterator
findGlobalValueSummaryList(StringRef ValueName) const {
return GlobalValueMap.find(GlobalValue::getGUID(ValueName));
/// Return a ValueInfo for \p GUID.
ValueInfo getOrInsertValueInfo(GlobalValue::GUID GUID) {
return ValueInfo(getOrInsertValuePtr(GUID));
}
/// Get the list of global value summary objects for a given value GUID.
const const_gvsummary_iterator
findGlobalValueSummaryList(GlobalValue::GUID ValueGUID) const {
return GlobalValueMap.find(ValueGUID);
/// Return a ValueInfo for \p GV and mark it as belonging to GV.
ValueInfo getOrInsertValueInfo(const GlobalValue *GV) {
auto VP = getOrInsertValuePtr(GV->getGUID());
VP->second.GV = GV;
return ValueInfo(VP);
}
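A sketch of the new lookup flow (the helper is illustrative): a default-constructed ValueInfo converts to false, so absence is checked before touching the summary list.

#include "llvm/IR/ModuleSummaryIndex.h"

// Sketch: resolve a GUID to its first summary, if any.
static llvm::GlobalValueSummary *
firstSummary(const llvm::ModuleSummaryIndex &Index, llvm::GlobalValue::GUID G) {
  llvm::ValueInfo VI = Index.getValueInfo(G);
  if (!VI || VI.getSummaryList().empty())
    return nullptr;
  return VI.getSummaryList().front().get();
}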
/// Return the GUID for \p OriginalId in the OidGuidMap.
@ -565,17 +554,18 @@ public:
/// Add a global value summary for a value of the given name.
void addGlobalValueSummary(StringRef ValueName,
std::unique_ptr<GlobalValueSummary> Summary) {
addOriginalName(GlobalValue::getGUID(ValueName),
Summary->getOriginalName());
GlobalValueMap[GlobalValue::getGUID(ValueName)].push_back(
std::move(Summary));
addGlobalValueSummary(getOrInsertValueInfo(GlobalValue::getGUID(ValueName)),
std::move(Summary));
}
/// Add a global value summary for a value of the given GUID.
void addGlobalValueSummary(GlobalValue::GUID ValueGUID,
/// Add a global value summary for the given ValueInfo.
void addGlobalValueSummary(ValueInfo VI,
std::unique_ptr<GlobalValueSummary> Summary) {
addOriginalName(ValueGUID, Summary->getOriginalName());
GlobalValueMap[ValueGUID].push_back(std::move(Summary));
addOriginalName(VI.getGUID(), Summary->getOriginalName());
// Here we have a notionally const VI, but the value it points to is owned
// by the non-const *this.
const_cast<GlobalValueSummaryMapTy::value_type *>(VI.Ref)
->second.SummaryList.push_back(std::move(Summary));
}
/// Add an original name for the value of the given GUID.
@ -593,16 +583,16 @@ public:
/// not found.
GlobalValueSummary *findSummaryInModule(GlobalValue::GUID ValueGUID,
StringRef ModuleId) const {
auto CalleeInfoList = findGlobalValueSummaryList(ValueGUID);
if (CalleeInfoList == end()) {
auto CalleeInfo = getValueInfo(ValueGUID);
if (!CalleeInfo) {
return nullptr; // This function does not have a summary
}
auto Summary =
llvm::find_if(CalleeInfoList->second,
llvm::find_if(CalleeInfo.getSummaryList(),
[&](const std::unique_ptr<GlobalValueSummary> &Summary) {
return Summary->modulePath() == ModuleId;
});
if (Summary == CalleeInfoList->second.end())
if (Summary == CalleeInfo.getSummaryList().end())
return nullptr;
return Summary->get();
}


@ -201,7 +201,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
for (auto &FSum : FSums) {
GlobalValueSummary::GVFlags GVFlags(GlobalValue::ExternalLinkage, false,
false);
Elem.push_back(llvm::make_unique<FunctionSummary>(
Elem.SummaryList.push_back(llvm::make_unique<FunctionSummary>(
GVFlags, 0, ArrayRef<ValueInfo>{},
ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
std::move(FSum.TypeTestAssumeVCalls),
@ -213,7 +213,7 @@ template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
static void output(IO &io, GlobalValueSummaryMapTy &V) {
for (auto &P : V) {
std::vector<FunctionSummaryYaml> FSums;
for (auto &Sum : P.second) {
for (auto &Sum : P.second.SummaryList) {
if (auto *FSum = dyn_cast<FunctionSummary>(Sum.get()))
FSums.push_back(FunctionSummaryYaml{
FSum->type_tests(), FSum->type_test_assume_vcalls(),


@ -63,6 +63,8 @@ public:
// Return true if the constant pool is empty
bool empty();
void clearCache();
};
class AssemblerConstantPools {
@ -86,6 +88,7 @@ class AssemblerConstantPools {
public:
void emitAll(MCStreamer &Streamer);
void emitForCurrentSection(MCStreamer &Streamer);
void clearCacheForCurrentSection(MCStreamer &Streamer);
const MCExpr *addEntry(MCStreamer &Streamer, const MCExpr *Expr,
unsigned Size, SMLoc Loc);


@ -20,7 +20,9 @@
#include "llvm/Object/Binary.h"
#include "llvm/Object/Error.h"
#include "llvm/Object/ObjectFile.h"
#include "llvm/Support/BinaryByteStream.h"
#include "llvm/Support/COFF.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ErrorOr.h"
@ -40,6 +42,7 @@ class DelayImportDirectoryEntryRef;
class ExportDirectoryEntryRef;
class ImportDirectoryEntryRef;
class ImportedSymbolRef;
class ResourceSectionRef;
using import_directory_iterator = content_iterator<ImportDirectoryEntryRef>;
using delay_import_directory_iterator =
@ -623,6 +626,26 @@ struct coff_base_reloc_block_entry {
int getOffset() const { return Data & ((1 << 12) - 1); }
};
struct coff_resource_dir_entry {
union {
support::ulittle32_t NameOffset;
support::ulittle32_t ID;
uint32_t getNameOffset() const {
return maskTrailingOnes<uint32_t>(31) & NameOffset;
}
} Identifier;
union {
support::ulittle32_t DataEntryOffset;
support::ulittle32_t SubdirOffset;
bool isSubDir() const { return SubdirOffset >> 31; }
uint32_t value() const {
return maskTrailingOnes<uint32_t>(31) & SubdirOffset;
}
} Offset;
};
struct coff_resource_dir_table {
support::ulittle32_t Characteristics;
support::ulittle32_t TimeDateStamp;
@ -1047,6 +1070,23 @@ private:
const COFFObjectFile *OwningObject = nullptr;
};
class ResourceSectionRef {
public:
ResourceSectionRef() = default;
explicit ResourceSectionRef(StringRef Ref) : BBS(Ref, support::little) {}
ErrorOr<ArrayRef<UTF16>> getEntryNameString(const coff_resource_dir_entry &Entry);
ErrorOr<const coff_resource_dir_table &>
getEntrySubDir(const coff_resource_dir_entry &Entry);
ErrorOr<const coff_resource_dir_table &> getBaseTable();
private:
BinaryByteStream BBS;
ErrorOr<const coff_resource_dir_table &> getTableAtOffset(uint32_t Offset);
ErrorOr<ArrayRef<UTF16>> getDirStringAtOffset(uint32_t Offset);
};
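A hedged sketch of the new reader; Data is assumed to hold the raw bytes of a COFF .rsrc section.

#include "llvm/ADT/StringRef.h"
#include "llvm/Object/COFF.h"

// Sketch: getBaseTable() yields the root directory table or an error code.
static bool hasResourceTable(llvm::StringRef Data) {
  llvm::object::ResourceSectionRef RSF(Data);
  llvm::ErrorOr<const llvm::object::coff_resource_dir_table &> Table =
      RSF.getBaseTable();
  return static_cast<bool>(Table);
}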
// Corresponds to `_FPO_DATA` structure in the PE/COFF spec.
struct FpoData {
support::ulittle32_t Offset; // ulOffStart: Offset 1st byte of function code


@ -41,10 +41,14 @@ public:
DEBUG_FUNCTION_NAME,
};
WasmSymbol(StringRef Name, SymbolType Type) : Name(Name), Type(Type) {}
WasmSymbol(StringRef Name, SymbolType Type, uint32_t Section,
uint32_t ElementIndex)
: Name(Name), Type(Type), Section(Section), ElementIndex(ElementIndex) {}
StringRef Name;
SymbolType Type;
uint32_t Section;
uint32_t ElementIndex;
};
class WasmSection {


@ -97,6 +97,11 @@ struct DataSegment {
yaml::BinaryRef Content;
};
struct NameEntry {
uint32_t Index;
StringRef Name;
};
struct Signature {
Signature() : Form(wasm::WASM_TYPE_FUNC) {}
@ -122,6 +127,11 @@ struct CustomSection : Section {
StringRef Name;
yaml::BinaryRef Payload;
// The following is used by the "name" custom section.
// TODO(sbc): Add support for more than just function names. The wasm
// name section can support multiple sub-sections.
std::vector<NameEntry> FunctionNames;
};
struct TypeSection : Section {
@ -244,6 +254,7 @@ LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Global)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Function)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::LocalDecl)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::Relocation)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::WasmYAML::NameEntry)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(uint32_t)
namespace llvm {
@ -297,6 +308,10 @@ template <> struct MappingTraits<WasmYAML::Relocation> {
static void mapping(IO &IO, WasmYAML::Relocation &Relocation);
};
template <> struct MappingTraits<WasmYAML::NameEntry> {
static void mapping(IO &IO, WasmYAML::NameEntry &NameEntry);
};
template <> struct MappingTraits<WasmYAML::LocalDecl> {
static void mapping(IO &IO, WasmYAML::LocalDecl &LocalDecl);
};


@ -20,8 +20,7 @@ AARCH64_ARCH("invalid", AK_INVALID, nullptr, nullptr,
ARMBuildAttrs::CPUArch::v8_A, FK_NONE, AArch64::AEK_NONE)
AARCH64_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
AArch64::AEK_SIMD))
(AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_SIMD))
AARCH64_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
@ -52,38 +51,37 @@ AARCH64_ARCH_EXT_NAME("ras", AArch64::AEK_RAS, "+ras", "-ras")
#define AARCH64_CPU_NAME(NAME, ID, DEFAULT_FPU, IS_DEFAULT, DEFAULT_EXT)
#endif
AARCH64_CPU_NAME("cortex-a35", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a53", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, true,
( AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a57", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a72", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("cortex-a73", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("cyclone", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRYPTO))
(AArch64::AEK_NONE))
AARCH64_CPU_NAME("exynos-m1", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m2", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("exynos-m3", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("falkor", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("kryo", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO))
(AArch64::AEK_CRC))
AARCH64_CPU_NAME("thunderx2t99", AK_ARMV8_1A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_LSE | AArch64::AEK_CRC |
AArch64::AEK_CRYPTO))
(AArch64::AEK_NONE))
AARCH64_CPU_NAME("thunderx", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
(AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt88", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
(AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt81", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
(AArch64::AEK_CRC | AArch64::AEK_PROFILE))
AARCH64_CPU_NAME("thunderxt83", AK_ARMV8A, FK_CRYPTO_NEON_FP_ARMV8, false,
(AArch64::AEK_SIMD | AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP | AArch64::AEK_PROFILE))
(AArch64::AEK_CRC | AArch64::AEK_PROFILE))
// Invalid CPU
AARCH64_CPU_NAME("invalid", AK_INVALID, FK_INVALID, true, AArch64::AEK_INVALID)
#undef AARCH64_CPU_NAME


@ -64,8 +64,10 @@ class VarStreamArrayIterator
public:
VarStreamArrayIterator() = default;
VarStreamArrayIterator(const ArrayType &Array, const WrappedCtx &Ctx,
BinaryStreamRef Stream, bool *HadError = nullptr)
: IterRef(Stream), Ctx(&Ctx), Array(&Array), HadError(HadError) {
BinaryStreamRef Stream, bool *HadError = nullptr,
uint32_t Offset = 0)
: IterRef(Stream), Ctx(&Ctx), Array(&Array), AbsOffset(Offset),
HadError(HadError) {
if (IterRef.getLength() == 0)
moveToEnd();
else {
@ -115,6 +117,7 @@ public:
for (unsigned I = 0; I < N; ++I) {
// We are done with the current record, discard it so that we are
// positioned at the next record.
AbsOffset += ThisLen;
IterRef = IterRef.drop_front(ThisLen);
if (IterRef.getLength() == 0) {
// There is nothing after the current record, we must make this an end
@ -135,6 +138,8 @@ public:
return *this;
}
uint32_t offset() const { return AbsOffset; }
private:
void moveToEnd() {
Array = nullptr;
@ -152,6 +157,7 @@ private:
const WrappedCtx *Ctx{nullptr};
const ArrayType *Array{nullptr};
uint32_t ThisLen{0};
uint32_t AbsOffset{0};
bool HasError{false};
bool *HadError{nullptr};
};
@ -234,7 +240,7 @@ public:
/// since the behavior is undefined if \p Offset does not refer to the
/// beginning of a valid record.
Iterator at(uint32_t Offset) const {
return Iterator(*this, Ctx, Stream.drop_front(Offset), nullptr);
return Iterator(*this, Ctx, Stream.drop_front(Offset), nullptr, Offset);
}
BinaryStreamRef getUnderlyingStream() const { return Stream; }
@ -338,7 +344,7 @@ private:
template <typename T>
class FixedStreamArrayIterator
: public iterator_facade_base<FixedStreamArrayIterator<T>,
std::random_access_iterator_tag, T> {
std::random_access_iterator_tag, const T> {
public:
FixedStreamArrayIterator(const FixedStreamArray<T> &Array, uint32_t Index)
@ -352,6 +358,7 @@ public:
}
const T &operator*() const { return Array[Index]; }
const T &operator*() { return Array[Index]; }
bool operator==(const FixedStreamArrayIterator<T> &R) const {
assert(Array == R.Array);


@ -152,6 +152,30 @@ namespace COFF {
IMAGE_FILE_BYTES_REVERSED_HI = 0x8000
};
enum ResourceTypeID {
RID_Cursor = 1,
RID_Bitmap = 2,
RID_Icon = 3,
RID_Menu = 4,
RID_Dialog = 5,
RID_String = 6,
RID_FontDir = 7,
RID_Font = 8,
RID_Accelerator = 9,
RID_RCData = 10,
RID_MessageTable = 11,
RID_Group_Cursor = 12,
RID_Group_Icon = 14,
RID_Version = 16,
RID_DLGInclude = 17,
RID_PlugPlay = 19,
RID_VXD = 20,
RID_AniCursor = 21,
RID_AniIcon = 22,
RID_HTML = 23,
RID_Manifest = 24,
};
struct symbol {
char Name[NameSize];
uint32_t Value;
@ -349,6 +373,26 @@ namespace COFF {
IMAGE_REL_ARM_BLX23T = 0x0015
};
enum RelocationTypesARM64 {
IMAGE_REL_ARM64_ABSOLUTE = 0x0000,
IMAGE_REL_ARM64_ADDR32 = 0x0001,
IMAGE_REL_ARM64_ADDR32NB = 0x0002,
IMAGE_REL_ARM64_BRANCH26 = 0x0003,
IMAGE_REL_ARM64_PAGEBASE_REL2 = 0x0004,
IMAGE_REL_ARM64_REL21 = 0x0005,
IMAGE_REL_ARM64_PAGEOFFSET_12A = 0x0006,
IMAGE_REL_ARM64_PAGEOFFSET_12L = 0x0007,
IMAGE_REL_ARM64_SECREL = 0x0008,
IMAGE_REL_ARM64_SECREL_LOW12A = 0x0009,
IMAGE_REL_ARM64_SECREL_HIGH12A = 0x000A,
IMAGE_REL_ARM64_SECREL_LOW12L = 0x000B,
IMAGE_REL_ARM64_TOKEN = 0x000C,
IMAGE_REL_ARM64_SECTION = 0x000D,
IMAGE_REL_ARM64_ADDR64 = 0x000E,
IMAGE_REL_ARM64_BRANCH19 = 0x000F,
IMAGE_REL_ARM64_BRANCH14 = 0x0010,
};
enum COMDATType {
IMAGE_COMDAT_SELECT_NODUPLICATES = 1,
IMAGE_COMDAT_SELECT_ANY,


@ -24,6 +24,12 @@ struct KnownBits {
APInt Zero;
APInt One;
private:
// Internal constructor for creating a KnownBits from two APInts.
KnownBits(APInt Zero, APInt One)
: Zero(std::move(Zero)), One(std::move(One)) {}
public:
// Default construct Zero and One.
KnownBits() {}
@ -37,6 +43,55 @@ struct KnownBits {
return Zero.getBitWidth();
}
/// Returns true if there is conflicting information.
bool hasConflict() const { return Zero.intersects(One); }
/// Returns true if we know the value of all bits.
bool isConstant() const {
assert(!hasConflict() && "KnownBits conflict!");
return Zero.countPopulation() + One.countPopulation() == getBitWidth();
}
/// Returns the value when all bits have a known value. This just returns One
/// with a protective assertion.
const APInt &getConstant() const {
assert(isConstant() && "Can only get value when all bits are known");
return One;
}
/// Returns true if we don't know any bits.
bool isUnknown() const { return Zero.isNullValue() && One.isNullValue(); }
/// Resets the known state of all bits.
void resetAll() {
Zero.clearAllBits();
One.clearAllBits();
}
/// Returns true if value is all zero.
bool isZero() const {
assert(!hasConflict() && "KnownBits conflict!");
return Zero.isAllOnesValue();
}
/// Returns true if value is all one bits.
bool isAllOnes() const {
assert(!hasConflict() && "KnownBits conflict!");
return One.isAllOnesValue();
}
/// Make all bits known to be zero and discard any previous information.
void setAllZero() {
Zero.setAllBits();
One.clearAllBits();
}
/// Make all bits known to be one and discard any previous information.
void setAllOnes() {
Zero.clearAllBits();
One.setAllBits();
}
/// Returns true if this value is known to be negative.
bool isNegative() const { return One.isSignBitSet(); }
@ -54,6 +109,30 @@ struct KnownBits {
assert(!isNegative() && "Can't make a negative value non-negative");
Zero.setSignBit();
}
/// Truncate the underlying known Zero and One bits. This is equivalent
/// to truncating the value we're tracking.
KnownBits trunc(unsigned BitWidth) {
return KnownBits(Zero.trunc(BitWidth), One.trunc(BitWidth));
}
/// Zero extends the underlying known Zero and One bits. This is equivalent
/// to zero extending the value we're tracking.
KnownBits zext(unsigned BitWidth) {
return KnownBits(Zero.zext(BitWidth), One.zext(BitWidth));
}
/// Sign extends the underlying known Zero and One bits. This is equivalent
/// to sign extending the value we're tracking.
KnownBits sext(unsigned BitWidth) {
return KnownBits(Zero.sext(BitWidth), One.sext(BitWidth));
}
/// Zero extends or truncates the underlying known Zero and One bits. This is
/// equivalent to zero extending or truncating the value we're tracking.
KnownBits zextOrTrunc(unsigned BitWidth) {
return KnownBits(Zero.zextOrTrunc(BitWidth), One.zextOrTrunc(BitWidth));
}
};
} // end namespace llvm
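A sketch of the new accessors with illustrative values: Zero and One mark bits known to be 0 or 1, and the extension helpers carry that knowledge across width changes.

#include "llvm/Support/KnownBits.h"
#include <cassert>

// Sketch: an 8-bit value whose low nibble is known to be 0b0101.
static void knownBitsDemo() {
  llvm::KnownBits Known(8);
  Known.One = llvm::APInt(8, 0x05);  // bits 0 and 2 known one
  Known.Zero = llvm::APInt(8, 0x0A); // bits 1 and 3 known zero
  assert(!Known.hasConflict() && !Known.isConstant());
  llvm::KnownBits Wide = Known.zext(16); // knowledge survives widening
  assert(Wide.getBitWidth() == 16 && !Wide.isNegative());
}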


@ -214,6 +214,18 @@ template <typename T> T maskLeadingOnes(unsigned N) {
return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
/// \brief Create a bitmask with the N right-most bits set to 0, and all other
/// bits set to 1. Only unsigned types are allowed.
template <typename T> T maskTrailingZeros(unsigned N) {
return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
/// \brief Create a bitmask with the N left-most bits set to 0, and all other
/// bits set to 1. Only unsigned types are allowed.
template <typename T> T maskLeadingZeros(unsigned N) {
return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}
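A quick worked check of the two new helpers (illustrative): for N = 4 on an 8-bit type the masks are exact complements.

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

// Sketch: maskTrailingZeros clears the low N bits; maskLeadingZeros
// clears the high N bits.
static void maskDemo() {
  assert(llvm::maskTrailingZeros<uint8_t>(4) == 0xF0);
  assert(llvm::maskLeadingZeros<uint8_t>(4) == 0x0F);
}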
/// \brief Get the index of the last set bit starting from the least
/// significant bit.
///


@ -25,25 +25,43 @@ class GINodeEquiv<Instruction i, SDNode node> {
SDNode Node = node;
}
def : GINodeEquiv<G_ZEXT, zext>;
// These are defined in the same order as the G_* instructions.
def : GINodeEquiv<G_ANYEXT, anyext>;
def : GINodeEquiv<G_SEXT, sext>;
def : GINodeEquiv<G_ZEXT, zext>;
def : GINodeEquiv<G_TRUNC, trunc>;
def : GINodeEquiv<G_BITCAST, bitconvert>;
// G_INTTOPTR - SelectionDAG has no equivalent.
// G_PTRTOINT - SelectionDAG has no equivalent.
// G_CONSTANT - Not needed since constants aren't operators.
// G_FCONSTANT - Not needed since constants aren't operators.
def : GINodeEquiv<G_ADD, add>;
def : GINodeEquiv<G_SUB, sub>;
def : GINodeEquiv<G_MUL, mul>;
def : GINodeEquiv<G_OR, or>;
def : GINodeEquiv<G_XOR, xor>;
def : GINodeEquiv<G_AND, and>;
def : GINodeEquiv<G_SHL, shl>;
def : GINodeEquiv<G_LSHR, srl>;
def : GINodeEquiv<G_ASHR, sra>;
def : GINodeEquiv<G_SDIV, sdiv>;
def : GINodeEquiv<G_UDIV, udiv>;
def : GINodeEquiv<G_SREM, srem>;
def : GINodeEquiv<G_UREM, urem>;
def : GINodeEquiv<G_AND, and>;
def : GINodeEquiv<G_OR, or>;
def : GINodeEquiv<G_XOR, xor>;
def : GINodeEquiv<G_SHL, shl>;
def : GINodeEquiv<G_LSHR, srl>;
def : GINodeEquiv<G_ASHR, sra>;
def : GINodeEquiv<G_SELECT, select>;
def : GINodeEquiv<G_FNEG, fneg>;
def : GINodeEquiv<G_FPEXT, fpextend>;
def : GINodeEquiv<G_FPTRUNC, ftrunc>;
def : GINodeEquiv<G_FPTOSI, fp_to_sint>;
def : GINodeEquiv<G_FPTOUI, fp_to_uint>;
def : GINodeEquiv<G_SITOFP, sint_to_fp>;
def : GINodeEquiv<G_UITOFP, uint_to_fp>;
def : GINodeEquiv<G_FADD, fadd>;
def : GINodeEquiv<G_FSUB, fsub>;
def : GINodeEquiv<G_FMUL, fmul>;
def : GINodeEquiv<G_FDIV, fdiv>;
def : GINodeEquiv<G_FREM, frem>;
def : GINodeEquiv<G_FPOW, fpow>;
def : GINodeEquiv<G_BR, br>;
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.


@ -1002,6 +1002,16 @@ def PATCHABLE_TAIL_CALL : Instruction {
let hasSideEffects = 1;
let isReturn = 1;
}
def PATCHABLE_EVENT_CALL : Instruction {
let OutOperandList = (outs);
let InOperandList = (ins ptr_rc:$event, i8imm:$size);
let AsmString = "# XRay Custom Event Log.";
let usesCustomInserter = 1;
let isCall = 1;
let mayLoad = 1;
let mayStore = 1;
let hasSideEffects = 1;
}
def FENTRY_CALL : Instruction {
let OutOperandList = (outs unknown:$dst);
let InOperandList = (ins variable_ops);


@ -182,6 +182,10 @@ HANDLE_TARGET_OPCODE(PATCHABLE_FUNCTION_EXIT)
/// PATCHABLE_RET which specifically only works for return instructions.
HANDLE_TARGET_OPCODE(PATCHABLE_TAIL_CALL)
/// Wraps a logging call and its arguments with nop sleds. At runtime, this can be
/// patched to insert instrumentation instructions.
HANDLE_TARGET_OPCODE(PATCHABLE_EVENT_CALL)
/// The following generic opcodes are not supposed to appear after ISel.
/// This is something we might want to relax, but for now, this is convenient
/// to produce diagnostics.


@ -177,6 +177,7 @@ struct SanitizerCoverageOptions {
bool Use8bitCounters = false;
bool TracePC = false;
bool TracePCGuard = false;
bool NoPrune = false;
SanitizerCoverageOptions() = default;
};


@ -31,7 +31,7 @@ public:
private:
void findRoots(Function &F, SmallPtrSet<Instruction *, 8> &Roots);
ConstantRange seen(Instruction *I, ConstantRange R);
void seen(Instruction *I, ConstantRange R);
ConstantRange badRange();
ConstantRange unknownRange();
ConstantRange validateRange(ConstantRange R);


@ -701,11 +701,10 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
return Op1;
}
APInt KnownZero = Known0.Zero | Known1.Zero;
APInt KnownOne = Known0.One & Known1.One;
if ((KnownZero | KnownOne).isAllOnesValue()) {
return ConstantInt::get(Op0->getType(), KnownOne);
}
Known0.Zero |= Known1.Zero;
Known0.One &= Known1.One;
if (Known0.isConstant())
return ConstantInt::get(Op0->getType(), Known0.getConstant());
}
// If the constant expr is something like &A[123] - &A[4].f, fold this into a


@ -1495,36 +1495,87 @@ static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
ICmpInst::Predicate Pred0, Pred1;
Value *A ,*B;
if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
!match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
return nullptr;
// We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
// If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
// can eliminate Op0 from this 'or'.
if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
return Op1;
// Check for any combination of predicates that cover the entire range of
// possibilities.
if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
(Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
(Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
(Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
return getTrue(Op0->getType());
return nullptr;
}
/// Test if a pair of compares with a shared operand and 2 constants has an
/// empty set intersection, full set union, or if one compare is a superset of
/// the other.
static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
bool IsAnd) {
// Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
return nullptr;
const APInt *C0, *C1;
if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
!match(Cmp1->getOperand(1), m_APInt(C1)))
return nullptr;
auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
// For and-of-compares, check if the intersection is empty:
// (icmp X, C0) && (icmp X, C1) --> empty set --> false
if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
return getFalse(Cmp0->getType());
// For or-of-compares, check if the union is full:
// (icmp X, C0) || (icmp X, C1) --> full set --> true
if (!IsAnd && Range0.unionWith(Range1).isFullSet())
return getTrue(Cmp0->getType());
// Is one range a superset of the other?
// If this is and-of-compares, take the smaller set:
// (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
// If this is or-of-compares, take the larger set:
// (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
if (Range0.contains(Range1))
return IsAnd ? Cmp1 : Cmp0;
if (Range1.contains(Range0))
return IsAnd ? Cmp0 : Cmp1;
return nullptr;
}
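A worked sketch of the containment case (illustrative): for signed greater-than compares of the same i32 value against 4 and 42, the second region is a subset of the first, so an 'and' of the compares keeps the tighter one.

#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
#include <cassert>

// Sketch: (icmp sgt X, 42) describes a subset of (icmp sgt X, 4).
static void rangeFoldDemo() {
  using namespace llvm;
  ConstantRange R0 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_SGT, APInt(32, 4));
  ConstantRange R1 =
      ConstantRange::makeExactICmpRegion(CmpInst::ICMP_SGT, APInt(32, 42));
  assert(R0.contains(R1) && "and-of-icmps folds to the 42 compare");
}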
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
return X;
if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
return X;
// FIXME: This should be shared with or-of-icmps.
// Look for this pattern: (icmp V, C0) & (icmp V, C1)).
if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
return X;
// (icmp (add V, C0), C1) & (icmp V, C0)
Type *ITy = Op0->getType();
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
Value *V;
if (match(Op0, m_ICmp(Pred0, m_Value(V), m_APInt(C0))) &&
match(Op1, m_ICmp(Pred1, m_Specific(V), m_APInt(C1)))) {
// Make a constant range that's the intersection of the two icmp ranges.
// If the intersection is empty, we know that the result is false.
auto Range0 = ConstantRange::makeExactICmpRegion(Pred0, *C0);
auto Range1 = ConstantRange::makeExactICmpRegion(Pred1, *C1);
if (Range0.intersectWith(Range1).isEmptySet())
return getFalse(ITy);
// If a range is a superset of the other, the smaller set is all we need.
if (Range0.contains(Range1))
return Op1;
if (Range1.contains(Range0))
return Op0;
}
// (icmp (add V, C0), C1) & (icmp V, C0)
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
return nullptr;
@ -1565,6 +1616,103 @@ static Value *SimplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
return nullptr;
}
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
return X;
if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
return X;
if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
return X;
// (icmp (add V, C0), C1) | (icmp V, C0)
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
Value *V;
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
return nullptr;
if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
return nullptr;
auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
if (AddInst->getOperand(1) != Op1->getOperand(1))
return nullptr;
Type *ITy = Op0->getType();
bool isNSW = AddInst->hasNoSignedWrap();
bool isNUW = AddInst->hasNoUnsignedWrap();
const APInt Delta = *C1 - *C0;
if (C0->isStrictlyPositive()) {
if (Delta == 2) {
if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
return getTrue(ITy);
if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
return getTrue(ITy);
}
if (Delta == 1) {
if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
return getTrue(ITy);
if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
return getTrue(ITy);
}
}
if (C0->getBoolValue() && isNUW) {
if (Delta == 2)
if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
return getTrue(ITy);
if (Delta == 1)
if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
return getTrue(ITy);
}
return nullptr;
}
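One of the Delta == 1 folds above, verified by brute force over all 8-bit inputs; the nuw requirement is modeled by skipping inputs that would wrap (a hedged sketch, not LLVM API code):

#include <cassert>

int main() {
  // (icmp ugt (add nuw V, C0), C1) | (icmp ule V, C0) --> true, C1 - C0 == 1
  for (unsigned C0 = 1; C0 < 256; ++C0) {
    unsigned C1 = (C0 + 1) & 0xff; // Delta == 1 in 8-bit arithmetic
    for (unsigned V = 0; V < 256; ++V) {
      if (V + C0 > 255)
        continue; // 'add nuw' rules out wrapping inputs
      assert((V + C0 > C1) || (V <= C0));
    }
  }
}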
static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1,
bool IsAnd, CastInst *Cast) {
Value *V =
IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
if (!V)
return nullptr;
if (!Cast)
return V;
// If we looked through casts, we can only handle a constant simplification
// because we are not allowed to create a cast instruction here.
if (auto *C = dyn_cast<Constant>(V))
return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType());
return nullptr;
}
static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) {
// Look through casts of the 'and' operands to find compares.
auto *Cast0 = dyn_cast<CastInst>(Op0);
auto *Cast1 = dyn_cast<CastInst>(Op1);
if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
Cast0->getSrcTy() == Cast1->getSrcTy()) {
Op0 = Cast0->getOperand(0);
Op1 = Cast1->getOperand(0);
}
auto *Cmp0 = dyn_cast<ICmpInst>(Op0);
auto *Cmp1 = dyn_cast<ICmpInst>(Op1);
if (!Cmp0 || !Cmp1)
return nullptr;
if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0))
return V;
if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0))
return V;
return nullptr;
}
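A small illustration of why only constant results survive the cast wrapper: if the inner icmp ranges never intersect, the i1 fold is the constant false, and extending that constant to the outer type needs no new cast instruction (hypothetical values, plain C++):

#include <cassert>
#include <cstdint>

int main() {
  // (zext (icmp slt X, 0)) & (zext (icmp sgt X, 0)) --> i8 0 for every X
  for (int X = -128; X < 128; ++X)
    assert(((uint8_t)(X < 0) & (uint8_t)(X > 0)) == 0);
}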
/// Given operands for an And, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
@ -1615,32 +1763,8 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return Op1;
}
if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
if (Value *V = SimplifyAndOfICmps(ICILHS, ICIRHS))
return V;
if (Value *V = SimplifyAndOfICmps(ICIRHS, ICILHS))
return V;
}
}
// The compares may be hidden behind casts. Look through those and try the
// same folds as above.
auto *Cast0 = dyn_cast<CastInst>(Op0);
auto *Cast1 = dyn_cast<CastInst>(Op1);
if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
Cast0->getSrcTy() == Cast1->getSrcTy()) {
auto *Cmp0 = dyn_cast<ICmpInst>(Cast0->getOperand(0));
auto *Cmp1 = dyn_cast<ICmpInst>(Cast1->getOperand(0));
if (Cmp0 && Cmp1) {
Instruction::CastOps CastOpc = Cast0->getOpcode();
Type *ResultType = Cast0->getType();
if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp0, Cmp1)))
return ConstantExpr::getCast(CastOpc, V, ResultType);
if (auto *V = dyn_cast_or_null<Constant>(SimplifyAndOfICmps(Cmp1, Cmp0)))
return ConstantExpr::getCast(CastOpc, V, ResultType);
}
}
if (Value *V = simplifyAndOrOfICmps(Op0, Op1, true))
return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
@ -1678,86 +1802,6 @@ Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
}
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
ICmpInst::Predicate Pred0, Pred1;
Value *A, *B;
if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
!match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
return nullptr;
// We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
// If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
// can eliminate Op0 from this 'or'.
if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
return Op1;
// Check for any combination of predicates that cover the entire range of
// possibilities.
if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
(Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
(Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
(Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
return getTrue(Op0->getType());
return nullptr;
}
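The "entire range" predicate pairs can be confirmed exhaustively at a small width; a sketch over all 8-bit operand pairs (plain C++):

#include <cassert>
#include <cstdint>

int main() {
  for (int X = -128; X < 128; ++X)
    for (int Y = -128; Y < 128; ++Y) {
      assert((X <= Y) || (X >= Y)); // SLE | SGE covers everything
      assert((uint8_t)X <= (uint8_t)Y || (uint8_t)X >= (uint8_t)Y); // ULE | UGE
    }
}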
/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *SimplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
return X;
if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
return X;
// (icmp (add V, C0), C1) | (icmp V, C0)
ICmpInst::Predicate Pred0, Pred1;
const APInt *C0, *C1;
Value *V;
if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
return nullptr;
if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
return nullptr;
auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
if (AddInst->getOperand(1) != Op1->getOperand(1))
return nullptr;
Type *ITy = Op0->getType();
bool isNSW = AddInst->hasNoSignedWrap();
bool isNUW = AddInst->hasNoUnsignedWrap();
const APInt Delta = *C1 - *C0;
if (C0->isStrictlyPositive()) {
if (Delta == 2) {
if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
return getTrue(ITy);
if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
return getTrue(ITy);
}
if (Delta == 1) {
if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
return getTrue(ITy);
if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
return getTrue(ITy);
}
}
if (C0->getBoolValue() && isNUW) {
if (Delta == 2)
if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
return getTrue(ITy);
if (Delta == 1)
if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
return getTrue(ITy);
}
return nullptr;
}
/// Given operands for an Or, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
@ -1826,14 +1870,8 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
return Op0;
if (auto *ICILHS = dyn_cast<ICmpInst>(Op0)) {
if (auto *ICIRHS = dyn_cast<ICmpInst>(Op1)) {
if (Value *V = SimplifyOrOfICmps(ICILHS, ICIRHS))
return V;
if (Value *V = SimplifyOrOfICmps(ICIRHS, ICILHS))
return V;
}
}
if (Value *V = simplifyAndOrOfICmps(Op0, Op1, false))
return V;
// Try some generic simplifications for associative operations.
if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
@ -4056,20 +4094,13 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
unsigned InVecNumElts = InVecTy->getVectorNumElements();
auto *Op0Const = dyn_cast<Constant>(Op0);
auto *Op1Const = dyn_cast<Constant>(Op1);
// If all operands are constant, constant fold the shuffle.
if (Op0Const && Op1Const)
return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
SmallVector<int, 32> Indices;
ShuffleVectorInst::getShuffleMask(Mask, Indices);
assert(MaskNumElts == Indices.size() &&
"Size of Indices not same as number of mask elements?");
// If only one of the operands is constant, constant fold the shuffle if the
// mask does not select elements from the variable operand.
// Canonicalization: If mask does not select elements from an input vector,
// replace that input vector with undef.
bool MaskSelects0 = false, MaskSelects1 = false;
for (unsigned i = 0; i != MaskNumElts; ++i) {
if (Indices[i] == -1)
@ -4079,23 +4110,41 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
else
MaskSelects1 = true;
}
if (!MaskSelects0 && Op1Const)
return ConstantFoldShuffleVectorInstruction(UndefValue::get(InVecTy),
Op1Const, Mask);
if (!MaskSelects1 && Op0Const)
return ConstantFoldShuffleVectorInstruction(Op0Const,
UndefValue::get(InVecTy), Mask);
if (!MaskSelects0)
Op0 = UndefValue::get(InVecTy);
if (!MaskSelects1)
Op1 = UndefValue::get(InVecTy);
auto *Op0Const = dyn_cast<Constant>(Op0);
auto *Op1Const = dyn_cast<Constant>(Op1);
// If all operands are constant, constant fold the shuffle.
if (Op0Const && Op1Const)
return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
// Canonicalization: if only one input vector is constant, it shall be the
// second one.
if (Op0Const && !Op1Const) {
std::swap(Op0, Op1);
for (int &Idx : Indices) {
if (Idx == -1)
continue;
Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
"shufflevector mask index out of range");
}
Mask = ConstantDataVector::get(
Mask->getContext(),
makeArrayRef(reinterpret_cast<uint32_t *>(Indices.data()),
MaskNumElts));
}
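A standalone model of the swap-and-remap canonicalization above (plain C++; undef/-1 mask elements omitted for brevity): every remapped index selects the same element once the two inputs are exchanged.

#include <cassert>
#include <vector>

// Simulates shufflevector: Idx < N picks from A, otherwise from B.
static std::vector<int> shuffle(const std::vector<int> &A,
                                const std::vector<int> &B,
                                const std::vector<int> &Mask) {
  std::vector<int> R;
  int N = (int)A.size();
  for (int Idx : Mask)
    R.push_back(Idx < N ? A[Idx] : B[Idx - N]);
  return R;
}

int main() {
  std::vector<int> A = {10, 20}, B = {30, 40};
  std::vector<int> Mask = {0, 3}; // selects A[0] and B[1]
  int N = (int)A.size();
  std::vector<int> Swapped = Mask;
  for (int &Idx : Swapped) // the remap performed above
    Idx = Idx < N ? Idx + N : Idx - N;
  assert(shuffle(A, B, Mask) == shuffle(B, A, Swapped));
}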
// A shuffle of a splat is always the splat itself. Legal if the shuffle's
// value type is the same as the input vectors' type.
if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
if (!MaskSelects1 && RetTy == InVecTy &&
if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
OpShuf->getMask()->getSplatValue())
return Op0;
if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op1))
if (!MaskSelects0 && RetTy == InVecTy &&
OpShuf->getMask()->getSplatValue())
return Op1;
// Don't fold a shuffle with undef mask elements. This may get folded in a
// better way using demanded bits or other analysis.
@ -4595,8 +4644,8 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
unsigned BitWidth = I->getType()->getScalarSizeInBits();
KnownBits Known(BitWidth);
computeKnownBits(I, Known, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
if ((Known.Zero | Known.One).isAllOnesValue())
Result = ConstantInt::get(I->getType(), Known.One);
if (Known.isConstant())
Result = ConstantInt::get(I->getType(), Known.getConstant());
}
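A toy 8-bit model of the isConstant()/getConstant() pair used above: when every bit is known (Zero | One covers the width), the known-one mask is the value itself (a sketch, not the KnownBits API):

#include <cassert>
#include <cstdint>

int main() {
  // Bit i of Zero/One set means bit i is known to be 0/1.
  uint8_t Zero = 0xf0, One = 0x0f;
  assert((uint8_t)(Zero | One) == 0xff); // all bits known => constant
  assert(One == 0x0f);                   // the constant is the One mask
}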
/// If called on unreachable code, the above logic may report that the

View File

@ -142,7 +142,7 @@ public:
return Val;
}
ConstantRange getConstantRange() const {
const ConstantRange &getConstantRange() const {
assert(isConstantRange() &&
"Cannot get the constant-range of a non-constant-range!");
return Range;
@ -250,7 +250,7 @@ public:
if (NewR.isFullSet())
markOverdefined();
else
markConstantRange(NewR);
markConstantRange(std::move(NewR));
}
};
@ -1079,8 +1079,8 @@ bool LazyValueInfoImpl::solveBlockValueSelect(LVILatticeVal &BBLV,
}
if (TrueVal.isConstantRange() && FalseVal.isConstantRange()) {
ConstantRange TrueCR = TrueVal.getConstantRange();
ConstantRange FalseCR = FalseVal.getConstantRange();
const ConstantRange &TrueCR = TrueVal.getConstantRange();
const ConstantRange &FalseCR = FalseVal.getConstantRange();
Value *LHS = nullptr;
Value *RHS = nullptr;
SelectPatternResult SPR = matchSelectPattern(SI, LHS, RHS);
@ -1649,7 +1649,7 @@ Constant *LazyValueInfo::getConstant(Value *V, BasicBlock *BB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
ConstantRange CR = Result.getConstantRange();
const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@ -1686,7 +1686,7 @@ Constant *LazyValueInfo::getConstantOnEdge(Value *V, BasicBlock *FromBB,
if (Result.isConstant())
return Result.getConstant();
if (Result.isConstantRange()) {
ConstantRange CR = Result.getConstantRange();
const ConstantRange &CR = Result.getConstantRange();
if (const APInt *SingleVal = CR.getSingleElement())
return ConstantInt::get(V->getContext(), *SingleVal);
}
@ -1712,7 +1712,7 @@ static LazyValueInfo::Tristate getPredicateResult(unsigned Pred, Constant *C,
ConstantInt *CI = dyn_cast<ConstantInt>(C);
if (!CI) return LazyValueInfo::Unknown;
ConstantRange CR = Result.getConstantRange();
const ConstantRange &CR = Result.getConstantRange();
if (Pred == ICmpInst::ICMP_EQ) {
if (!CR.contains(CI->getValue()))
return LazyValueInfo::False;

View File

@ -537,7 +537,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
unsigned BitWidth = V->getType()->getIntegerBitWidth();
KnownBits Known(BitWidth);
computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
return Known.Zero.isAllOnesValue();
return Known.isZero();
}
// Per-component check doesn't work with zeroinitializer
@ -558,7 +558,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
KnownBits Known(BitWidth);
computeKnownBits(Elem, Known, DL);
if (Known.Zero.isAllOnesValue())
if (Known.isZero())
return true;
}

View File

@ -37,7 +37,8 @@ using namespace llvm;
// Walk through the operands of a given User via worklist iteration and populate
// the set of GlobalValue references encountered. Invoked either on an
// Instruction or a GlobalVariable (which walks its initializer).
static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
static void findRefEdges(ModuleSummaryIndex &Index, const User *CurUser,
SetVector<ValueInfo> &RefEdges,
SmallPtrSet<const User *, 8> &Visited) {
SmallVector<const User *, 32> Worklist;
Worklist.push_back(CurUser);
@ -61,7 +62,7 @@ static void findRefEdges(const User *CurUser, SetVector<ValueInfo> &RefEdges,
// the reference set unless it is a callee. Callees are handled
// specially by WriteFunction and are added to a separate list.
if (!(CS && CS.isCallee(&OI)))
RefEdges.insert(GV);
RefEdges.insert(Index.getOrInsertValueInfo(GV));
continue;
}
Worklist.push_back(Operand);
@ -198,7 +199,7 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
if (isa<DbgInfoIntrinsic>(I))
continue;
++NumInsts;
findRefEdges(&I, RefEdges, Visited);
findRefEdges(Index, &I, RefEdges, Visited);
auto CS = ImmutableCallSite(&I);
if (!CS)
continue;
@ -239,7 +240,9 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
// to record the call edge to the alias in that case. Eventually
// an alias summary will be created to associate the alias and
// aliasee.
CallGraphEdges[cast<GlobalValue>(CalledValue)].updateHotness(Hotness);
CallGraphEdges[Index.getOrInsertValueInfo(
cast<GlobalValue>(CalledValue))]
.updateHotness(Hotness);
} else {
// Skip inline assembly calls.
if (CI && CI->isInlineAsm())
@ -254,15 +257,16 @@ computeFunctionSummary(ModuleSummaryIndex &Index, const Module &M,
ICallAnalysis.getPromotionCandidatesForInstruction(
&I, NumVals, TotalCount, NumCandidates);
for (auto &Candidate : CandidateProfileData)
CallGraphEdges[Candidate.Value].updateHotness(
getHotness(Candidate.Count, PSI));
CallGraphEdges[Index.getOrInsertValueInfo(Candidate.Value)]
.updateHotness(getHotness(Candidate.Count, PSI));
}
}
// Explicitly add hot edges to enforce importing for designated GUIDs for
// sample PGO, to enable the same inlines as the profiled optimized binary.
for (auto &I : F.getImportGUIDs())
CallGraphEdges[I].updateHotness(CalleeInfo::HotnessType::Hot);
CallGraphEdges[Index.getOrInsertValueInfo(I)].updateHotness(
CalleeInfo::HotnessType::Hot);
bool NonRenamableLocal = isNonRenamableLocal(F);
bool NotEligibleForImport =
@ -288,7 +292,7 @@ computeVariableSummary(ModuleSummaryIndex &Index, const GlobalVariable &V,
DenseSet<GlobalValue::GUID> &CantBePromoted) {
SetVector<ValueInfo> RefEdges;
SmallPtrSet<const User *, 8> Visited;
findRefEdges(&V, RefEdges, Visited);
findRefEdges(Index, &V, RefEdges, Visited);
bool NonRenamableLocal = isNonRenamableLocal(V);
GlobalValueSummary::GVFlags Flags(V.getLinkage(), NonRenamableLocal,
/* LiveRoot = */ false);
@ -317,12 +321,9 @@ computeAliasSummary(ModuleSummaryIndex &Index, const GlobalAlias &A,
// Set LiveRoot flag on entries matching the given value name.
static void setLiveRoot(ModuleSummaryIndex &Index, StringRef Name) {
auto SummaryList =
Index.findGlobalValueSummaryList(GlobalValue::getGUID(Name));
if (SummaryList == Index.end())
return;
for (auto &Summary : SummaryList->second)
Summary->setLiveRoot();
if (ValueInfo VI = Index.getValueInfo(GlobalValue::getGUID(Name)))
for (auto &Summary : VI.getSummaryList())
Summary->setLiveRoot();
}
ModuleSummaryIndex llvm::buildModuleSummaryIndex(
@ -446,12 +447,16 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
}
for (auto &GlobalList : Index) {
assert(GlobalList.second.size() == 1 &&
// Ignore entries for references that are undefined in the current module.
if (GlobalList.second.SummaryList.empty())
continue;
assert(GlobalList.second.SummaryList.size() == 1 &&
"Expected module's index to have one summary per GUID");
auto &Summary = GlobalList.second[0];
auto &Summary = GlobalList.second.SummaryList[0];
bool AllRefsCanBeExternallyReferenced =
llvm::all_of(Summary->refs(), [&](const ValueInfo &VI) {
return !CantBePromoted.count(VI.getValue()->getGUID());
return !CantBePromoted.count(VI.getGUID());
});
if (!AllRefsCanBeExternallyReferenced) {
Summary->setNotEligibleToImport();
@ -461,9 +466,7 @@ ModuleSummaryIndex llvm::buildModuleSummaryIndex(
if (auto *FuncSummary = dyn_cast<FunctionSummary>(Summary.get())) {
bool AllCallsCanBeExternallyReferenced = llvm::all_of(
FuncSummary->calls(), [&](const FunctionSummary::EdgeTy &Edge) {
auto GUID = Edge.first.isGUID() ? Edge.first.getGUID()
: Edge.first.getValue()->getGUID();
return !CantBePromoted.count(GUID);
return !CantBePromoted.count(Edge.first.getGUID());
});
if (!AllCallsCanBeExternallyReferenced)
Summary->setNotEligibleToImport();

View File

@ -2970,7 +2970,7 @@ static const APInt gcd(const SCEVConstant *C1, const SCEVConstant *C2) {
else if (ABW < BBW)
A = A.zext(BBW);
return APIntOps::GreatestCommonDivisor(A, B);
return APIntOps::GreatestCommonDivisor(std::move(A), std::move(B));
}
/// Get a canonical unsigned division expression, or something simpler if
@ -4083,6 +4083,56 @@ static Optional<BinaryOp> MatchBinaryOp(Value *V, DominatorTree &DT) {
return None;
}
/// A helper function for createAddRecFromPHI to handle simple cases.
///
/// This function tries to find an AddRec expression for the simplest (yet most
/// common) cases: PN = PHI(Start, OP(Self, LoopInvariant)).
/// If it fails, createAddRecFromPHI will use a more general, but slow,
/// technique for finding the AddRec expression.
const SCEV *ScalarEvolution::createSimpleAffineAddRec(PHINode *PN,
Value *BEValueV,
Value *StartValueV) {
const Loop *L = LI.getLoopFor(PN->getParent());
assert(L && L->getHeader() == PN->getParent());
assert(BEValueV && StartValueV);
auto BO = MatchBinaryOp(BEValueV, DT);
if (!BO)
return nullptr;
if (BO->Opcode != Instruction::Add)
return nullptr;
const SCEV *Accum = nullptr;
if (BO->LHS == PN && L->isLoopInvariant(BO->RHS))
Accum = getSCEV(BO->RHS);
else if (BO->RHS == PN && L->isLoopInvariant(BO->LHS))
Accum = getSCEV(BO->LHS);
if (!Accum)
return nullptr;
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
if (BO->IsNUW)
Flags = setFlags(Flags, SCEV::FlagNUW);
if (BO->IsNSW)
Flags = setFlags(Flags, SCEV::FlagNSW);
const SCEV *StartVal = getSCEV(StartValueV);
const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
// We can add Flags to the post-inc expression only if we
// know that it is *undefined behavior* for BEValueV to
// overflow.
if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
(void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
return PHISCEV;
}
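The invariant behind the match: for PN = PHI(Start, PN + Accum) with a loop-invariant Accum, iteration K sees Start + K * Accum, which is exactly the add recurrence {Start,+,Accum}. A minimal check (plain C++, illustrative names):

#include <cassert>

int main() {
  int Start = 7, Accum = 3, PN = Start;
  for (int K = 0; K < 100; ++K) {
    assert(PN == Start + K * Accum); // value of {Start,+,Accum} at step K
    PN += Accum;                     // the back-edge add matched above
  }
}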
const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
const Loop *L = LI.getLoopFor(PN->getParent());
if (!L || L->getHeader() != PN->getParent())
@ -4111,10 +4161,16 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
if (!BEValueV || !StartValueV)
return nullptr;
// While we are analyzing this PHI node, handle its value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
"PHI node already processed?");
// First, try to find an AddRec expression without creating a fictitious
// symbolic value for PN.
if (auto *S = createSimpleAffineAddRec(PN, BEValueV, StartValueV))
return S;
// Handle PHI node value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
// Using this symbolic name for the PHI, analyze the value coming around
@ -4189,7 +4245,7 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
// We can add Flags to the post-inc expression only if we
// know that it us *undefined behavior* for BEValueV to
// know that it is *undefined behavior* for BEValueV to
// overflow.
if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
@ -4744,7 +4800,7 @@ ScalarEvolution::getRange(const SCEV *S,
}
}
return setRange(AddRec, SignHint, ConservativeResult);
return setRange(AddRec, SignHint, std::move(ConservativeResult));
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
@ -4775,10 +4831,10 @@ ScalarEvolution::getRange(const SCEV *S,
APInt::getSignedMaxValue(BitWidth).ashr(NS - 1) + 1));
}
return setRange(U, SignHint, ConservativeResult);
return setRange(U, SignHint, std::move(ConservativeResult));
}
return setRange(S, SignHint, ConservativeResult);
return setRange(S, SignHint, std::move(ConservativeResult));
}
// Given a StartRange, Step and MaxBECount for an expression compute a range of
@ -4786,8 +4842,8 @@ ScalarEvolution::getRange(const SCEV *S,
// from StartRange and then is changed by Step up to MaxBECount times. Signed
// argument defines if we treat Step as signed or unsigned.
static ConstantRange getRangeForAffineARHelper(APInt Step,
ConstantRange StartRange,
APInt MaxBECount,
const ConstantRange &StartRange,
const APInt &MaxBECount,
unsigned BitWidth, bool Signed) {
// If either Step or MaxBECount is 0, then the expression won't change, and we
// just need to return the initial range.
@ -4826,8 +4882,8 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
// if the expression is decreasing and will be increased by Offset otherwise.
APInt StartLower = StartRange.getLower();
APInt StartUpper = StartRange.getUpper() - 1;
APInt MovedBoundary =
Descending ? (StartLower - Offset) : (StartUpper + Offset);
APInt MovedBoundary = Descending ? (StartLower - std::move(Offset))
: (StartUpper + std::move(Offset));
// It's possible that the new minimum/maximum value will fall into the initial
// range (due to wrap around). This means that the expression can take any
@ -4835,21 +4891,18 @@ static ConstantRange getRangeForAffineARHelper(APInt Step,
if (StartRange.contains(MovedBoundary))
return ConstantRange(BitWidth, /* isFullSet = */ true);
APInt NewLower, NewUpper;
if (Descending) {
NewLower = MovedBoundary;
NewUpper = StartUpper;
} else {
NewLower = StartLower;
NewUpper = MovedBoundary;
}
APInt NewLower =
Descending ? std::move(MovedBoundary) : std::move(StartLower);
APInt NewUpper =
Descending ? std::move(StartUpper) : std::move(MovedBoundary);
NewUpper += 1;
// If we end up with full range, return a proper full range.
if (NewLower == NewUpper + 1)
if (NewLower == NewUpper)
return ConstantRange(BitWidth, /* isFullSet = */ true);
// No overflow detected, return [StartLower, StartUpper + Offset + 1) range.
return ConstantRange(NewLower, NewUpper + 1);
return ConstantRange(std::move(NewLower), std::move(NewUpper));
}
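A numeric walk-through of the ascending case at 8 bits (hypothetical values): start range [100, 110), Step = 50, MaxBECount = 3, so Offset = 150 and the moved boundary wraps without landing back inside the start range.

#include <cassert>
#include <cstdint>

int main() {
  uint8_t StartLower = 100, StartUpper = 109;    // getUpper() - 1
  uint8_t Moved = (uint8_t)(StartUpper + 150);   // 259 wraps to 3
  assert(!(Moved >= StartLower && Moved < 110)); // not back in the start range
  uint8_t NewUpper = (uint8_t)(Moved + 1);
  assert(StartLower == 100 && NewUpper == 4);    // result: wrapped range [100, 4)
}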
ConstantRange ScalarEvolution::getRangeForAffineAR(const SCEV *Start,
@ -7323,7 +7376,6 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
const APInt &M = MC->getAPInt();
const APInt &N = NC->getAPInt();
APInt Two(BitWidth, 2);
APInt Four(BitWidth, 4);
{
using namespace APIntOps;
@ -7339,7 +7391,7 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
// Compute the B^2-4ac term.
APInt SqrtTerm(B);
SqrtTerm *= B;
SqrtTerm -= Four * (A * C);
SqrtTerm -= 4 * (A * C);
if (SqrtTerm.isNegative()) {
// The loop is provably infinite.
@ -8887,7 +8939,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
if (!Addend)
return false;
APInt ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
const APInt &ConstFoundRHS = cast<SCEVConstant>(FoundRHS)->getAPInt();
// `FoundLHSRange` is the range we know `FoundLHS` to be in by virtue of the
// antecedent "`FoundLHS` `Pred` `FoundRHS`".
@ -8899,7 +8951,7 @@ bool ScalarEvolution::isImpliedCondOperandsViaRanges(ICmpInst::Predicate Pred,
// We can also compute the range of values for `LHS` that satisfy the
// consequent, "`LHS` `Pred` `RHS`":
APInt ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
const APInt &ConstRHS = cast<SCEVConstant>(RHS)->getAPInt();
ConstantRange SatisfyingLHSRange =
ConstantRange::makeSatisfyingICmpRegion(Pred, ConstRHS);
@ -8924,7 +8976,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMaxRHS + SMaxStrideMinusOne > SMaxValue => overflow!
return (MaxValue - MaxStrideMinusOne).slt(MaxRHS);
return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).slt(MaxRHS);
}
APInt MaxRHS = getUnsignedRange(RHS).getUnsignedMax();
@ -8933,7 +8985,7 @@ bool ScalarEvolution::doesIVOverflowOnLT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMaxRHS + UMaxStrideMinusOne > UMaxValue => overflow!
return (MaxValue - MaxStrideMinusOne).ult(MaxRHS);
return (std::move(MaxValue) - std::move(MaxStrideMinusOne)).ult(MaxRHS);
}
bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
@ -8950,7 +9002,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getSignedMax();
// SMinRHS - SMaxStrideMinusOne < SMinValue => overflow!
return (MinValue + MaxStrideMinusOne).sgt(MinRHS);
return (std::move(MinValue) + std::move(MaxStrideMinusOne)).sgt(MinRHS);
}
APInt MinRHS = getUnsignedRange(RHS).getUnsignedMin();
@ -8959,7 +9011,7 @@ bool ScalarEvolution::doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride,
.getUnsignedMax();
// UMinRHS - UMaxStrideMinusOne < UMinValue => overflow!
return (MinValue + MaxStrideMinusOne).ugt(MinRHS);
return (std::move(MinValue) + std::move(MaxStrideMinusOne)).ugt(MinRHS);
}
const SCEV *ScalarEvolution::computeBECount(const SCEV *Delta, const SCEV *Step,
@ -9250,9 +9302,8 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// the upper value of the range must be the first possible exit value.
// If A is negative then the lower of the range is the last possible loop
// value. Also note that we already checked for a full range.
APInt One(BitWidth,1);
APInt A = cast<SCEVConstant>(getOperand(1))->getAPInt();
APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
APInt End = A.sge(1) ? (Range.getUpper() - 1) : Range.getLower();
// The exit value should be (End+A)/A.
APInt ExitVal = (End + A).udiv(A);
@ -9268,7 +9319,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(const ConstantRange &Range,
// Ensure that the previous value is in the range. This is a sanity check.
assert(Range.contains(
EvaluateConstantChrecAtConstant(this,
ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
ConstantInt::get(SE.getContext(), ExitVal - 1), SE)->getValue()) &&
"Linear scev computation is off in a bad way!");
return SE.getConstant(ExitValue);
} else if (isQuadratic()) {
@ -9574,7 +9625,7 @@ const SCEV *ScalarEvolution::getElementSize(Instruction *Inst) {
void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
SmallVectorImpl<const SCEV *> &Sizes,
const SCEV *ElementSize) const {
const SCEV *ElementSize) {
if (Terms.size() < 1 || !ElementSize)
return;
@ -9590,7 +9641,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
});
// Remove duplicates.
std::sort(Terms.begin(), Terms.end());
array_pod_sort(Terms.begin(), Terms.end());
Terms.erase(std::unique(Terms.begin(), Terms.end()), Terms.end());
// Put larger terms first.
@ -9598,13 +9649,11 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
return numberOfTerms(LHS) > numberOfTerms(RHS);
});
ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
// Try to divide all terms by the element size. If term is not divisible by
// element size, proceed with the original term.
for (const SCEV *&Term : Terms) {
const SCEV *Q, *R;
SCEVDivision::divide(SE, Term, ElementSize, &Q, &R);
SCEVDivision::divide(*this, Term, ElementSize, &Q, &R);
if (!Q->isZero())
Term = Q;
}
@ -9613,7 +9662,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
// Remove constant factors.
for (const SCEV *T : Terms)
if (const SCEV *NewT = removeConstantFactors(SE, T))
if (const SCEV *NewT = removeConstantFactors(*this, T))
NewTerms.push_back(NewT);
DEBUG({
@ -9622,8 +9671,7 @@ void ScalarEvolution::findArrayDimensions(SmallVectorImpl<const SCEV *> &Terms,
dbgs() << *T << "\n";
});
if (NewTerms.empty() ||
!findArrayDimensionsRec(SE, NewTerms, Sizes)) {
if (NewTerms.empty() || !findArrayDimensionsRec(*this, NewTerms, Sizes)) {
Sizes.clear();
return;
}

View File

@ -1176,6 +1176,10 @@ bool TargetLibraryInfoImpl::isValidProtoForLibFunc(const FunctionType &FTy,
FTy.getParamType(0)->isPointerTy() &&
FTy.getParamType(1) == SizeTTy && FTy.getParamType(2) == SizeTTy);
case LibFunc_wcslen:
return (NumParams == 1 && FTy.getParamType(0)->isPointerTy() &&
FTy.getReturnType()->isIntegerTy());
case LibFunc::NumLibFuncs:
break;
}

View File

@ -59,8 +59,8 @@ static cl::opt<bool>
DontImproveNonNegativePhiBits("dont-improve-non-negative-phi-bits",
cl::Hidden, cl::init(true));
/// Returns the bitwidth of the given scalar or pointer type (if unknown returns
/// 0). For vector types, returns the element type's bitwidth.
/// Returns the bitwidth of the given scalar or pointer type. For vector types,
/// returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const DataLayout &DL) {
if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth;
@ -342,7 +342,6 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
@ -351,7 +350,7 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
TrailZ = std::min(TrailZ, BitWidth);
LeadZ = std::min(LeadZ, BitWidth);
Known.Zero.clearAllBits();
Known.resetAll();
Known.Zero.setLowBits(TrailZ);
Known.Zero.setHighBits(LeadZ);
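The trailing-zero estimate above rests on a simple fact: a product carries at least the combined trailing zeros of its operands, capped at the bit width. An exhaustive 8-bit check (plain C++; __builtin_ctz assumes GCC/Clang):

#include <algorithm>
#include <cassert>
#include <cstdint>

static unsigned ctz8(uint8_t V) { return V ? __builtin_ctz(V) : 8; }

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      unsigned TrailZ = std::min(ctz8((uint8_t)A) + ctz8((uint8_t)B), 8u);
      assert(ctz8((uint8_t)(A * B)) >= TrailZ);
    }
}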
@ -529,15 +528,13 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
Known.Zero.clearAllBits();
Known.One.setAllBits();
Known.setAllOnes();
return;
}
if (match(Arg, m_Not(m_Specific(V))) &&
isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
Known.Zero.setAllBits();
Known.One.clearAllBits();
Known.setAllZero();
return;
}
@ -719,7 +716,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
if (RHSKnown.One.isAllOnesValue() || RHSKnown.isNonNegative()) {
if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
// We know that the sign bit is zero.
Known.makeNonNegative();
}
@ -741,7 +738,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
if (RHSKnown.Zero.isAllOnesValue() || RHSKnown.isNegative()) {
if (RHSKnown.isZero() || RHSKnown.isNegative()) {
// We know that the sign bit is one.
Known.makeNegative();
}
@ -776,8 +773,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
// behavior, or we might have a bug in the compiler. We can't assert/crash, so
// clear out the known bits, try to warn the user, and hope for the best.
if (Known.Zero.intersects(Known.One)) {
Known.Zero.clearAllBits();
Known.One.clearAllBits();
Known.resetAll();
if (Q.ORE) {
auto *CxtI = const_cast<Instruction *>(Q.CxtI);
@ -813,10 +809,8 @@ static void computeKnownBitsFromShiftOperator(
// If there is conflict between Known.Zero and Known.One, this must be an
// overflowing left shift, so the shift result is undefined. Clear Known
// bits so that other code could propagate this undef.
if ((Known.Zero & Known.One) != 0) {
Known.Zero.clearAllBits();
Known.One.clearAllBits();
}
if ((Known.Zero & Known.One) != 0)
Known.resetAll();
return;
}
@ -826,8 +820,7 @@ static void computeKnownBitsFromShiftOperator(
// If the shift amount could be greater than or equal to the bit-width of the LHS, the
// value could be undef, so we don't know anything about it.
if ((~Known.Zero).uge(BitWidth)) {
Known.Zero.clearAllBits();
Known.One.clearAllBits();
Known.resetAll();
return;
}
@ -839,8 +832,7 @@ static void computeKnownBitsFromShiftOperator(
// It would be more-clearly correct to use the two temporaries for this
// calculation. Reusing the APInts here to prevent unnecessary allocations.
Known.Zero.clearAllBits();
Known.One.clearAllBits();
Known.resetAll();
// If we know the shifter operand is nonzero, we can sometimes infer more
// known bits. However this is expensive to compute, so be lazy about it and
@ -886,10 +878,8 @@ static void computeKnownBitsFromShiftOperator(
// return anything we'd like, but we need to make sure the sets of known bits
// stay disjoint (it should be better for some other code to actually
// propagate the undef than to pick a value here using known bits).
if (Known.Zero.intersects(Known.One)) {
Known.Zero.clearAllBits();
Known.One.clearAllBits();
}
if (Known.Zero.intersects(Known.One))
Known.resetAll();
}
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
@ -924,7 +914,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
m_Value(Y))) ||
match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
m_Value(Y))))) {
Known2.Zero.clearAllBits(); Known2.One.clearAllBits();
Known2.resetAll();
computeKnownBits(Y, Known2, Depth + 1, Q);
if (Known2.One.countTrailingOnes() > 0)
Known.Zero.setBit(0);
@ -965,8 +955,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
unsigned LeadZ = Known2.Zero.countLeadingOnes();
Known2.One.clearAllBits();
Known2.Zero.clearAllBits();
Known2.resetAll();
computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
unsigned RHSUnknownLeadingOnes = Known2.One.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
@ -1051,11 +1040,9 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
SrcBitWidth = Q.DL.getTypeSizeInBits(SrcTy->getScalarType());
assert(SrcBitWidth && "SrcBitWidth can't be zero");
Known.Zero = Known.Zero.zextOrTrunc(SrcBitWidth);
Known.One = Known.One.zextOrTrunc(SrcBitWidth);
Known = Known.zextOrTrunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
Known.Zero = Known.Zero.zextOrTrunc(BitWidth);
Known.One = Known.One.zextOrTrunc(BitWidth);
Known = Known.zextOrTrunc(BitWidth);
// Any top bits are known to be zero.
if (BitWidth > SrcBitWidth)
Known.Zero.setBitsFrom(SrcBitWidth);
@ -1076,13 +1063,11 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// Compute the bits in the result that are not present in the input.
unsigned SrcBitWidth = I->getOperand(0)->getType()->getScalarSizeInBits();
Known.Zero = Known.Zero.trunc(SrcBitWidth);
Known.One = Known.One.trunc(SrcBitWidth);
Known = Known.trunc(SrcBitWidth);
computeKnownBits(I->getOperand(0), Known, Depth + 1, Q);
// If the sign bit of the input is known set or clear, then we know the
// top bits of the result.
Known.Zero = Known.Zero.sext(BitWidth);
Known.One = Known.One.sext(BitWidth);
Known = Known.sext(BitWidth);
break;
}
case Instruction::Shl: {
@ -1202,8 +1187,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
unsigned Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
Known.One.clearAllBits();
Known.Zero.clearAllBits();
Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@ -1504,8 +1488,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
Known.One.clearAllBits();
Known.Zero.setAllBits();
Known.setAllZero();
return;
}
// Handle a constant vector by taking the intersection of the known bits of
@ -1532,8 +1515,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
Constant *Element = CV->getAggregateElement(i);
auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
if (!ElementCI) {
Known.Zero.clearAllBits();
Known.One.clearAllBits();
Known.resetAll();
return;
}
Elt = ElementCI->getValue();
@ -1544,7 +1526,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Start out not knowing anything.
Known.Zero.clearAllBits(); Known.One.clearAllBits();
Known.resetAll();
// We can't imply anything about undefs.
if (isa<UndefValue>(V))
@ -1590,13 +1572,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
/// Convenience wrapper around computeKnownBits.
void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
unsigned Depth, const Query &Q) {
unsigned BitWidth = getBitWidth(V->getType(), Q.DL);
if (!BitWidth) {
KnownZero = false;
KnownOne = false;
return;
}
KnownBits Bits(BitWidth);
KnownBits Bits(getBitWidth(V->getType(), Q.DL));
computeKnownBits(V, Bits, Depth, Q);
KnownOne = Bits.isNegative();
KnownZero = Bits.isNonNegative();
@ -1847,7 +1823,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// shl X, Y != 0 if X is odd. Note that the value of the shift is undefined
// if the lowest bit is shifted off the end.
if (BitWidth && match(V, m_Shl(m_Value(X), m_Value(Y)))) {
if (match(V, m_Shl(m_Value(X), m_Value(Y)))) {
// shl nuw can't remove any non-zero bits.
const OverflowingBinaryOperator *BO = cast<OverflowingBinaryOperator>(V);
if (BO->hasNoUnsignedWrap())
@ -1906,7 +1882,7 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
// If X and Y are both negative (as signed values) then their sum is not
// zero unless both X and Y equal INT_MIN.
if (BitWidth && XKnownNegative && YKnownNegative) {
if (XKnownNegative && YKnownNegative) {
KnownBits Known(BitWidth);
APInt Mask = APInt::getSignedMaxValue(BitWidth);
// The sign bit of X is set. If some other bit is set then X is not equal
@ -1971,7 +1947,6 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
return true;
}
if (!BitWidth) return false;
KnownBits Known(BitWidth);
computeKnownBits(V, Known, Depth, Q);
return Known.One != 0;
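The both-negative case above ("their sum is not zero unless both X and Y equal INT_MIN") checks out exhaustively at 8 bits (a sketch, plain C++):

#include <cassert>
#include <cstdint>

int main() {
  for (int X = -128; X < 0; ++X)
    for (int Y = -128; Y < 0; ++Y)
      if ((uint8_t)((uint8_t)X + (uint8_t)Y) == 0)
        assert(X == -128 && Y == -128);
}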

View File

@ -694,15 +694,16 @@ class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
/// Used to enable on-demand parsing of the VST.
uint64_t VSTOffset = 0;
// Map to save ValueId to GUID association that was recorded in the
// Map to save ValueId to ValueInfo association that was recorded in the
// ValueSymbolTable. It is used after the VST is parsed to convert
// call graph edges read from the function summary from referencing
// callees by their ValueId to using the GUID instead, which is how
// callees by their ValueId to using the ValueInfo instead, which is how
// they are recorded in the summary index being built.
// We save a second GUID which is the same as the first one, but ignoring the
// linkage, i.e. for value other than local linkage they are identical.
DenseMap<unsigned, std::pair<GlobalValue::GUID, GlobalValue::GUID>>
ValueIdToCallGraphGUIDMap;
// We save a GUID which refers to the same global as the ValueInfo, but
// ignoring the linkage, i.e. for values other than local linkage they are
// identical.
DenseMap<unsigned, std::pair<ValueInfo, GlobalValue::GUID>>
ValueIdToValueInfoMap;
/// Map populated during module path string table parsing, from the
/// module ID to a string reference owned by the index's module
@ -742,8 +743,8 @@ private:
Error parseEntireSummary();
Error parseModuleStringTable();
std::pair<GlobalValue::GUID, GlobalValue::GUID>
getGUIDFromValueId(unsigned ValueId);
std::pair<ValueInfo, GlobalValue::GUID>
getValueInfoFromValueId(unsigned ValueId);
ModulePathStringTableTy::iterator addThisModulePath();
};
@ -4697,11 +4698,11 @@ ModuleSummaryIndexBitcodeReader::addThisModulePath() {
return TheIndex.addModulePath(ModulePath, ModuleId);
}
std::pair<GlobalValue::GUID, GlobalValue::GUID>
ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) {
auto VGI = ValueIdToCallGraphGUIDMap.find(ValueId);
assert(VGI != ValueIdToCallGraphGUIDMap.end());
return VGI->second;
std::pair<ValueInfo, GlobalValue::GUID>
ModuleSummaryIndexBitcodeReader::getValueInfoFromValueId(unsigned ValueId) {
auto VGI = ValueIdToValueInfoMap[ValueId];
assert(VGI.first);
return VGI;
}
void ModuleSummaryIndexBitcodeReader::setValueGUID(
@ -4716,8 +4717,8 @@ void ModuleSummaryIndexBitcodeReader::setValueGUID(
if (PrintSummaryGUIDs)
dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is "
<< ValueName << "\n";
ValueIdToCallGraphGUIDMap[ValueID] =
std::make_pair(ValueGUID, OriginalNameID);
ValueIdToValueInfoMap[ValueID] =
std::make_pair(TheIndex.getOrInsertValueInfo(ValueGUID), OriginalNameID);
}
// Specialized value symbol table parser used when reading module index
@ -4795,7 +4796,8 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
GlobalValue::GUID RefGUID = Record[1];
// The "original name", which is the second value of the pair will be
// overridden later by a FS_COMBINED_ORIGINAL_NAME in the combined index.
ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
ValueIdToValueInfoMap[ValueID] =
std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
break;
}
}
@ -4940,7 +4942,7 @@ ModuleSummaryIndexBitcodeReader::makeRefList(ArrayRef<uint64_t> Record) {
std::vector<ValueInfo> Ret;
Ret.reserve(Record.size());
for (uint64_t RefValueId : Record)
Ret.push_back(getGUIDFromValueId(RefValueId).first);
Ret.push_back(getValueInfoFromValueId(RefValueId).first);
return Ret;
}
@ -4950,14 +4952,14 @@ std::vector<FunctionSummary::EdgeTy> ModuleSummaryIndexBitcodeReader::makeCallLi
Ret.reserve(Record.size());
for (unsigned I = 0, E = Record.size(); I != E; ++I) {
CalleeInfo::HotnessType Hotness = CalleeInfo::HotnessType::Unknown;
GlobalValue::GUID CalleeGUID = getGUIDFromValueId(Record[I]).first;
ValueInfo Callee = getValueInfoFromValueId(Record[I]).first;
if (IsOldProfileFormat) {
I += 1; // Skip old callsitecount field
if (HasProfile)
I += 1; // Skip old profilecount field
} else if (HasProfile)
Hotness = static_cast<CalleeInfo::HotnessType>(Record[++I]);
Ret.push_back(FunctionSummary::EdgeTy{CalleeGUID, CalleeInfo{Hotness}});
Ret.push_back(FunctionSummary::EdgeTy{Callee, CalleeInfo{Hotness}});
}
return Ret;
}
@ -5027,7 +5029,8 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
case bitc::FS_VALUE_GUID: { // [valueid, refguid]
uint64_t ValueID = Record[0];
GlobalValue::GUID RefGUID = Record[1];
ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
ValueIdToValueInfoMap[ValueID] =
std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
break;
}
// FS_PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
@ -5068,10 +5071,10 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
PendingTypeCheckedLoadVCalls.clear();
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
auto GUID = getGUIDFromValueId(ValueID);
auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID);
FS->setModulePath(addThisModulePath()->first());
FS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
FS->setOriginalName(VIAndOriginalGUID.second);
TheIndex.addGlobalValueSummary(VIAndOriginalGUID.first, std::move(FS));
break;
}
// FS_ALIAS: [valueid, flags, valueid]
@ -5091,14 +5094,15 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
// ownership.
AS->setModulePath(addThisModulePath()->first());
GlobalValue::GUID AliaseeGUID = getGUIDFromValueId(AliaseeID).first;
GlobalValue::GUID AliaseeGUID =
getValueInfoFromValueId(AliaseeID).first.getGUID();
auto AliaseeInModule =
TheIndex.findSummaryInModule(AliaseeGUID, ModulePath);
if (!AliaseeInModule)
return error("Alias expects aliasee summary to be parsed");
AS->setAliasee(AliaseeInModule);
auto GUID = getGUIDFromValueId(ValueID);
auto GUID = getValueInfoFromValueId(ValueID);
AS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(AS));
break;
@ -5112,7 +5116,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
makeRefList(ArrayRef<uint64_t>(Record).slice(2));
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
FS->setModulePath(addThisModulePath()->first());
auto GUID = getGUIDFromValueId(ValueID);
auto GUID = getValueInfoFromValueId(ValueID);
FS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
break;
@ -5139,7 +5143,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
std::vector<FunctionSummary::EdgeTy> Edges = makeCallList(
ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
IsOldProfileFormat, HasProfile);
GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
ValueInfo VI = getValueInfoFromValueId(ValueID).first;
auto FS = llvm::make_unique<FunctionSummary>(
Flags, InstCount, std::move(Refs), std::move(Edges),
std::move(PendingTypeTests), std::move(PendingTypeTestAssumeVCalls),
@ -5152,9 +5156,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
LastSeenSummary = FS.get();
LastSeenGUID = GUID;
LastSeenGUID = VI.getGUID();
FS->setModulePath(ModuleIdMap[ModuleId]);
TheIndex.addGlobalValueSummary(GUID, std::move(FS));
TheIndex.addGlobalValueSummary(VI, std::move(FS));
break;
}
// FS_COMBINED_ALIAS: [valueid, modid, flags, valueid]
@ -5170,16 +5174,17 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
LastSeenSummary = AS.get();
AS->setModulePath(ModuleIdMap[ModuleId]);
auto AliaseeGUID = getGUIDFromValueId(AliaseeValueId).first;
auto AliaseeGUID =
getValueInfoFromValueId(AliaseeValueId).first.getGUID();
auto AliaseeInModule =
TheIndex.findSummaryInModule(AliaseeGUID, AS->modulePath());
if (!AliaseeInModule)
return error("Alias expects aliasee summary to be parsed");
AS->setAliasee(AliaseeInModule);
GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
LastSeenGUID = GUID;
TheIndex.addGlobalValueSummary(GUID, std::move(AS));
ValueInfo VI = getValueInfoFromValueId(ValueID).first;
LastSeenGUID = VI.getGUID();
TheIndex.addGlobalValueSummary(VI, std::move(AS));
break;
}
// FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
@ -5193,9 +5198,9 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
LastSeenSummary = FS.get();
FS->setModulePath(ModuleIdMap[ModuleId]);
GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
LastSeenGUID = GUID;
TheIndex.addGlobalValueSummary(GUID, std::move(FS));
ValueInfo VI = getValueInfoFromValueId(ValueID).first;
LastSeenGUID = VI.getGUID();
TheIndex.addGlobalValueSummary(VI, std::move(FS));
break;
}
// FS_COMBINED_ORIGINAL_NAME: [original_name]

View File

@ -156,14 +156,14 @@ public:
return;
for (const auto &GUIDSummaryLists : *Index)
// Examine all summaries for this GUID.
for (auto &Summary : GUIDSummaryLists.second)
for (auto &Summary : GUIDSummaryLists.second.SummaryList)
if (auto FS = dyn_cast<FunctionSummary>(Summary.get()))
// For each call in the function summary, see if the call
// is to a GUID (which means it is for an indirect call,
// otherwise we would have a Value for it). If so, synthesize
// a value id.
for (auto &CallEdge : FS->calls())
if (CallEdge.first.isGUID())
if (!CallEdge.first.getValue())
assignValueId(CallEdge.first.getGUID());
}
@ -304,7 +304,7 @@ private:
}
// Helper to get the valueId for the type of value recorded in VI.
unsigned getValueId(ValueInfo VI) {
if (VI.isGUID())
if (!VI.getValue())
return getValueId(VI.getGUID());
return VE.getValueID(VI.getValue());
}
@ -358,7 +358,7 @@ public:
Callback(Summary);
} else {
for (auto &Summaries : Index)
for (auto &Summary : Summaries.second)
for (auto &Summary : Summaries.second.SummaryList)
Callback({Summaries.first, Summary.get()});
}
}
@ -3270,15 +3270,14 @@ void ModuleBitcodeWriter::writePerModuleFunctionSummaryRecord(
void ModuleBitcodeWriter::writeModuleLevelReferences(
const GlobalVariable &V, SmallVector<uint64_t, 64> &NameVals,
unsigned FSModRefsAbbrev) {
auto Summaries =
Index->findGlobalValueSummaryList(GlobalValue::getGUID(V.getName()));
if (Summaries == Index->end()) {
auto VI = Index->getValueInfo(GlobalValue::getGUID(V.getName()));
if (!VI || VI.getSummaryList().empty()) {
// Only declarations should not have a summary (a declaration might however
// have a summary if the def was in module level asm).
assert(V.isDeclaration());
return;
}
auto *Summary = Summaries->second.front().get();
auto *Summary = VI.getSummaryList()[0].get();
NameVals.push_back(VE.getValueID(&V));
GlobalVarSummary *VS = cast<GlobalVarSummary>(Summary);
NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
@ -3367,15 +3366,14 @@ void ModuleBitcodeWriter::writePerModuleGlobalValueSummary() {
if (!F.hasName())
report_fatal_error("Unexpected anonymous function when writing summary");
auto Summaries =
Index->findGlobalValueSummaryList(GlobalValue::getGUID(F.getName()));
if (Summaries == Index->end()) {
ValueInfo VI = Index->getValueInfo(GlobalValue::getGUID(F.getName()));
if (!VI || VI.getSummaryList().empty()) {
// Only declarations should not have a summary (a declaration might
// however have a summary if the def was in module level asm).
assert(F.isDeclaration());
continue;
}
auto *Summary = Summaries->second.front().get();
auto *Summary = VI.getSummaryList()[0].get();
writePerModuleFunctionSummaryRecord(NameVals, Summary, VE.getValueID(&F),
FSCallsAbbrev, FSCallsProfileAbbrev, F);
}

View File

@ -2761,37 +2761,63 @@ void AsmPrinter::emitXRayTable() {
auto PrevSection = OutStreamer->getCurrentSectionOnly();
auto Fn = MF->getFunction();
MCSection *Section = nullptr;
MCSection *InstMap = nullptr;
MCSection *FnSledIndex = nullptr;
if (MF->getSubtarget().getTargetTriple().isOSBinFormatELF()) {
if (Fn->hasComdat()) {
Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
Fn->getComdat()->getName());
FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC | ELF::SHF_GROUP, 0,
Fn->getComdat()->getName());
} else {
Section = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
InstMap = OutContext.getELFSection("xray_instr_map", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC);
FnSledIndex = OutContext.getELFSection("xray_fn_idx", ELF::SHT_PROGBITS,
ELF::SHF_ALLOC);
}
} else if (MF->getSubtarget().getTargetTriple().isOSBinFormatMachO()) {
Section = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
InstMap = OutContext.getMachOSection("__DATA", "xray_instr_map", 0,
SectionKind::getReadOnlyWithRel());
FnSledIndex = OutContext.getMachOSection("__DATA", "xray_fn_idx", 0,
SectionKind::getReadOnlyWithRel());
} else {
llvm_unreachable("Unsupported target");
}
// Before we switch over, we force a reference to a label inside the
// xray_instr_map section. Since this function is always called just
// before the function's end, we assume that this is happening after
// the last return instruction.
// xray_instr_map and xray_fn_idx sections. Since this function is always
// called just before the function's end, we assume that this is happening
// after the last return instruction. We also use the synthetic label in the
// xray_instr_map as a delimiter for the range of sleds for this function in
// the index.
auto WordSizeBytes = MAI->getCodePointerSize();
MCSymbol *Tmp = OutContext.createTempSymbol("xray_synthetic_", true);
MCSymbol *SledsStart = OutContext.createTempSymbol("xray_synthetic_", true);
MCSymbol *IdxRef = OutContext.createTempSymbol("xray_fn_idx_synth_", true);
OutStreamer->EmitCodeAlignment(16);
OutStreamer->EmitSymbolValue(Tmp, WordSizeBytes, false);
OutStreamer->SwitchSection(Section);
OutStreamer->EmitLabel(Tmp);
OutStreamer->EmitSymbolValue(SledsStart, WordSizeBytes, false);
OutStreamer->EmitSymbolValue(IdxRef, WordSizeBytes, false);
// Now we switch to the instrumentation map section. Because this is done
// per-function, we are able to create an index entry that will represent the
// range of sleds associated with a function.
OutStreamer->SwitchSection(InstMap);
OutStreamer->EmitLabel(SledsStart);
for (const auto &Sled : Sleds)
Sled.emit(WordSizeBytes, OutStreamer.get(), CurrentFnSym);
MCSymbol *SledsEnd = OutContext.createTempSymbol("xray_synthetic_end", true);
OutStreamer->EmitLabel(SledsEnd);
// We then emit a single entry in the index per function. We use the symbols
// that bound the instrumentation map as the range for a specific function.
// Each entry here will be 2 * word size aligned, as we're writing down two
// pointers. This should work for both 32-bit and 64-bit platforms.
OutStreamer->SwitchSection(FnSledIndex);
OutStreamer->EmitCodeAlignment(2 * WordSizeBytes);
OutStreamer->EmitLabel(IdxRef);
OutStreamer->EmitSymbolValue(SledsStart, WordSizeBytes);
OutStreamer->EmitSymbolValue(SledsEnd, WordSizeBytes);
OutStreamer->SwitchSection(PrevSection);
Sleds.clear();
}
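A reader-side sketch of the layout this produces (hypothetical struct, not an LLVM declaration): xray_fn_idx now holds one pair of pointers per instrumented function, bounding that function's sleds inside xray_instr_map.

struct XRayFnIdxEntry {
  const void *SledsBegin; // the IdxRef entry's first word: SledsStart
  const void *SledsEnd;   // label emitted just past the function's last sled
};
static_assert(sizeof(XRayFnIdxEntry) == 2 * sizeof(void *),
              "two word-size pointers per entry, 2 * wordsize aligned");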

View File

@ -469,7 +469,7 @@ void CodeViewDebug::emitTypeInformation() {
CommentPrefix += ' ';
}
TypeDatabase TypeDB;
TypeDatabase TypeDB(TypeTable.records().size());
CVTypeDumper CVTD(TypeDB);
TypeTable.ForEachRecord([&](TypeIndex Index, ArrayRef<uint8_t> Record) {
if (OS.isVerboseAsm()) {
@ -1705,10 +1705,12 @@ TypeIndex CodeViewDebug::lowerCompleteTypeClass(const DICompositeType *Ty) {
SizeInBytes, FullName, Ty->getIdentifier());
TypeIndex ClassTI = TypeTable.writeKnownType(CR);
StringIdRecord SIDR(TypeIndex(0x0), getFullFilepath(Ty->getFile()));
TypeIndex SIDI = TypeTable.writeKnownType(SIDR);
UdtSourceLineRecord USLR(ClassTI, SIDI, Ty->getLine());
TypeTable.writeKnownType(USLR);
if (const auto *File = Ty->getFile()) {
StringIdRecord SIDR(TypeIndex(0x0), getFullFilepath(File));
TypeIndex SIDI = TypeTable.writeKnownType(SIDR);
UdtSourceLineRecord USLR(ClassTI, SIDI, Ty->getLine());
TypeTable.writeKnownType(USLR);
}
addToUDTs(Ty, ClassTI);

View File

@ -1850,8 +1850,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
return false;
bool HasDups = false;
SmallVector<unsigned, 4> LocalDefs;
SmallSet<unsigned, 4> LocalDefsSet;
SmallVector<unsigned, 4> LocalDefs, LocalKills;
SmallSet<unsigned, 4> ActiveDefsSet, AllDefsSet;
MachineBasicBlock::iterator TIB = TBB->begin();
MachineBasicBlock::iterator FIB = FBB->begin();
MachineBasicBlock::iterator TIE = TBB->end();
@ -1905,7 +1905,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
IsSafe = false;
break;
}
} else if (!LocalDefsSet.count(Reg)) {
} else if (!ActiveDefsSet.count(Reg)) {
if (Defs.count(Reg)) {
// Use is defined by the instruction at the point of insertion.
IsSafe = false;
@ -1925,18 +1925,22 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!TIB->isSafeToMove(nullptr, DontMoveAcrossStore))
break;
// Remove kills from LocalDefsSet, these registers had short live ranges.
// Remove kills from ActiveDefsSet, these registers had short live ranges.
for (const MachineOperand &MO : TIB->operands()) {
if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned Reg = MO.getReg();
if (!Reg || !LocalDefsSet.count(Reg))
if (!Reg)
continue;
if (!AllDefsSet.count(Reg)) {
LocalKills.push_back(Reg);
continue;
}
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
LocalDefsSet.erase(*AI);
ActiveDefsSet.erase(*AI);
} else {
LocalDefsSet.erase(Reg);
ActiveDefsSet.erase(Reg);
}
}
@ -1948,7 +1952,8 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
if (!Reg || TargetRegisterInfo::isVirtualRegister(Reg))
continue;
LocalDefs.push_back(Reg);
addRegAndItsAliases(Reg, TRI, LocalDefsSet);
addRegAndItsAliases(Reg, TRI, ActiveDefsSet);
addRegAndItsAliases(Reg, TRI, AllDefsSet);
}
HasDups = true;
@ -1963,17 +1968,22 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
FBB->erase(FBB->begin(), FIB);
// Update livein's.
bool AddedLiveIns = false;
bool ChangedLiveIns = false;
for (unsigned i = 0, e = LocalDefs.size(); i != e; ++i) {
unsigned Def = LocalDefs[i];
if (LocalDefsSet.count(Def)) {
if (ActiveDefsSet.count(Def)) {
TBB->addLiveIn(Def);
FBB->addLiveIn(Def);
AddedLiveIns = true;
ChangedLiveIns = true;
}
}
for (unsigned K : LocalKills) {
TBB->removeLiveIn(K);
FBB->removeLiveIn(K);
ChangedLiveIns = true;
}
if (AddedLiveIns) {
if (ChangedLiveIns) {
TBB->sortUniqueLiveIns();
FBB->sortUniqueLiveIns();
}
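
Taken together, the hoisting hunks replace the single LocalDefsSet with two sets plus a kill list: ActiveDefsSet tracks defs still live at the insertion point, AllDefsSet remembers everything the hoisted region ever defined, and LocalKills records uses that killed a register the region did not define. A minimal standalone sketch of the resulting live-in update, with registers modeled as plain unsigned IDs and standard containers in place of LLVM types (all names below are illustrative, not LLVM API):

#include <algorithm>
#include <iostream>
#include <set>
#include <vector>

int main() {
  // Registers defined by the hoisted instructions, in order.
  std::vector<unsigned> LocalDefs = {1, 2, 3};
  // Defs still live at the insertion point (not killed inside the region).
  std::set<unsigned> ActiveDefsSet = {1, 3};
  // Kills of registers the hoisted region did not define itself.
  std::vector<unsigned> LocalKills = {7};

  // Live-in lists of the two successor blocks.
  std::vector<unsigned> TBBLiveIns = {7}, FBBLiveIns = {7};

  // Surviving defs become live-ins of both successors.
  for (unsigned Def : LocalDefs)
    if (ActiveDefsSet.count(Def)) {
      TBBLiveIns.push_back(Def);
      FBBLiveIns.push_back(Def);
    }
  // Registers killed by the hoisted code are no longer live into them.
  for (unsigned K : LocalKills) {
    TBBLiveIns.erase(std::remove(TBBLiveIns.begin(), TBBLiveIns.end(), K),
                     TBBLiveIns.end());
    FBBLiveIns.erase(std::remove(FBBLiveIns.begin(), FBBLiveIns.end(), K),
                     FBBLiveIns.end());
  }
  std::sort(TBBLiveIns.begin(), TBBLiveIns.end()); // sortUniqueLiveIns stand-in

  for (unsigned R : TBBLiveIns)
    std::cout << "TBB live-in: %" << R << "\n"; // prints %1 and %3
}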


@ -1108,6 +1108,14 @@ bool IRTranslator::translate(const Constant &C, unsigned Reg) {
default:
return false;
}
} else if (auto CV = dyn_cast<ConstantVector>(&C)) {
if (CV->getNumOperands() == 1)
return translate(*CV->getOperand(0), Reg);
SmallVector<unsigned, 4> Ops;
for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
}
EntryBuilder.buildMerge(Reg, Ops);
} else
return false;
@ -1199,9 +1207,6 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
finishPendingPhis();
auto &TLI = *MF->getSubtarget().getTargetLowering();
TLI.finalizeLowering(*MF);
// Merge the argument lowering and constants block with its single
// successor, the LLVM-IR entry block. We want the basic block to
// be maximal.


@ -24,6 +24,7 @@
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#define DEBUG_TYPE "instruction-select"
@ -70,8 +71,7 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
// An optimization remark emitter. Used to report failures.
MachineOptimizationRemarkEmitter MORE(MF, /*MBFI=*/nullptr);
// FIXME: freezeReservedRegs is now done in IRTranslator, but there are many
// other MF/MFI fields we need to initialize.
// FIXME: There are many other MF/MFI fields we need to initialize.
#ifndef NDEBUG
// Check that our input is fully legal: we require the function to have the
@ -184,6 +184,9 @@ bool InstructionSelect::runOnMachineFunction(MachineFunction &MF) {
return false;
}
auto &TLI = *MF.getSubtarget().getTargetLowering();
TLI.finalizeLowering(MF);
// FIXME: Should we accurately track changes?
return true;
}


@ -176,8 +176,13 @@ bool Legalizer::runOnMachineFunction(MachineFunction &MF) {
unsigned NumNewInsns = 0;
SmallVector<MachineInstr *, 4> WorkList;
Helper.MIRBuilder.recordInsertions([&](MachineInstr *MI) {
++NumNewInsns;
WorkList.push_back(MI);
// Only legalize pre-isel generic instructions.
// The legalization process can generate target-specific pseudo
// instructions with generic types; don't record those.
if (isPreISelGenericOpcode(MI->getOpcode())) {
++NumNewInsns;
WorkList.push_back(MI);
}
});
WorkList.push_back(&*MI);
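
The callback above is just a filtered insertion observer. A self-contained sketch of the pattern, where isGeneric is a hypothetical stand-in for isPreISelGenericOpcode and Instr stands in for MachineInstr:

#include <functional>
#include <iostream>
#include <vector>

struct Instr { unsigned Opcode; };

// Stand-in for isPreISelGenericOpcode: pretend opcodes >= 100 are generic.
static bool isGeneric(unsigned Opcode) { return Opcode >= 100; }

int main() {
  std::vector<Instr *> WorkList;
  unsigned NumNewInsns = 0;

  // The observer the legalizer installs while expanding an instruction.
  std::function<void(Instr *)> OnInsert = [&](Instr *MI) {
    // Target-specific pseudos created during legalization (opcode < 100
    // in this toy model) must not re-enter the legalizer work list.
    if (isGeneric(MI->Opcode)) {
      ++NumNewInsns;
      WorkList.push_back(MI);
    }
  };

  Instr GenericCopy{120}, TargetPseudo{42};
  OnInsert(&GenericCopy);  // recorded
  OnInsert(&TargetPseudo); // filtered out
  std::cout << "queued " << NumNewInsns << " of 2\n"; // queued 1 of 2
}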


@ -213,21 +213,23 @@ uint64_t RegBankSelect::getRepairCost(
return UINT_MAX;
}
RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
const RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
MachineInstr &MI, RegisterBankInfo::InstructionMappings &PossibleMappings,
SmallVectorImpl<RepairingPlacement> &RepairPts) {
assert(!PossibleMappings.empty() &&
"Do not know how to map this instruction");
RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
const RegisterBankInfo::InstructionMapping *BestMapping = nullptr;
MappingCost Cost = MappingCost::ImpossibleCost();
SmallVector<RepairingPlacement, 4> LocalRepairPts;
for (RegisterBankInfo::InstructionMapping &CurMapping : PossibleMappings) {
MappingCost CurCost = computeMapping(MI, CurMapping, LocalRepairPts, &Cost);
for (const RegisterBankInfo::InstructionMapping *CurMapping :
PossibleMappings) {
MappingCost CurCost =
computeMapping(MI, *CurMapping, LocalRepairPts, &Cost);
if (CurCost < Cost) {
DEBUG(dbgs() << "New best: " << CurCost << '\n');
Cost = CurCost;
BestMapping = &CurMapping;
BestMapping = CurMapping;
RepairPts.clear();
for (RepairingPlacement &RepairPt : LocalRepairPts)
RepairPts.emplace_back(std::move(RepairPt));
@ -237,7 +239,7 @@ RegisterBankInfo::InstructionMapping &RegBankSelect::findBestMapping(
// If none of the mappings worked, that means they are all impossible.
// Thus, pick the first one and set an impossible repairing point.
// It will trigger the failed isel mode.
BestMapping = &(*PossibleMappings.begin());
BestMapping = *PossibleMappings.begin();
RepairPts.emplace_back(
RepairingPlacement(MI, 0, *TRI, *this, RepairingPlacement::Impossible));
} else
@ -543,10 +545,10 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
// Remember the repairing placement for all the operands.
SmallVector<RepairingPlacement, 4> RepairPts;
RegisterBankInfo::InstructionMapping BestMapping;
const RegisterBankInfo::InstructionMapping *BestMapping;
if (OptMode == RegBankSelect::Mode::Fast) {
BestMapping = RBI->getInstrMapping(MI);
MappingCost DefaultCost = computeMapping(MI, BestMapping, RepairPts);
BestMapping = &RBI->getInstrMapping(MI);
MappingCost DefaultCost = computeMapping(MI, *BestMapping, RepairPts);
(void)DefaultCost;
if (DefaultCost == MappingCost::ImpossibleCost())
return false;
@ -555,16 +557,16 @@ bool RegBankSelect::assignInstr(MachineInstr &MI) {
RBI->getInstrPossibleMappings(MI);
if (PossibleMappings.empty())
return false;
BestMapping = std::move(findBestMapping(MI, PossibleMappings, RepairPts));
BestMapping = &findBestMapping(MI, PossibleMappings, RepairPts);
}
// Make sure the mapping is valid for MI.
assert(BestMapping.verify(MI) && "Invalid instruction mapping");
assert(BestMapping->verify(MI) && "Invalid instruction mapping");
DEBUG(dbgs() << "Best Mapping: " << BestMapping << '\n');
DEBUG(dbgs() << "Best Mapping: " << *BestMapping << '\n');
// After this call, MI may not be valid anymore.
// Do not use it.
return applyMapping(MI, BestMapping, RepairPts);
return applyMapping(MI, *BestMapping, RepairPts);
}
bool RegBankSelect::runOnMachineFunction(MachineFunction &MF) {


@ -45,6 +45,10 @@ STATISTIC(NumOperandsMappingsCreated,
"Number of operands mappings dynamically created");
STATISTIC(NumOperandsMappingsAccessed,
"Number of operands mappings dynamically accessed");
STATISTIC(NumInstructionMappingsCreated,
"Number of instruction mappings dynamically created");
STATISTIC(NumInstructionMappingsAccessed,
"Number of instruction mappings dynamically accessed");
const unsigned RegisterBankInfo::DefaultMappingID = UINT_MAX;
const unsigned RegisterBankInfo::InvalidMappingID = UINT_MAX - 1;
@ -137,7 +141,7 @@ static bool isCopyLike(const MachineInstr &MI) {
MI.getOpcode() == TargetOpcode::REG_SEQUENCE;
}
RegisterBankInfo::InstructionMapping
const RegisterBankInfo::InstructionMapping &
RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
// For copies we want to walk over the operands and try to find one
// that has a register bank since the instruction itself will not get
@ -147,9 +151,6 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
// is important. The rest is not constrained.
unsigned NumOperandsForMapping = IsCopyLike ? 1 : MI.getNumOperands();
RegisterBankInfo::InstructionMapping Mapping(DefaultMappingID, /*Cost*/ 1,
/*OperandsMapping*/ nullptr,
NumOperandsForMapping);
const MachineFunction &MF = *MI.getParent()->getParent();
const TargetSubtargetInfo &STI = MF.getSubtarget();
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
@ -190,7 +191,7 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
if (!IsCopyLike)
// MI does not carry enough information to guess the mapping.
return InstructionMapping();
return getInvalidInstructionMapping();
continue;
}
}
@ -206,11 +207,13 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
if (IsCopyLike && !CompleteMapping)
// No way to deduce the type from what we have.
return InstructionMapping();
return getInvalidInstructionMapping();
assert(CompleteMapping && "Setting an incomplete mapping");
Mapping.setOperandsMapping(getOperandsMapping(OperandsMapping));
return Mapping;
return getInstructionMapping(
DefaultMappingID, /*Cost*/ 1,
/*OperandsMapping*/ getOperandsMapping(OperandsMapping),
NumOperandsForMapping);
}
/// Hashing function for PartialMapping.
@ -320,9 +323,44 @@ const RegisterBankInfo::ValueMapping *RegisterBankInfo::getOperandsMapping(
return getOperandsMapping(OpdsMapping.begin(), OpdsMapping.end());
}
RegisterBankInfo::InstructionMapping
static hash_code
hashInstructionMapping(unsigned ID, unsigned Cost,
const RegisterBankInfo::ValueMapping *OperandsMapping,
unsigned NumOperands) {
return hash_combine(ID, Cost, OperandsMapping, NumOperands);
}
const RegisterBankInfo::InstructionMapping &
RegisterBankInfo::getInstructionMappingImpl(
bool IsInvalid, unsigned ID, unsigned Cost,
const RegisterBankInfo::ValueMapping *OperandsMapping,
unsigned NumOperands) const {
assert(((IsInvalid && ID == InvalidMappingID && Cost == 0 &&
OperandsMapping == nullptr && NumOperands == 0) ||
!IsInvalid) &&
"Mismatch argument for invalid input");
++NumInstructionMappingsAccessed;
hash_code Hash =
hashInstructionMapping(ID, Cost, OperandsMapping, NumOperands);
const auto &It = MapOfInstructionMappings.find(Hash);
if (It != MapOfInstructionMappings.end())
return *It->second;
++NumInstructionMappingsCreated;
auto &InstrMapping = MapOfInstructionMappings[Hash];
if (IsInvalid)
InstrMapping = llvm::make_unique<InstructionMapping>();
else
InstrMapping = llvm::make_unique<InstructionMapping>(
ID, Cost, OperandsMapping, NumOperands);
return *InstrMapping;
}
const RegisterBankInfo::InstructionMapping &
RegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
RegisterBankInfo::InstructionMapping Mapping = getInstrMappingImpl(MI);
const RegisterBankInfo::InstructionMapping &Mapping = getInstrMappingImpl(MI);
if (Mapping.isValid())
return Mapping;
llvm_unreachable("The target must implement this");
@ -332,14 +370,14 @@ RegisterBankInfo::InstructionMappings
RegisterBankInfo::getInstrPossibleMappings(const MachineInstr &MI) const {
InstructionMappings PossibleMappings;
// Put the default mapping first.
PossibleMappings.push_back(getInstrMapping(MI));
PossibleMappings.push_back(&getInstrMapping(MI));
// Then the alternative mapping, if any.
InstructionMappings AltMappings = getInstrAlternativeMappings(MI);
for (InstructionMapping &AltMapping : AltMappings)
PossibleMappings.emplace_back(std::move(AltMapping));
for (const InstructionMapping *AltMapping : AltMappings)
PossibleMappings.push_back(AltMapping);
#ifndef NDEBUG
for (const InstructionMapping &Mapping : PossibleMappings)
assert(Mapping.verify(MI) && "Mapping is invalid");
for (const InstructionMapping *Mapping : PossibleMappings)
assert(Mapping->verify(MI) && "Mapping is invalid");
#endif
return PossibleMappings;
}
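
getInstructionMappingImpl is classic hash-consing: hash the constructor arguments, and hand every identical query a reference to one cached immutable object, so mappings can now be passed around as cheap const pointers (the findBestMapping and assignInstr hunks above are the corresponding call-site changes). A standalone sketch of the pattern, reduced to two integer fields; note that, as in the patch, entries are keyed by the hash value itself:

#include <iostream>
#include <memory>
#include <unordered_map>

struct Mapping {
  unsigned ID, Cost;
  Mapping(unsigned ID, unsigned Cost) : ID(ID), Cost(Cost) {}
};

class MappingCache {
  // Hash -> uniqued object. Clients only ever see const references,
  // so every identical query returns the same address.
  mutable std::unordered_map<size_t, std::unique_ptr<Mapping>> Map;

public:
  const Mapping &get(unsigned ID, unsigned Cost) const {
    size_t Hash = std::hash<unsigned>()(ID) * 31 + Cost;
    auto &Slot = Map[Hash];
    if (!Slot) // first request with this key creates the canonical object
      Slot.reset(new Mapping(ID, Cost));
    return *Slot;
  }
};

int main() {
  MappingCache Cache;
  const Mapping &A = Cache.get(1, 1);
  const Mapping &B = Cache.get(1, 1);
  std::cout << (&A == &B) << "\n"; // 1: both hits share one object
}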


@ -12,11 +12,13 @@
//===----------------------------------------------------------------------===//
#include "MIParser.h"
#include "MILexer.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/AsmParser/SlotMapping.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
@ -134,7 +136,8 @@ public:
bool
parseBasicBlockDefinition(DenseMap<unsigned, MachineBasicBlock *> &MBBSlots);
bool parseBasicBlock(MachineBasicBlock &MBB);
bool parseBasicBlock(MachineBasicBlock &MBB,
MachineBasicBlock *&AddFalthroughFrom);
bool parseBasicBlockLiveins(MachineBasicBlock &MBB);
bool parseBasicBlockSuccessors(MachineBasicBlock &MBB);
@ -518,7 +521,8 @@ bool MIParser::parseBasicBlockSuccessors(MachineBasicBlock &MBB) {
return false;
}
bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
bool MIParser::parseBasicBlock(MachineBasicBlock &MBB,
MachineBasicBlock *&AddFalthroughFrom) {
// Skip the definition.
assert(Token.is(MIToken::MachineBasicBlockLabel));
lex();
@ -538,10 +542,12 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
//
// is equivalent to
// liveins: %edi, %esi
bool ExplicitSuccesors = false;
while (true) {
if (Token.is(MIToken::kw_successors)) {
if (parseBasicBlockSuccessors(MBB))
return true;
ExplicitSuccesors = true;
} else if (Token.is(MIToken::kw_liveins)) {
if (parseBasicBlockLiveins(MBB))
return true;
@ -557,10 +563,9 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
// Parse the instructions.
bool IsInBundle = false;
MachineInstr *PrevMI = nullptr;
while (true) {
if (Token.is(MIToken::MachineBasicBlockLabel) || Token.is(MIToken::Eof))
return false;
else if (consumeIfPresent(MIToken::Newline))
while (!Token.is(MIToken::MachineBasicBlockLabel) &&
!Token.is(MIToken::Eof)) {
if (consumeIfPresent(MIToken::Newline))
continue;
if (consumeIfPresent(MIToken::rbrace)) {
// The first parsing pass should verify that all closing '}' have an
@ -592,6 +597,22 @@ bool MIParser::parseBasicBlock(MachineBasicBlock &MBB) {
assert(Token.isNewlineOrEOF() && "MI is not fully parsed");
lex();
}
// Construct successor list by searching for basic block machine operands.
if (!ExplicitSuccesors) {
SmallVector<MachineBasicBlock*,4> Successors;
bool IsFallthrough;
guessSuccessors(MBB, Successors, IsFallthrough);
for (MachineBasicBlock *Succ : Successors)
MBB.addSuccessor(Succ);
if (IsFallthrough) {
AddFalthroughFrom = &MBB;
} else {
MBB.normalizeSuccProbs();
}
}
return false;
}
@ -605,11 +626,18 @@ bool MIParser::parseBasicBlocks() {
// The first parsing pass should have verified that this token is a MBB label
// in the 'parseBasicBlockDefinitions' method.
assert(Token.is(MIToken::MachineBasicBlockLabel));
MachineBasicBlock *AddFalthroughFrom = nullptr;
do {
MachineBasicBlock *MBB = nullptr;
if (parseMBBReference(MBB))
return true;
if (parseBasicBlock(*MBB))
if (AddFalthroughFrom) {
if (!AddFalthroughFrom->isSuccessor(MBB))
AddFalthroughFrom->addSuccessor(MBB);
AddFalthroughFrom->normalizeSuccProbs();
AddFalthroughFrom = nullptr;
}
if (parseBasicBlock(*MBB, AddFalthroughFrom))
return true;
// The method 'parseBasicBlock' should parse the whole block until the next
// block or the end of file.
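
The AddFalthroughFrom plumbing is a deferred-edge trick: a block that does not end in a barrier falls through, but its successor only becomes known when the next label is parsed, so the edge is added one loop iteration later. A toy model of that control flow (Block is a stand-in for MachineBasicBlock):

#include <iostream>
#include <string>
#include <vector>

struct Block {
  std::string Name;
  bool EndsWithBarrier;
  std::vector<const Block *> Succs;
};

int main() {
  std::vector<Block> Blocks = {{"bb.0", false, {}},
                               {"bb.1", true, {}},
                               {"bb.2", false, {}}};
  Block *AddFallthroughFrom = nullptr;
  for (Block &MBB : Blocks) {
    // A pending fallthrough edge targets the block we just reached.
    if (AddFallthroughFrom) {
      AddFallthroughFrom->Succs.push_back(&MBB);
      AddFallthroughFrom = nullptr;
    }
    // "Parse" the block: no barrier at the end means it falls through,
    // but the target does not exist yet; remember the source instead.
    if (!MBB.EndsWithBarrier)
      AddFallthroughFrom = &MBB;
  }
  for (const Block &B : Blocks)
    for (const Block *S : B.Succs)
      std::cout << B.Name << " -> " << S->Name << "\n"; // bb.0 -> bb.1
}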


@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
#include "MIRPrinter.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/CodeGen/GlobalISel/RegisterBank.h"
@ -34,6 +35,7 @@
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Options.h"
#include "llvm/Support/YAMLTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
@ -42,6 +44,9 @@
using namespace llvm;
static cl::opt<bool> SimplifyMIR("simplify-mir",
cl::desc("Leave out unnecessary information when printing MIR"));
namespace {
/// This structure describes how to print out stack object references.
@ -105,6 +110,9 @@ class MIPrinter {
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds;
const DenseMap<int, FrameIndexOperand> &StackObjectOperandMapping;
bool canPredictBranchProbabilities(const MachineBasicBlock &MBB) const;
bool canPredictSuccessors(const MachineBasicBlock &MBB) const;
public:
MIPrinter(raw_ostream &OS, ModuleSlotTracker &MST,
const DenseMap<const uint32_t *, unsigned> &RegisterMaskIds,
@ -454,6 +462,63 @@ void MIRPrinter::initRegisterMaskIds(const MachineFunction &MF) {
RegisterMaskIds.insert(std::make_pair(Mask, I++));
}
void llvm::guessSuccessors(const MachineBasicBlock &MBB,
SmallVectorImpl<MachineBasicBlock*> &Result,
bool &IsFallthrough) {
SmallPtrSet<MachineBasicBlock*,8> Seen;
for (const MachineInstr &MI : MBB) {
if (MI.isPHI())
continue;
for (const MachineOperand &MO : MI.operands()) {
if (!MO.isMBB())
continue;
MachineBasicBlock *Succ = MO.getMBB();
auto RP = Seen.insert(Succ);
if (RP.second)
Result.push_back(Succ);
}
}
MachineBasicBlock::const_iterator I = MBB.getLastNonDebugInstr();
IsFallthrough = I == MBB.end() || !I->isBarrier();
}
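
guessSuccessors reduces to: collect each distinct basic-block operand in first-seen order (the real code also skips PHIs and uses the last non-debug instruction), then report fallthrough when the last real instruction is not a barrier. The same scan in miniature, with toy instruction and operand types:

#include <iostream>
#include <set>
#include <vector>

struct Instr {
  std::vector<int> BlockOps; // indices of referenced blocks, if any
  bool IsBarrier;
};

int main() {
  // A conditional branch to bb.2; the block then falls off its end.
  std::vector<Instr> Body = {{{2}, false}};
  std::vector<int> Result;
  std::set<int> Seen;
  for (const Instr &MI : Body)
    for (int Succ : MI.BlockOps)
      if (Seen.insert(Succ).second) // dedupe, keep first-seen order
        Result.push_back(Succ);
  bool IsFallthrough = Body.empty() || !Body.back().IsBarrier;
  for (int S : Result)
    std::cout << "successor: bb." << S << "\n";       // successor: bb.2
  std::cout << "fallthrough: " << IsFallthrough << "\n"; // fallthrough: 1
}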
bool
MIPrinter::canPredictBranchProbabilities(const MachineBasicBlock &MBB) const {
if (MBB.succ_size() <= 1)
return true;
if (!MBB.hasSuccessorProbabilities())
return true;
SmallVector<BranchProbability,8> Normalized(MBB.Probs.begin(),
MBB.Probs.end());
BranchProbability::normalizeProbabilities(Normalized.begin(),
Normalized.end());
SmallVector<BranchProbability,8> Equal(Normalized.size());
BranchProbability::normalizeProbabilities(Equal.begin(), Equal.end());
return std::equal(Normalized.begin(), Normalized.end(), Equal.begin());
}
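
canPredictBranchProbabilities asks whether, after normalization, the stored distribution is exactly the uniform one a reader would reconstruct anyway; only then can the probabilities be omitted from the printed MIR. A floating-point approximation of that test (the real code compares fixed-point BranchProbability values exactly):

#include <cmath>
#include <iostream>
#include <numeric>
#include <vector>

static bool canPredictProbs(const std::vector<double> &Weights) {
  if (Weights.size() <= 1)
    return true;
  double Sum = std::accumulate(Weights.begin(), Weights.end(), 0.0);
  // Normalized, every edge must carry weight 1/n to be predictable.
  for (double W : Weights)
    if (std::fabs(W / Sum - 1.0 / Weights.size()) > 1e-9)
      return false;
  return true;
}

int main() {
  std::cout << canPredictProbs({1.0, 1.0, 1.0}) << "\n"; // 1: uniform, omit
  std::cout << canPredictProbs({3.0, 1.0}) << "\n";      // 0: must print
}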
bool MIPrinter::canPredictSuccessors(const MachineBasicBlock &MBB) const {
SmallVector<MachineBasicBlock*,8> GuessedSuccs;
bool GuessedFallthrough;
guessSuccessors(MBB, GuessedSuccs, GuessedFallthrough);
if (GuessedFallthrough) {
const MachineFunction &MF = *MBB.getParent();
MachineFunction::const_iterator NextI = std::next(MBB.getIterator());
if (NextI != MF.end()) {
MachineBasicBlock *Next = const_cast<MachineBasicBlock*>(&*NextI);
if (!is_contained(GuessedSuccs, Next))
GuessedSuccs.push_back(Next);
}
}
if (GuessedSuccs.size() != MBB.succ_size())
return false;
return std::equal(MBB.succ_begin(), MBB.succ_end(), GuessedSuccs.begin());
}
void MIPrinter::print(const MachineBasicBlock &MBB) {
assert(MBB.getNumber() >= 0 && "Invalid MBB number");
OS << "bb." << MBB.getNumber();
@ -492,13 +557,15 @@ void MIPrinter::print(const MachineBasicBlock &MBB) {
bool HasLineAttributes = false;
// Print the successors
if (!MBB.succ_empty()) {
bool canPredictProbs = canPredictBranchProbabilities(MBB);
if (!MBB.succ_empty() && (!SimplifyMIR || !canPredictProbs ||
!canPredictSuccessors(MBB))) {
OS.indent(2) << "successors: ";
for (auto I = MBB.succ_begin(), E = MBB.succ_end(); I != E; ++I) {
if (I != MBB.succ_begin())
OS << ", ";
printMBBReference(**I);
if (MBB.hasSuccessorProbabilities())
if (!SimplifyMIR || !canPredictProbs)
OS << '('
<< format("0x%08" PRIx32, MBB.getSuccProbability(I).getNumerator())
<< ')';


@ -12,7 +12,8 @@
//
//===----------------------------------------------------------------------===//
#include "MIRPrinter.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MIRYamlMapping.h"


@ -19,6 +19,7 @@
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <cassert>
@ -175,6 +176,31 @@ unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
return (unsigned)Offset;
}
void MachineFrameInfo::computeMaxCallFrameSize(const MachineFunction &MF) {
const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
unsigned FrameSetupOpcode = TII.getCallFrameSetupOpcode();
unsigned FrameDestroyOpcode = TII.getCallFrameDestroyOpcode();
assert(FrameSetupOpcode != ~0u && FrameDestroyOpcode != ~0u &&
"Can only compute MaxCallFrameSize if Setup/Destroy opcode are known");
MaxCallFrameSize = 0;
for (const MachineBasicBlock &MBB : MF) {
for (const MachineInstr &MI : MBB) {
unsigned Opcode = MI.getOpcode();
if (Opcode == FrameSetupOpcode || Opcode == FrameDestroyOpcode) {
unsigned Size = TII.getFrameSize(MI);
MaxCallFrameSize = std::max(MaxCallFrameSize, Size);
AdjustsStack = true;
} else if (MI.isInlineAsm()) {
// Some inline asm blocks need a stack frame, as indicated by operand 1.
unsigned ExtraInfo = MI.getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
AdjustsStack = true;
}
}
}
}
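
computeMaxCallFrameSize is a single linear scan: each call-frame setup/destroy pseudo carries the size of the frame it establishes, and frame lowering needs the function-wide maximum. A toy version over a flat instruction list (opcode values are arbitrary stand-ins):

#include <algorithm>
#include <iostream>
#include <vector>

struct Instr {
  unsigned Opcode;
  unsigned FrameSize; // meaningful only for setup/destroy pseudos
};

int main() {
  const unsigned FrameSetup = 1, FrameDestroy = 2, Call = 3;
  std::vector<Instr> Fn = {
      {FrameSetup, 16}, {Call, 0}, {FrameDestroy, 16},
      {FrameSetup, 32}, {Call, 0}, {FrameDestroy, 32},
  };
  unsigned MaxCallFrameSize = 0;
  bool AdjustsStack = false;
  for (const Instr &MI : Fn)
    if (MI.Opcode == FrameSetup || MI.Opcode == FrameDestroy) {
      MaxCallFrameSize = std::max(MaxCallFrameSize, MI.FrameSize);
      AdjustsStack = true;
    }
  std::cout << MaxCallFrameSize << " " << AdjustsStack << "\n"; // 32 1
}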
void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
if (Objects.empty()) return;


@ -188,8 +188,9 @@ namespace {
return Reg < regsReserved.size() && regsReserved.test(Reg);
}
bool isAllocatable(unsigned Reg) {
return Reg < TRI->getNumRegs() && MRI->isAllocatable(Reg);
bool isAllocatable(unsigned Reg) const {
return Reg < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
!regsReserved.test(Reg);
}
// Analysis information if available
@ -526,7 +527,8 @@ void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
void MachineVerifier::visitMachineFunctionBefore() {
lastIndex = SlotIndex();
regsReserved = MRI->getReservedRegs();
regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
: TRI->getReservedRegs(*MF);
if (!MF->empty())
markReachable(&MF->front());


@ -277,6 +277,9 @@ void PEI::calculateCallFrameInfo(MachineFunction &Fn) {
AdjustsStack = true;
}
assert(!MFI.isMaxCallFrameSizeComputed() ||
(MFI.getMaxCallFrameSize() == MaxCallFrameSize &&
MFI.adjustsStack() == AdjustsStack));
MFI.setAdjustsStack(AdjustsStack);
MFI.setMaxCallFrameSize(MaxCallFrameSize);


@ -6688,6 +6688,9 @@ SDValue DAGCombiner::visitVSELECT(SDNode *N) {
if (isAbs) {
EVT VT = LHS.getValueType();
if (TLI.isOperationLegalOrCustom(ISD::ABS, VT))
return DAG.getNode(ISD::ABS, DL, VT, LHS);
SDValue Shift = DAG.getNode(
ISD::SRA, DL, VT, LHS,
DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
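
When ISD::ABS is not legal, the combine falls back to the classic branch-free expansion: an arithmetic shift produces 0 or -1, and (x + s) ^ s negates exactly when x was negative. The identity, checked directly (assuming arithmetic right shift on signed values, which is what the ISD::SRA node guarantees):

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Branch-free abs, mirroring the SRA/ADD/XOR expansion in the combine.
static int32_t absViaShift(int32_t X) {
  int32_t S = X >> 31; // 0 if X >= 0, all ones if X < 0 (arithmetic shift)
  return (X + S) ^ S;  // X when S == 0, ~(X - 1) == -X otherwise
}

int main() {
  for (int32_t X : {0, 1, -1, 42, -42, INT32_MIN + 1})
    assert(absViaShift(X) == std::abs(X));
  return 0;
}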
@ -9469,6 +9472,14 @@ SDValue DAGCombiner::visitFMULForFMADistributiveCombine(SDNode *N) {
return SDValue();
}
static bool isFMulNegTwo(SDValue &N) {
if (N.getOpcode() != ISD::FMUL)
return false;
if (ConstantFPSDNode *CFP = isConstOrConstSplatFP(N.getOperand(1)))
return CFP->isExactlyValue(-2.0);
return false;
}
SDValue DAGCombiner::visitFADD(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@ -9507,6 +9518,16 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
return DAG.getNode(ISD::FSUB, DL, VT, N1,
GetNegatedExpression(N0, DAG, LegalOperations), Flags);
// fold (fadd A, (fmul B, -2.0)) -> (fsub A, (fadd B, B))
// fold (fadd (fmul B, -2.0), A) -> (fsub A, (fadd B, B))
if ((isFMulNegTwo(N0) && N0.hasOneUse()) ||
(isFMulNegTwo(N1) && N1.hasOneUse())) {
bool N1IsFMul = isFMulNegTwo(N1);
SDValue AddOp = N1IsFMul ? N1.getOperand(0) : N0.getOperand(0);
SDValue Add = DAG.getNode(ISD::FADD, DL, VT, AddOp, AddOp, Flags);
return DAG.getNode(ISD::FSUB, DL, VT, N1IsFMul ? N0 : N1, Add, Flags);
}
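
The new fold trades a multiply for an add: A + B * -2.0 becomes A - (B + B). This is exact in IEEE arithmetic, since scaling by a power of two and negation never round, so B + B and -(B * -2.0) are bit-identical. A spot check:

#include <cassert>

int main() {
  const double Vals[] = {0.0, 1.0, 0.1, -3.5, 1e308, -1e-308};
  for (double A : Vals)
    for (double B : Vals)
      // fadd A, (fmul B, -2.0)  ==  fsub A, (fadd B, B)
      assert(A + B * -2.0 == A - (B + B));
  return 0;
}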
// FIXME: Auto-upgrade the target/function-level option.
if (Options.NoSignedZerosFPMath || N->getFlags().hasNoSignedZeros()) {
// fold (fadd A, 0) -> A


@ -861,6 +861,25 @@ bool FastISel::selectPatchpoint(const CallInst *I) {
return true;
}
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
const auto &Triple = TM.getTargetTriple();
if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
return true; // don't do anything to this instruction.
SmallVector<MachineOperand, 8> Ops;
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
/*IsDef=*/false));
Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
/*IsDef=*/false));
MachineInstrBuilder MIB =
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
for (auto &MO : Ops)
MIB.add(MO);
// Insert the Patchable Event Call instruction, which gets lowered properly.
return true;
}
/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
@ -1252,6 +1271,9 @@ bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
case Intrinsic::experimental_patchpoint_void:
case Intrinsic::experimental_patchpoint_i64:
return selectPatchpoint(II);
case Intrinsic::xray_customevent:
return selectXRayCustomEvent(II);
}
return fastLowerIntrinsicCall(II);


@ -402,8 +402,7 @@ FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
if (BitWidth > LOI->Known.getBitWidth()) {
LOI->NumSignBits = 1;
LOI->Known.Zero = LOI->Known.Zero.zextOrTrunc(BitWidth);
LOI->Known.One = LOI->Known.One.zextOrTrunc(BitWidth);
LOI->Known = LOI->Known.zextOrTrunc(BitWidth);
}
return LOI;


@ -67,12 +67,11 @@ ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS)
unsigned
ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
unsigned NumberDeps = 0;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
for (SDep &Pred : SU->Preds) {
if (Pred.isCtrl())
continue;
SUnit *PredSU = I->getSUnit();
SUnit *PredSU = Pred.getSUnit();
const SDNode *ScegN = PredSU->getNode();
if (!ScegN)
@ -105,12 +104,11 @@ ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
unsigned RCId) {
unsigned NumberDeps = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl())
for (const SDep &Succ : SU->Succs) {
if (Succ.isCtrl())
continue;
SUnit *SuccSU = I->getSUnit();
SUnit *SuccSU = Succ.getSUnit();
const SDNode *ScegN = SuccSU->getNode();
if (!ScegN)
continue;
@ -142,9 +140,8 @@ unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
static unsigned numberCtrlDepsInSU(SUnit *SU) {
unsigned NumberDeps = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I)
if (I->isCtrl())
for (const SDep &Succ : SU->Succs)
if (Succ.isCtrl())
NumberDeps++;
return NumberDeps;
@ -152,9 +149,8 @@ static unsigned numberCtrlDepsInSU(SUnit *SU) {
static unsigned numberCtrlPredInSU(SUnit *SU) {
unsigned NumberDeps = 0;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I)
if (I->isCtrl())
for (SDep &Pred : SU->Preds)
if (Pred.isCtrl())
NumberDeps++;
return NumberDeps;
@ -212,15 +208,14 @@ bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
SUnit *OnlyAvailablePred = nullptr;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit &Pred = *I->getSUnit();
if (!Pred.isScheduled) {
for (const SDep &Pred : SU->Preds) {
SUnit &PredSU = *Pred.getSUnit();
if (!PredSU.isScheduled) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
if (OnlyAvailablePred && OnlyAvailablePred != &PredSU)
return nullptr;
OnlyAvailablePred = &Pred;
OnlyAvailablePred = &PredSU;
}
}
return OnlyAvailablePred;
@ -230,9 +225,8 @@ void ResourcePriorityQueue::push(SUnit *SU) {
// Look at all of the successors of this node. Count the number of nodes that
// this node is the sole unscheduled node for.
unsigned NumNodesBlocking = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I)
if (getSingleUnscheduledPred(I->getSUnit()) == SU)
for (const SDep &Succ : SU->Succs)
if (getSingleUnscheduledPred(Succ.getSUnit()) == SU)
++NumNodesBlocking;
NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
@ -269,14 +263,13 @@ bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
// Now see if there are no other dependencies
// to instructions already in the packet.
for (unsigned i = 0, e = Packet.size(); i != e; ++i)
for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
E = Packet[i]->Succs.end(); I != E; ++I) {
for (const SDep &Succ : Packet[i]->Succs) {
// Since we do not add pseudos to packets, might as well
// ignore order deps.
if (I->isCtrl())
if (Succ.isCtrl())
continue;
if (I->getSUnit() == SU)
if (Succ.getSUnit() == SU)
return false;
}
@ -499,11 +492,10 @@ void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
}
}
}
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
for (SDep &Pred : SU->Preds) {
if (Pred.isCtrl() || (Pred.getSUnit()->NumRegDefsLeft == 0))
continue;
--I->getSUnit()->NumRegDefsLeft;
--Pred.getSUnit()->NumRegDefsLeft;
}
}
@ -515,10 +507,9 @@ void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
// number of live ranges. All others, increase it.
unsigned NumberNonControlDeps = 0;
for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
adjustPriorityOfUnscheduledPreds(I->getSUnit());
if (!I->isCtrl())
for (const SDep &Succ : SU->Succs) {
adjustPriorityOfUnscheduledPreds(Succ.getSUnit());
if (!Succ.isCtrl())
NumberNonControlDeps++;
}
@ -595,8 +586,7 @@ SUnit *ResourcePriorityQueue::pop() {
std::vector<SUnit *>::iterator Best = Queue.begin();
if (!DisableDFASched) {
int BestCost = SUSchedulingCost(*Best);
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I) {
for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I) {
if (SUSchedulingCost(*I) > BestCost) {
BestCost = SUSchedulingCost(*I);
@ -606,8 +596,7 @@ SUnit *ResourcePriorityQueue::pop() {
}
// Use default TD scheduling mechanism.
else {
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
for (auto I = std::next(Queue.begin()), E = Queue.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
}
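
This file and the two scheduler files that follow are a mechanical modernization: explicit pred_iterator/succ_iterator loops become range-based for over SU->Preds and SU->Succs. The shape of the rewrite, side by side in a self-contained toy:

#include <vector>

struct SDep { bool Ctrl; bool isCtrl() const { return Ctrl; } };
struct SUnit { std::vector<SDep> Preds; };

static unsigned countCtrlOld(SUnit *SU) {
  unsigned N = 0;
  for (std::vector<SDep>::const_iterator I = SU->Preds.begin(),
                                         E = SU->Preds.end(); I != E; ++I)
    if (I->isCtrl())
      ++N;
  return N;
}

static unsigned countCtrlNew(SUnit *SU) {
  unsigned N = 0;
  for (const SDep &Pred : SU->Preds) // same iteration, no iterator noise
    if (Pred.isCtrl())
      ++N;
  return N;
}

int main() {
  SUnit SU{{{true}, {false}, {true}}};
  return countCtrlOld(&SU) == countCtrlNew(&SU) ? 0 : 1;
}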


@ -160,18 +160,17 @@ void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
ReleasePred(SU, &*I);
if (I->isAssignedRegDep()) {
for (SDep &Pred : SU->Preds) {
ReleasePred(SU, &Pred);
if (Pred.isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
if (!LiveRegDefs[I->getReg()]) {
if (!LiveRegDefs[Pred.getReg()]) {
++NumLiveRegs;
LiveRegDefs[I->getReg()] = I->getSUnit();
LiveRegCycles[I->getReg()] = CurCycle;
LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
LiveRegCycles[Pred.getReg()] = CurCycle;
}
}
}
@ -191,16 +190,15 @@ void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
ReleasePredecessors(SU, CurCycle);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isAssignedRegDep()) {
if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
for (SDep &Succ : SU->Succs) {
if (Succ.isAssignedRegDep()) {
if (LiveRegCycles[Succ.getReg()] == Succ.getSUnit()->getHeight()) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
assert(LiveRegDefs[I->getReg()] == SU &&
assert(LiveRegDefs[Succ.getReg()] == SU &&
"Physical register dependency violated?");
--NumLiveRegs;
LiveRegDefs[I->getReg()] = nullptr;
LiveRegCycles[I->getReg()] = 0;
LiveRegDefs[Succ.getReg()] = nullptr;
LiveRegCycles[Succ.getReg()] = 0;
}
}
}
@ -282,22 +280,20 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
SmallVector<SDep, 4> LoadPreds;
SmallVector<SDep, 4> NodePreds;
SmallVector<SDep, 4> NodeSuccs;
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isCtrl())
ChainPred = *I;
else if (I->getSUnit()->getNode() &&
I->getSUnit()->getNode()->isOperandOf(LoadNode))
LoadPreds.push_back(*I);
for (SDep &Pred : SU->Preds) {
if (Pred.isCtrl())
ChainPred = Pred;
else if (Pred.getSUnit()->getNode() &&
Pred.getSUnit()->getNode()->isOperandOf(LoadNode))
LoadPreds.push_back(Pred);
else
NodePreds.push_back(*I);
NodePreds.push_back(Pred);
}
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isCtrl())
ChainSuccs.push_back(*I);
for (SDep &Succ : SU->Succs) {
if (Succ.isCtrl())
ChainSuccs.push_back(Succ);
else
NodeSuccs.push_back(*I);
NodeSuccs.push_back(Succ);
}
if (ChainPred.getSUnit()) {
@ -354,21 +350,19 @@ SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
NewSU = Clone(SU);
// New SUnit has the exact same predecessors.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I)
if (!I->isArtificial())
AddPred(NewSU, *I);
for (SDep &Pred : SU->Preds)
if (!Pred.isArtificial())
AddPred(NewSU, Pred);
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isArtificial())
for (SDep &Succ : SU->Succs) {
if (Succ.isArtificial())
continue;
SUnit *SuccSU = I->getSUnit();
SUnit *SuccSU = Succ.getSUnit();
if (SuccSU->isScheduled) {
SDep D = *I;
SDep D = Succ;
D.setSUnit(NewSU);
AddPred(SuccSU, D);
D.setSUnit(SU);
@ -399,16 +393,15 @@ void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
// Only copy scheduled successors. Cut them from old node's successor
// list and move them over.
SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
if (I->isArtificial())
for (SDep &Succ : SU->Succs) {
if (Succ.isArtificial())
continue;
SUnit *SuccSU = I->getSUnit();
SUnit *SuccSU = Succ.getSUnit();
if (SuccSU->isScheduled) {
SDep D = *I;
SDep D = Succ;
D.setSUnit(CopyToSU);
AddPred(SuccSU, D);
DelDeps.push_back(std::make_pair(SuccSU, *I));
DelDeps.push_back(std::make_pair(SuccSU, Succ));
}
}
for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
@ -479,10 +472,9 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
SmallSet<unsigned, 4> RegAdded;
// If this node would clobber any "live" register, then it's not ready.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isAssignedRegDep()) {
CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
for (SDep &Pred : SU->Preds) {
if (Pred.isAssignedRegDep()) {
CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs,
RegAdded, LRegs, TRI);
}
}
@ -755,9 +747,8 @@ void ScheduleDAGLinearize::Schedule() {
// Glue user must be scheduled together with the glue operand. So other
// users of the glue operand must be treated as its users.
SDNode *ImmGUser = Glue->getGluedUser();
for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
ui != ue; ++ui)
if (*ui == ImmGUser)
for (const SDNode *U : Glue->uses())
if (U == ImmGUser)
--Degree;
GUser->setNodeId(UDegree + Degree);
Glue->setNodeId(1);


@ -520,21 +520,20 @@ FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
// Bottom up: release predecessors
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
ReleasePred(SU, &*I);
if (I->isAssignedRegDep()) {
for (SDep &Pred : SU->Preds) {
ReleasePred(SU, &Pred);
if (Pred.isAssignedRegDep()) {
// This is a physical register dependency and it's impossible or
// expensive to copy the register. Make sure nothing that can
// clobber the register is scheduled between the predecessor and
// this node.
SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
SUnit *RegDef = LiveRegDefs[Pred.getReg()]; (void)RegDef;
assert((!RegDef || RegDef == SU || RegDef == Pred.getSUnit()) &&
"interference on register dependence");
LiveRegDefs[I->getReg()] = I->getSUnit();
if (!LiveRegGens[I->getReg()]) {
LiveRegDefs[Pred.getReg()] = Pred.getSUnit();
if (!LiveRegGens[Pred.getReg()]) {
++NumLiveRegs;
LiveRegGens[I->getReg()] = SU;
LiveRegGens[Pred.getReg()] = SU;
}
}
}
@ -733,15 +732,14 @@ void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
ReleasePredecessors(SU);
// Release all the implicit physical register defs that are live.
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
I != E; ++I) {
// LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
for (SDep &Succ : SU->Succs) {
// LiveRegDefs[Succ.getReg()] != SU when SU is a two-address node.
if (Succ.isAssignedRegDep() && LiveRegDefs[Succ.getReg()] == SU) {
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
--NumLiveRegs;
LiveRegDefs[I->getReg()] = nullptr;
LiveRegGens[I->getReg()] = nullptr;
releaseInterferences(I->getReg());
LiveRegDefs[Succ.getReg()] = nullptr;
LiveRegGens[Succ.getReg()] = nullptr;
releaseInterferences(Succ.getReg());
}
}
// Release the special call resource dependence, if this is the beginning
@ -802,17 +800,16 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
DEBUG(SU->dump(this));
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
CapturePred(&*I);
if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
for (SDep &Pred : SU->Preds) {
CapturePred(&Pred);
if (Pred.isAssignedRegDep() && SU == LiveRegGens[Pred.getReg()]){
assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
assert(LiveRegDefs[Pred.getReg()] == Pred.getSUnit() &&
"Physical register dependency violated?");
--NumLiveRegs;
LiveRegDefs[I->getReg()] = nullptr;
LiveRegGens[I->getReg()] = nullptr;
releaseInterferences(I->getReg());
LiveRegDefs[Pred.getReg()] = nullptr;
LiveRegGens[Pred.getReg()] = nullptr;
releaseInterferences(Pred.getReg());
}
}
@ -895,7 +892,7 @@ void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
unsigned HazardCycle = (*I)->getHeight();
for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
for (auto E = Sequence.end(); I != E; ++I) {
SUnit *SU = *I;
for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
HazardRec->RecedeCycle();
@ -1261,10 +1258,9 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
//
// If SU is the currently live definition of the same register that it uses,
// then we are free to schedule it.
for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs.get(),
for (SDep &Pred : SU->Preds) {
if (Pred.isAssignedRegDep() && LiveRegDefs[Pred.getReg()] != SU)
CheckForLiveRegDef(Pred.getSUnit(), Pred.getReg(), LiveRegDefs.get(),
RegAdded, LRegs, TRI);
}
@ -1743,8 +1739,7 @@ protected:
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
std::vector<SUnit *>::iterator Best = Q.begin();
for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
E = Q.end(); I != E; ++I)
for (auto I = std::next(Q.begin()), E = Q.end(); I != E; ++I)
if (Picker(*Best, *I))
Best = I;
SUnit *V = *Best;


@ -2017,8 +2017,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (SrcOp.getValueSizeInBits() != BitWidth) {
assert(SrcOp.getValueSizeInBits() > BitWidth &&
"Expected BUILD_VECTOR implicit truncation");
Known2.One = Known2.One.trunc(BitWidth);
Known2.Zero = Known2.Zero.trunc(BitWidth);
Known2 = Known2.trunc(BitWidth);
}
// Known bits are the values that are shared by every demanded element.
@ -2045,8 +2044,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (M < 0) {
// For UNDEF elements, we don't know anything about the common state of
// the shuffle result.
Known.One.clearAllBits();
Known.Zero.clearAllBits();
Known.resetAll();
DemandedLHS.clearAllBits();
DemandedRHS.clearAllBits();
break;
@ -2219,14 +2217,13 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
Known2.Zero.countLeadingOnes(),
BitWidth) - BitWidth;
Known.Zero.clearAllBits();
Known.resetAll();
Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
break;
@ -2377,7 +2374,10 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
break;
}
case ISD::CTPOP: {
Known.Zero.setBitsFrom(Log2_32(BitWidth)+1);
computeKnownBits(Op.getOperand(0), Known2, DemandedElts, Depth + 1);
// If we know some of the bits are zero, they can't be one.
unsigned PossibleOnes = BitWidth - Known2.Zero.countPopulation();
Known.Zero.setBitsFrom(Log2_32(PossibleOnes) + 1);
break;
}
case ISD::LOAD: {
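
The improved CTPOP case is pure bound arithmetic: if the operand has Z bits known zero, its population count is at most BitWidth - Z, so the result fits in Log2_32(BitWidth - Z) + 1 low bits and everything above is known zero. A worked check of the bit math (log2_32 is a local stand-in for llvm::Log2_32):

#include <bitset>
#include <cassert>
#include <cstdint>

static unsigned log2_32(uint32_t V) { // stand-in for llvm::Log2_32
  unsigned R = 0;
  while (V >>= 1)
    ++R;
  return R;
}

int main() {
  const unsigned BitWidth = 32;
  const uint32_t KnownZero = 0xFFFFFF00u; // 24 bits proven zero
  unsigned PossibleOnes =
      BitWidth - static_cast<unsigned>(std::bitset<32>(KnownZero).count());
  // popcount <= 8, and 8 needs only bits 0..3, so bits 4..31 are known zero.
  assert(PossibleOnes == 8 && log2_32(PossibleOnes) + 1 == 4);
  return 0;
}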
@ -2396,24 +2396,20 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
case ISD::ZERO_EXTEND_VECTOR_INREG: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
Known.Zero = Known.Zero.trunc(InBits);
Known.One = Known.One.trunc(InBits);
Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known,
DemandedElts.zext(InVT.getVectorNumElements()),
Depth + 1);
Known.Zero = Known.Zero.zext(BitWidth);
Known.One = Known.One.zext(BitWidth);
Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBits);
break;
}
case ISD::ZERO_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
Known.Zero = Known.Zero.trunc(InBits);
Known.One = Known.One.trunc(InBits);
Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
Known.Zero = Known.Zero.zext(BitWidth);
Known.One = Known.One.zext(BitWidth);
Known = Known.zext(BitWidth);
Known.Zero.setBitsFrom(InBits);
break;
}
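
The repeated Known.Zero = ...; Known.One = ...; pairs collapse into KnownBits helpers that resize both masks in lock step; note that zext here leaves the new high bits unknown, which is why the callers still do Known.Zero.setBitsFrom(InBits) afterwards. A fixed-width model of those helpers (the real KnownBits wraps arbitrary-precision APInts; names mirror the API but this is a sketch, not LLVM code):

#include <cassert>
#include <cstdint>

struct KnownBits {
  uint64_t Zero = 0, One = 0;
  unsigned Width;
  explicit KnownBits(unsigned W) : Width(W) {}

  static uint64_t mask(unsigned W) { return W >= 64 ? ~0ULL : (1ULL << W) - 1; }

  KnownBits trunc(unsigned W) const { // keep the low W bits of both masks
    KnownBits R(W);
    R.Zero = Zero & mask(W);
    R.One = One & mask(W);
    return R;
  }
  KnownBits zext(unsigned W) const { // widened bits start out unknown
    KnownBits R(W);
    R.Zero = Zero;
    R.One = One;
    return R;
  }
  void setZeroFrom(unsigned Bit) { // Known.Zero.setBitsFrom analogue
    Zero |= ~mask(Bit) & mask(Width);
  }
};

int main() {
  KnownBits K(8);
  K.One = 0x01;  // bit 0 known one
  K.Zero = 0x80; // bit 7 known zero
  // The ZERO_EXTEND hunk in miniature: trunc, then zext, then mark the
  // freshly created high bits as known zero.
  KnownBits W = K.trunc(4).zext(8);
  W.setZeroFrom(4);
  assert(W.One == 0x01 && W.Zero == 0xF0);
  return 0;
}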
@ -2422,34 +2418,28 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
Known.Zero = Known.Zero.trunc(InBits);
Known.One = Known.One.trunc(InBits);
Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
// If the sign bit is known to be zero or one, then sext will extend
// it to the top bits, else it will just zext.
Known.Zero = Known.Zero.sext(BitWidth);
Known.One = Known.One.sext(BitWidth);
Known = Known.sext(BitWidth);
break;
}
case ISD::ANY_EXTEND: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
Known.Zero = Known.Zero.trunc(InBits);
Known.One = Known.One.trunc(InBits);
Known = Known.trunc(InBits);
computeKnownBits(Op.getOperand(0), Known, Depth+1);
Known.Zero = Known.Zero.zext(BitWidth);
Known.One = Known.One.zext(BitWidth);
Known = Known.zext(BitWidth);
break;
}
case ISD::TRUNCATE: {
EVT InVT = Op.getOperand(0).getValueType();
unsigned InBits = InVT.getScalarSizeInBits();
Known.Zero = Known.Zero.zext(InBits);
Known.One = Known.One.zext(InBits);
Known = Known.zext(InBits);
computeKnownBits(Op.getOperand(0), Known, DemandedElts, Depth + 1);
Known.Zero = Known.Zero.trunc(BitWidth);
Known.One = Known.One.trunc(BitWidth);
Known = Known.trunc(BitWidth);
break;
}
case ISD::AssertZext: {
@ -2606,8 +2596,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
uint32_t Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
Known.One.clearAllBits();
Known.Zero.clearAllBits();
Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@ -2621,8 +2610,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
Known.One = Known.One.getHiBits(Known.One.getBitWidth() - Index * BitWidth);
// Remove high part of known bit mask
Known.Zero = Known.Zero.trunc(BitWidth);
Known.One = Known.One.trunc(BitWidth);
Known = Known.trunc(BitWidth);
break;
}
case ISD::EXTRACT_VECTOR_ELT: {
@ -2634,10 +2622,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
const unsigned NumSrcElts = VecVT.getVectorNumElements();
// If BitWidth > EltBitWidth the value is anyext:ed. So we do not know
// anything about the extended bits.
if (BitWidth > EltBitWidth) {
Known.Zero = Known.Zero.trunc(EltBitWidth);
Known.One = Known.One.trunc(EltBitWidth);
}
if (BitWidth > EltBitWidth)
Known = Known.trunc(EltBitWidth);
ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
if (ConstEltNo && ConstEltNo->getAPIntValue().ult(NumSrcElts)) {
// If we know the element index, just demand that vector element.
@ -2648,10 +2634,8 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
// Unknown element index, so ignore DemandedElts and demand them all.
computeKnownBits(InVec, Known, Depth + 1);
}
if (BitWidth > EltBitWidth) {
Known.Zero = Known.Zero.zext(BitWidth);
Known.One = Known.One.zext(BitWidth);
}
if (BitWidth > EltBitWidth)
Known = Known.zext(BitWidth);
break;
}
case ISD::INSERT_VECTOR_ELT: {


@ -4992,45 +4992,33 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
SDV = DAG.getConstantDbgValue(Variable, Expression, V, Offset, dl,
SDNodeOrder);
DAG.AddDbgValue(SDV, nullptr, false);
} else {
// Do not use getValue() in here; we don't want to generate code at
// this point if it hasn't been done yet.
SDValue N = NodeMap[V];
if (!N.getNode() && isa<Argument>(V))
// Check unused arguments map.
N = UnusedArgNodeMap[V];
if (N.getNode()) {
if (!EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset,
false, N)) {
SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
DAG.AddDbgValue(SDV, N.getNode(), false);
}
} else if (!V->use_empty()) {
// Do not call getValue(V) yet, as we don't want to generate code.
// Remember it for later.
DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
DanglingDebugInfoMap[V] = DDI;
} else {
// We may expand this to cover more cases. One case where we have no
// data available is an unreferenced parameter.
DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
}
}
// Build a debug info table entry.
if (const BitCastInst *BCI = dyn_cast<BitCastInst>(V))
V = BCI->getOperand(0);
const AllocaInst *AI = dyn_cast<AllocaInst>(V);
// Don't handle byval struct arguments or VLAs, for example.
if (!AI) {
DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
return nullptr;
}
DenseMap<const AllocaInst*, int>::iterator SI =
FuncInfo.StaticAllocaMap.find(AI);
if (SI == FuncInfo.StaticAllocaMap.end())
return nullptr; // VLAs.
// Do not use getValue() in here; we don't want to generate code at
// this point if it hasn't been done yet.
SDValue N = NodeMap[V];
if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
N = UnusedArgNodeMap[V];
if (N.getNode()) {
if (EmitFuncArgumentDbgValue(V, Variable, Expression, dl, Offset, false,
N))
return nullptr;
SDV = getDbgValue(N, Variable, Expression, Offset, dl, SDNodeOrder);
DAG.AddDbgValue(SDV, N.getNode(), false);
return nullptr;
}
if (!V->use_empty()) {
// Do not call getValue(V) yet, as we don't want to generate code.
// Remember it for later.
DanglingDebugInfo DDI(&DI, dl, SDNodeOrder);
DanglingDebugInfoMap[V] = DDI;
return nullptr;
}
DEBUG(dbgs() << "Dropping debug location info for:\n " << DI << "\n");
DEBUG(dbgs() << " Last seen at:\n " << *V << "\n");
return nullptr;
}
@ -5715,7 +5703,37 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
setValue(&I, N);
return nullptr;
}
case Intrinsic::xray_customevent: {
// Here we want to make sure that the intrinsic behaves as if it has a
// specific calling convention, and only for x86_64.
// FIXME: Support other platforms later.
const auto &Triple = DAG.getTarget().getTargetTriple();
if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
return nullptr;
SDLoc DL = getCurSDLoc();
SmallVector<SDValue, 8> Ops;
// We want to say that we always want the arguments in registers.
SDValue LogEntryVal = getValue(I.getArgOperand(0));
SDValue StrSizeVal = getValue(I.getArgOperand(1));
SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
SDValue Chain = getRoot();
Ops.push_back(LogEntryVal);
Ops.push_back(StrSizeVal);
Ops.push_back(Chain);
// We need to enforce the calling convention for the call site, so that
// argument ordering is enforced correctly, and so that register allocation
// can see that some registers may be assumed clobbered and must be
// preserved across calls to the intrinsic.
MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL,
DL, NodeTys, Ops);
SDValue patchableNode = SDValue(MN, 0);
DAG.setRoot(patchableNode);
setValue(&I, patchableNode);
return nullptr;
}
case Intrinsic::experimental_deoptimize:
LowerDeoptimizeCall(&I);
return nullptr;

Some files were not shown because too many files have changed in this diff Show More