Upgrade our copy of clang and llvm to the 3.5.1 release. This is a
bugfix-only release; no new features have been added. Please note that
this version requires C++11 support to build; see UPDATING for more
information.

Release notes for llvm and clang can be found here:
<http://llvm.org/releases/3.5.1/docs/ReleaseNotes.html>
<http://llvm.org/releases/3.5.1/tools/clang/docs/ReleaseNotes.html>

MFC after:	1 month
X-MFC-With:	276479
commit 9cac79b378
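As a quick sanity check after rebuilding and installing world with this import, one might confirm that the base-system compiler now identifies itself as 3.5.1. This is only a minimal sketch; the exact version banner formatting may differ slightly between builds:

# Ask the base compiler and C++ driver for their version strings;
# after this upgrade they should report clang version 3.5.1.
cc --version
c++ --version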
@@ -38,6 +38,60 @@
# xargs -n1 | sort | uniq -d;
# done

# 20150118: new clang import which bumps version from 3.5.0 to 3.5.1.
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h
OLD_FILES+=usr/include/clang/3.5.0/altivec.h
OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h
OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h
OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h
OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h
OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h
OLD_FILES+=usr/include/clang/3.5.0/cpuid.h
OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h
OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h
OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h
OLD_FILES+=usr/include/clang/3.5.0/immintrin.h
OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h
OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h
OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/module.modulemap
OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h
OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h
OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h
OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h
OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h
OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h
OLD_DIRS+=usr/include/clang/3.5.0
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd
OLD_DIRS+=usr/lib/clang/3.5.0/lib
OLD_DIRS+=usr/lib/clang/3.5.0
# 20150102: removal of texinfo
OLD_FILES+=usr/bin/info
OLD_FILES+=usr/bin/infokey
UPDATING
@@ -31,6 +31,12 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 11.x IS SLOW:
	disable the most expensive debugging functionality run
	"ln -s 'abort:false,junk:false' /etc/malloc.conf".)

20150118:
	Clang and llvm have been upgraded to 3.5.1 release. This is a bugfix
	only release, no new features have been added. Please see the 20141231
	entry below for information about prerequisites and upgrading, if you
	are not already using 3.5.0.

20150107:
	ELF tools addr2line, elfcopy (strip), nm, size, and strings are now
	taken from the ELF Tool Chain project rather than GNU binutils. They
@@ -253,13 +253,16 @@ class AliasSet : public ilist_node<AliasSet> {
const MDNode *TBAAInfo,
bool KnownMustAlias = false);
void addUnknownInst(Instruction *I, AliasAnalysis &AA);
void removeUnknownInst(Instruction *I) {
void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
bool WasEmpty = UnknownInsts.empty();
for (size_t i = 0, e = UnknownInsts.size(); i != e; ++i)
if (UnknownInsts[i] == I) {
UnknownInsts[i] = UnknownInsts.back();
UnknownInsts.pop_back();
--i; --e; // Revisit the moved entry.
}
if (!WasEmpty && UnknownInsts.empty())
dropRef(AST);
}
void setVolatile() { Volatile = true; }
@@ -31,18 +31,25 @@ class TargetRegisterInfo;
class CCValAssign {
public:
enum LocInfo {
Full, // The value fills the full location.
SExt, // The value is sign extended in the location.
ZExt, // The value is zero extended in the location.
AExt, // The value is extended with undefined upper bits.
BCvt, // The value is bit-converted in the location.
VExt, // The value is vector-widened in the location.
// FIXME: Not implemented yet. Code that uses AExt to mean
// vector-widen should be fixed to use VExt instead.
FPExt, // The floating-point value is fp-extended in the location.
Indirect // The location contains pointer to the value.
Full, // The value fills the full location.
SExt, // The value is sign extended in the location.
ZExt, // The value is zero extended in the location.
AExt, // The value is extended with undefined upper bits.
BCvt, // The value is bit-converted in the location.
VExt, // The value is vector-widened in the location.
// FIXME: Not implemented yet. Code that uses AExt to mean
// vector-widen should be fixed to use VExt instead.
FPExt, // The floating-point value is fp-extended in the location.
Indirect, // The location contains pointer to the value.
SExtUpper, // The value is in the upper bits of the location and should be
// sign extended when retrieved.
ZExtUpper, // The value is in the upper bits of the location and should be
// zero extended when retrieved.
AExtUpper // The value is in the upper bits of the location and should be
// extended with undefined upper bits when retrieved.
// TODO: a subset of the value is in the location.
};

private:
/// ValNo - This is the value number begin assigned (e.g. an argument number).
unsigned ValNo;
@@ -146,6 +153,9 @@ class CCValAssign {
return (HTP == AExt || HTP == SExt || HTP == ZExt);
}

bool isUpperBitsInLoc() const {
return HTP == AExtUpper || HTP == SExtUpper || HTP == ZExtUpper;
}
};

/// CCAssignFn - This function assigns a location for Val, updating State to
@@ -208,10 +218,10 @@ class CCState {
// while "%t" goes to the stack: it wouldn't be described in ByValRegs.
//
// Supposed use-case for this collection:
// 1. Initially ByValRegs is empty, InRegsParamsProceed is 0.
// 1. Initially ByValRegs is empty, InRegsParamsProcessed is 0.
// 2. HandleByVal fillups ByValRegs.
// 3. Argument analysis (LowerFormatArguments, for example). After
// some byval argument was analyzed, InRegsParamsProceed is increased.
// some byval argument was analyzed, InRegsParamsProcessed is increased.
struct ByValInfo {
ByValInfo(unsigned B, unsigned E, bool IsWaste = false) :
Begin(B), End(E), Waste(IsWaste) {}
@@ -229,9 +239,9 @@ class CCState {
};
SmallVector<ByValInfo, 4 > ByValRegs;

// InRegsParamsProceed - shows how many instances of ByValRegs was proceed
// InRegsParamsProcessed - shows how many instances of ByValRegs was proceed
// during argument analysis.
unsigned InRegsParamsProceed;
unsigned InRegsParamsProcessed;

protected:
ParmContext CallOrPrologue;
@@ -412,7 +422,7 @@ class CCState {
unsigned getInRegsParamsCount() const { return ByValRegs.size(); }

// Returns count of byval in-regs arguments proceed.
unsigned getInRegsParamsProceed() const { return InRegsParamsProceed; }
unsigned getInRegsParamsProcessed() const { return InRegsParamsProcessed; }

// Get information about N-th byval parameter that is stored in registers.
// Here "ByValParamIndex" is N.
@@ -436,20 +446,20 @@ class CCState {
// Returns false, if end is reached.
bool nextInRegsParam() {
unsigned e = ByValRegs.size();
if (InRegsParamsProceed < e)
++InRegsParamsProceed;
return InRegsParamsProceed < e;
if (InRegsParamsProcessed < e)
++InRegsParamsProcessed;
return InRegsParamsProcessed < e;
}

// Clear byval registers tracking info.
void clearByValRegsInfo() {
InRegsParamsProceed = 0;
InRegsParamsProcessed = 0;
ByValRegs.clear();
}

// Rewind byval registers tracking info.
void rewindByValRegsInfo() {
InRegsParamsProceed = 0;
InRegsParamsProcessed = 0;
}

ParmContext getCallOrPrologue() const { return CallOrPrologue; }
@@ -67,6 +67,9 @@ class CCIfSplit<CCAction A> : CCIf<"ArgFlags.isSplit()", A> {}
/// the specified action.
class CCIfSRet<CCAction A> : CCIf<"ArgFlags.isSRet()", A> {}

/// CCIfVarArg - If the current function is vararg - apply the action
class CCIfVarArg<CCAction A> : CCIf<"State.isVarArg()", A> {}

/// CCIfNotVarArg - If the current function is not vararg - apply the action
class CCIfNotVarArg<CCAction A> : CCIf<"!State.isVarArg()", A> {}

@@ -119,6 +122,12 @@ class CCPromoteToType<ValueType destTy> : CCAction {
ValueType DestTy = destTy;
}

/// CCPromoteToUpperBitsInType - If applied, this promotes the specified current
/// value to the specified type and shifts the value into the upper bits.
class CCPromoteToUpperBitsInType<ValueType destTy> : CCAction {
ValueType DestTy = destTy;
}

/// CCBitConvertToType - If applied, this bitconverts the specified current
/// value to the specified type.
class CCBitConvertToType<ValueType destTy> : CCAction {
@@ -141,6 +150,13 @@ class CCDelegateTo<CallingConv cc> : CCAction {
/// that the target supports.
class CallingConv<list<CCAction> actions> {
list<CCAction> Actions = actions;
bit Custom = 0;
}

/// CustomCallingConv - An instance of this is used to declare calling
/// conventions that are implemented using a custom function of the same name.
class CustomCallingConv : CallingConv<[]> {
let Custom = 1;
}

/// CalleeSavedRegs - A list of callee saved registers for a given calling
@@ -55,10 +55,13 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
AliasTy = MayAlias;
}

bool ASHadUnknownInsts = !AS.UnknownInsts.empty();
if (UnknownInsts.empty()) { // Merge call sites...
if (!AS.UnknownInsts.empty())
if (ASHadUnknownInsts) {
std::swap(UnknownInsts, AS.UnknownInsts);
} else if (!AS.UnknownInsts.empty()) {
addRef();
}
} else if (ASHadUnknownInsts) {
UnknownInsts.insert(UnknownInsts.end(), AS.UnknownInsts.begin(), AS.UnknownInsts.end());
AS.UnknownInsts.clear();
}
@@ -76,6 +79,8 @@ void AliasSet::mergeSetIn(AliasSet &AS, AliasSetTracker &AST) {
AS.PtrListEnd = &AS.PtrList;
assert(*AS.PtrListEnd == nullptr && "End of list is not null?");
}
if (ASHadUnknownInsts)
AS.dropRef(AST);
}

void AliasSetTracker::removeAliasSet(AliasSet *AS) {
@@ -123,6 +128,8 @@ void AliasSet::addPointer(AliasSetTracker &AST, PointerRec &Entry,
}

void AliasSet::addUnknownInst(Instruction *I, AliasAnalysis &AA) {
if (UnknownInsts.empty())
addRef();
UnknownInsts.push_back(I);

if (!I->mayWriteToMemory()) {
@@ -218,13 +225,14 @@ AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
uint64_t Size,
const MDNode *TBAAInfo) {
AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->Forward || !I->aliasesPointer(Ptr, Size, TBAAInfo, AA)) continue;
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (Cur->Forward || !Cur->aliasesPointer(Ptr, Size, TBAAInfo, AA)) continue;

if (!FoundSet) { // If this is the first alias set ptr can go into.
FoundSet = I; // Remember it.
FoundSet = Cur; // Remember it.
} else { // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*I, *this); // Merge in contents.
FoundSet->mergeSetIn(*Cur, *this); // Merge in contents.
}
}

@@ -246,14 +254,14 @@ bool AliasSetTracker::containsPointer(Value *Ptr, uint64_t Size,

AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
AliasSet *FoundSet = nullptr;
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->Forward || !I->aliasesUnknownInst(Inst, AA))
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (Cur->Forward || !Cur->aliasesUnknownInst(Inst, AA))
continue;

if (!FoundSet) // If this is the first alias set ptr can go into.
FoundSet = I; // Remember it.
else if (!I->Forward) // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*I, *this); // Merge in contents.
FoundSet = Cur; // Remember it.
else if (!Cur->Forward) // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*Cur, *this); // Merge in contents.
}
return FoundSet;
}
@@ -393,6 +401,8 @@ void AliasSetTracker::add(const AliasSetTracker &AST) {
/// tracker.
void AliasSetTracker::remove(AliasSet &AS) {
// Drop all call sites.
if (!AS.UnknownInsts.empty())
AS.dropRef(*this);
AS.UnknownInsts.clear();

// Clear the alias set.
@@ -489,10 +499,10 @@ void AliasSetTracker::deleteValue(Value *PtrVal) {
if (Instruction *Inst = dyn_cast<Instruction>(PtrVal)) {
if (Inst->mayReadOrWriteMemory()) {
// Scan all the alias sets to see if this call site is contained.
for (iterator I = begin(), E = end(); I != E; ++I) {
if (I->Forward) continue;

I->removeUnknownInst(Inst);
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (!Cur->Forward)
Cur->removeUnknownInst(*this, Inst);
}
}
}
@@ -14,7 +14,7 @@
#include "llvm/Analysis/BlockFrequencyInfoImpl.h"
#include "llvm/ADT/SCCIterator.h"
#include "llvm/Support/raw_ostream.h"
#include <deque>
#include <numeric>

using namespace llvm;
using namespace llvm::bfi_detail;
@@ -123,8 +123,12 @@ static void combineWeight(Weight &W, const Weight &OtherW) {
}
assert(W.Type == OtherW.Type);
assert(W.TargetNode == OtherW.TargetNode);
assert(W.Amount < W.Amount + OtherW.Amount && "Unexpected overflow");
W.Amount += OtherW.Amount;
assert(OtherW.Amount && "Expected non-zero weight");
if (W.Amount > W.Amount + OtherW.Amount)
// Saturate on overflow.
W.Amount = UINT64_MAX;
else
W.Amount += OtherW.Amount;
}
static void combineWeightsBySorting(WeightList &Weights) {
// Sort so edges to the same node are adjacent.
@@ -207,11 +211,19 @@ void Distribution::normalize() {
Shift = 33 - countLeadingZeros(Total);

// Early exit if nothing needs to be scaled.
if (!Shift)
if (!Shift) {
// If we didn't overflow then combineWeights() shouldn't have changed the
// sum of the weights, but let's double-check.
assert(Total == std::accumulate(Weights.begin(), Weights.end(), UINT64_C(0),
[](uint64_t Sum, const Weight &W) {
return Sum + W.Amount;
}) &&
"Expected total to be correct");
return;
}

// Recompute the total through accumulation (rather than shifting it) so that
// it's accurate after shifting.
// it's accurate after shifting and any changes combineWeights() made above.
Total = 0;

// Sum the weights to each node and shift right if necessary.
@@ -1987,23 +1987,31 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
default:
return true;
case Instruction::UDiv:
case Instruction::URem:
// x / y is undefined if y == 0, but calculations like x / 3 are safe.
return isKnownNonZero(Inst->getOperand(1), TD);
case Instruction::URem: {
// x / y is undefined if y == 0.
const APInt *V;
if (match(Inst->getOperand(1), m_APInt(V)))
return *V != 0;
return false;
}
case Instruction::SDiv:
case Instruction::SRem: {
Value *Op = Inst->getOperand(1);
// x / y is undefined if y == 0
if (!isKnownNonZero(Op, TD))
return false;
// x / y might be undefined if y == -1
unsigned BitWidth = getBitWidth(Op->getType(), TD);
if (BitWidth == 0)
return false;
APInt KnownZero(BitWidth, 0);
APInt KnownOne(BitWidth, 0);
computeKnownBits(Op, KnownZero, KnownOne, TD);
return !!KnownZero;
// x / y is undefined if y == 0 or x == INT_MIN and y == -1
const APInt *X, *Y;
if (match(Inst->getOperand(1), m_APInt(Y))) {
if (*Y != 0) {
if (*Y == -1) {
// The numerator can't be MinSignedValue if the denominator is -1.
if (match(Inst->getOperand(0), m_APInt(X)))
return !Y->isMinSignedValue();
// The numerator *might* be MinSignedValue.
return false;
}
// The denominator is not 0 or -1, it's safe to proceed.
return true;
}
}
return false;
}
case Instruction::Load: {
const LoadInst *LI = cast<LoadInst>(Inst);
@@ -341,6 +341,8 @@ void MCObjectFileInfo::InitELFMCObjectFileInfo(Triple T) {
break;
case Triple::mips:
case Triple::mipsel:
case Triple::mips64:
case Triple::mips64el:
// MIPS uses indirect pointer to refer personality functions, so that the
// eh_frame section can be read-only. DW.ref.personality will be generated
// for relocation.
@@ -566,11 +566,59 @@ void ARMFrameLowering::emitPrologue(MachineFunction &MF) const {
AFI->setShouldRestoreSPFromFP(true);
}

// Resolve TCReturn pseudo-instruction
void ARMFrameLowering::fixTCReturn(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc dl = MBBI->getDebugLoc();
const ARMBaseInstrInfo &TII =
*MF.getTarget().getSubtarget<ARMSubtarget>().getInstrInfo();

if (!(RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNri))
return;

// Tail call return: adjust the stack pointer and jump to callee.
MBBI = MBB.getLastNonDebugInstr();
MachineOperand &JumpTarget = MBBI->getOperand(0);

// Jump to label or value in register.
if (RetOpcode == ARM::TCRETURNdi) {
unsigned TCOpcode = STI.isThumb() ?
(STI.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
ARM::TAILJMPd;
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
if (JumpTarget.isGlobal())
MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
else {
assert(JumpTarget.isSymbol());
MIB.addExternalSymbol(JumpTarget.getSymbolName(),
JumpTarget.getTargetFlags());
}

// Add the default predicate in Thumb mode.
if (STI.isThumb()) MIB.addImm(ARMCC::AL).addReg(0);
} else if (RetOpcode == ARM::TCRETURNri) {
BuildMI(MBB, MBBI, dl,
TII.get(STI.isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr)).
addReg(JumpTarget.getReg(), RegState::Kill);
}

MachineInstr *NewMI = std::prev(MBBI);
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));

// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
MBBI = NewMI;
}

void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
MachineBasicBlock &MBB) const {
MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
assert(MBBI->isReturn() && "Can only insert epilog into returning blocks");
unsigned RetOpcode = MBBI->getOpcode();
DebugLoc dl = MBBI->getDebugLoc();
MachineFrameInfo *MFI = MF.getFrameInfo();
ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
@@ -588,8 +636,10 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,

// All calls are tail calls in GHC calling conv, and functions have no
// prologue/epilogue.
if (MF.getFunction()->getCallingConv() == CallingConv::GHC)
if (MF.getFunction()->getCallingConv() == CallingConv::GHC) {
fixTCReturn(MF, MBB);
return;
}

if (!AFI->hasStackFrame()) {
if (NumBytes - ArgRegsSaveSize != 0)
@@ -661,42 +711,7 @@ void ARMFrameLowering::emitEpilogue(MachineFunction &MF,
if (AFI->getGPRCalleeSavedArea1Size()) MBBI++;
}

if (RetOpcode == ARM::TCRETURNdi || RetOpcode == ARM::TCRETURNri) {
// Tail call return: adjust the stack pointer and jump to callee.
MBBI = MBB.getLastNonDebugInstr();
MachineOperand &JumpTarget = MBBI->getOperand(0);

// Jump to label or value in register.
if (RetOpcode == ARM::TCRETURNdi) {
unsigned TCOpcode = STI.isThumb() ?
(STI.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND) :
ARM::TAILJMPd;
MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(TCOpcode));
if (JumpTarget.isGlobal())
MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
JumpTarget.getTargetFlags());
else {
assert(JumpTarget.isSymbol());
MIB.addExternalSymbol(JumpTarget.getSymbolName(),
JumpTarget.getTargetFlags());
}

// Add the default predicate in Thumb mode.
if (STI.isThumb()) MIB.addImm(ARMCC::AL).addReg(0);
} else if (RetOpcode == ARM::TCRETURNri) {
BuildMI(MBB, MBBI, dl,
TII.get(STI.isThumb() ? ARM::tTAILJMPr : ARM::TAILJMPr)).
addReg(JumpTarget.getReg(), RegState::Kill);
}

MachineInstr *NewMI = std::prev(MBBI);
for (unsigned i = 1, e = MBBI->getNumOperands(); i != e; ++i)
NewMI->addOperand(MBBI->getOperand(i));

// Delete the pseudo instruction TCRETURN.
MBB.erase(MBBI);
MBBI = NewMI;
}
fixTCReturn(MF, MBB);

if (ArgRegsSaveSize)
emitSPUpdate(isARM, MBB, MBBI, dl, TII, ArgRegsSaveSize);
@@ -31,6 +31,8 @@ class ARMFrameLowering : public TargetFrameLowering {
void emitPrologue(MachineFunction &MF) const override;
void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;

void fixTCReturn(MachineFunction &MF, MachineBasicBlock &MBB) const;

bool spillCalleeSavedRegisters(MachineBasicBlock &MBB,
MachineBasicBlock::iterator MI,
const std::vector<CalleeSavedInfo> &CSI,
@@ -1521,7 +1521,7 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
// True if this byval aggregate will be split between registers
// and memory.
unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
unsigned CurByValIdx = CCInfo.getInRegsParamsProceed();
unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();

if (CurByValIdx < ByValArgsCount) {

@@ -2962,7 +2962,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
if (Flags.isByVal()) {
unsigned ExtraArgRegsSize;
unsigned ExtraArgRegsSaveSize;
computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsProceed(),
computeRegArea(CCInfo, MF, CCInfo.getInRegsParamsProcessed(),
Flags.getByValSize(),
ExtraArgRegsSize, ExtraArgRegsSaveSize);

@@ -3086,7 +3086,7 @@ ARMTargetLowering::LowerFormalArguments(SDValue Chain,
// Since they could be overwritten by lowering of arguments in case of
// a tail call.
if (Flags.isByVal()) {
unsigned CurByValIndex = CCInfo.getInRegsParamsProceed();
unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();

ByValStoreOffset = RoundUpToAlignment(ByValStoreOffset, Flags.getByValAlign());
int FrameIndex = StoreByValRegs(
@@ -200,14 +200,14 @@ class MipsAsmParser : public MCTargetAsmParser {
// Example: INSERT.B $w0[n], $1 => 16 > n >= 0
bool validateMSAIndex(int Val, int RegKind);

void setFeatureBits(unsigned Feature, StringRef FeatureString) {
void setFeatureBits(uint64_t Feature, StringRef FeatureString) {
if (!(STI.getFeatureBits() & Feature)) {
setAvailableFeatures(
ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
}
}

void clearFeatureBits(unsigned Feature, StringRef FeatureString) {
void clearFeatureBits(uint64_t Feature, StringRef FeatureString) {
if (STI.getFeatureBits() & Feature) {
setAvailableFeatures(
ComputeAvailableFeatures(STI.ToggleFeature(FeatureString)));
@@ -250,6 +250,11 @@ static DecodeStatus DecodeMem(MCInst &Inst,
uint64_t Address,
const void *Decoder);

static DecodeStatus DecodeCacheOp(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder);

static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder);

@@ -267,6 +272,14 @@ static DecodeStatus DecodeFMem(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);

static DecodeStatus DecodeFMem2(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);

static DecodeStatus DecodeFMem3(MCInst &Inst, unsigned Insn,
uint64_t Address,
const void *Decoder);

static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -451,7 +464,7 @@ static DecodeStatus DecodeAddiGroupBranch(MCInst &MI, InsnType insn,

InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
bool HasRs = false;

if (Rs >= Rt) {
@@ -490,7 +503,7 @@ static DecodeStatus DecodeDaddiGroupBranch(MCInst &MI, InsnType insn,

InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
bool HasRs = false;

if (Rs >= Rt) {
@@ -530,7 +543,7 @@ static DecodeStatus DecodeBlezlGroupBranch(MCInst &MI, InsnType insn,

InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
bool HasRs = false;

if (Rt == 0)
@@ -575,7 +588,7 @@ static DecodeStatus DecodeBgtzlGroupBranch(MCInst &MI, InsnType insn,

InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;

if (Rt == 0)
return MCDisassembler::Fail;
@@ -617,7 +630,7 @@ static DecodeStatus DecodeBgtzGroupBranch(MCInst &MI, InsnType insn,

InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
bool HasRs = false;
bool HasRt = false;

@@ -666,7 +679,7 @@ static DecodeStatus DecodeBlezGroupBranch(MCInst &MI, InsnType insn,

InsnType Rs = fieldFromInstruction(insn, 21, 5);
InsnType Rt = fieldFromInstruction(insn, 16, 5);
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) << 2;
InsnType Imm = SignExtend64(fieldFromInstruction(insn, 0, 16), 16) * 4;
bool HasRs = false;

if (Rt == 0)
@@ -964,6 +977,23 @@ static DecodeStatus DecodeMem(MCInst &Inst,
return MCDisassembler::Success;
}

static DecodeStatus DecodeCacheOp(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder) {
int Offset = SignExtend32<16>(Insn & 0xffff);
unsigned Hint = fieldFromInstruction(Insn, 16, 5);
unsigned Base = fieldFromInstruction(Insn, 21, 5);

Base = getReg(Decoder, Mips::GPR32RegClassID, Base);

Inst.addOperand(MCOperand::CreateReg(Base));
Inst.addOperand(MCOperand::CreateImm(Offset));
Inst.addOperand(MCOperand::CreateImm(Hint));

return MCDisassembler::Success;
}

static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
int Offset = SignExtend32<10>(fieldFromInstruction(Insn, 16, 10));
@@ -995,15 +1025,15 @@ static DecodeStatus DecodeMSA128Mem(MCInst &Inst, unsigned Insn,
break;
case Mips::LD_H:
case Mips::ST_H:
Inst.addOperand(MCOperand::CreateImm(Offset << 1));
Inst.addOperand(MCOperand::CreateImm(Offset * 2));
break;
case Mips::LD_W:
case Mips::ST_W:
Inst.addOperand(MCOperand::CreateImm(Offset << 2));
Inst.addOperand(MCOperand::CreateImm(Offset * 4));
break;
case Mips::LD_D:
case Mips::ST_D:
Inst.addOperand(MCOperand::CreateImm(Offset << 3));
Inst.addOperand(MCOperand::CreateImm(Offset * 8));
break;
}

@@ -1067,6 +1097,42 @@ static DecodeStatus DecodeFMem(MCInst &Inst,
return MCDisassembler::Success;
}

static DecodeStatus DecodeFMem2(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder) {
int Offset = SignExtend32<16>(Insn & 0xffff);
unsigned Reg = fieldFromInstruction(Insn, 16, 5);
unsigned Base = fieldFromInstruction(Insn, 21, 5);

Reg = getReg(Decoder, Mips::COP2RegClassID, Reg);
Base = getReg(Decoder, Mips::GPR32RegClassID, Base);

Inst.addOperand(MCOperand::CreateReg(Reg));
Inst.addOperand(MCOperand::CreateReg(Base));
Inst.addOperand(MCOperand::CreateImm(Offset));

return MCDisassembler::Success;
}

static DecodeStatus DecodeFMem3(MCInst &Inst,
unsigned Insn,
uint64_t Address,
const void *Decoder) {
int Offset = SignExtend32<16>(Insn & 0xffff);
unsigned Reg = fieldFromInstruction(Insn, 16, 5);
unsigned Base = fieldFromInstruction(Insn, 21, 5);

Reg = getReg(Decoder, Mips::COP3RegClassID, Reg);
Base = getReg(Decoder, Mips::GPR32RegClassID, Base);

Inst.addOperand(MCOperand::CreateReg(Reg));
Inst.addOperand(MCOperand::CreateReg(Base));
Inst.addOperand(MCOperand::CreateImm(Offset));

return MCDisassembler::Success;
}

static DecodeStatus DecodeSpecial3LlSc(MCInst &Inst,
unsigned Insn,
uint64_t Address,
@@ -1225,7 +1291,7 @@ static DecodeStatus DecodeBranchTarget(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
int32_t BranchOffset = (SignExtend32<16>(Offset) << 2) + 4;
int32_t BranchOffset = (SignExtend32<16>(Offset) * 4) + 4;
Inst.addOperand(MCOperand::CreateImm(BranchOffset));
return MCDisassembler::Success;
}
@@ -1244,7 +1310,7 @@ static DecodeStatus DecodeBranchTarget21(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
int32_t BranchOffset = SignExtend32<21>(Offset) << 2;
int32_t BranchOffset = SignExtend32<21>(Offset) * 4;

Inst.addOperand(MCOperand::CreateImm(BranchOffset));
return MCDisassembler::Success;
@@ -1254,7 +1320,7 @@ static DecodeStatus DecodeBranchTarget26(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
int32_t BranchOffset = SignExtend32<26>(Offset) << 2;
int32_t BranchOffset = SignExtend32<26>(Offset) * 4;

Inst.addOperand(MCOperand::CreateImm(BranchOffset));
return MCDisassembler::Success;
@@ -1264,7 +1330,7 @@ static DecodeStatus DecodeBranchTargetMM(MCInst &Inst,
unsigned Offset,
uint64_t Address,
const void *Decoder) {
int32_t BranchOffset = SignExtend32<16>(Offset) << 1;
int32_t BranchOffset = SignExtend32<16>(Offset) * 2;
Inst.addOperand(MCOperand::CreateImm(BranchOffset));
return MCDisassembler::Success;
}
@@ -1317,12 +1383,12 @@ static DecodeStatus DecodeExtSize(MCInst &Inst,

static DecodeStatus DecodeSimm19Lsl2(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateImm(SignExtend32<19>(Insn) << 2));
Inst.addOperand(MCOperand::CreateImm(SignExtend32<19>(Insn) * 4));
return MCDisassembler::Success;
}

static DecodeStatus DecodeSimm18Lsl3(MCInst &Inst, unsigned Insn,
uint64_t Address, const void *Decoder) {
Inst.addOperand(MCOperand::CreateImm(SignExtend32<18>(Insn) << 3));
Inst.addOperand(MCOperand::CreateImm(SignExtend32<18>(Insn) * 8));
return MCDisassembler::Success;
}
@@ -57,6 +57,8 @@ def MipsInstrInfo : InstrInfo;
// Mips Subtarget features //
//===----------------------------------------------------------------------===//

def FeatureNoABICalls : SubtargetFeature<"noabicalls", "NoABICalls", "true",
"Disable SVR4-style position-independent code.">;
def FeatureGP64Bit : SubtargetFeature<"gp64", "IsGP64bit", "true",
"General Purpose Registers are 64-bit wide.">;
def FeatureFP64Bit : SubtargetFeature<"fp64", "IsFP64bit", "true",
@@ -67,13 +69,13 @@ def FeatureNaN2008 : SubtargetFeature<"nan2008", "IsNaN2008bit", "true",
"IEEE 754-2008 NaN encoding.">;
def FeatureSingleFloat : SubtargetFeature<"single-float", "IsSingleFloat",
"true", "Only supports single precision float">;
def FeatureO32 : SubtargetFeature<"o32", "MipsABI", "O32",
def FeatureO32 : SubtargetFeature<"o32", "ABI", "MipsABIInfo::O32()",
"Enable o32 ABI">;
def FeatureN32 : SubtargetFeature<"n32", "MipsABI", "N32",
def FeatureN32 : SubtargetFeature<"n32", "ABI", "MipsABIInfo::N32()",
"Enable n32 ABI">;
def FeatureN64 : SubtargetFeature<"n64", "MipsABI", "N64",
def FeatureN64 : SubtargetFeature<"n64", "ABI", "MipsABIInfo::N64()",
"Enable n64 ABI">;
def FeatureEABI : SubtargetFeature<"eabi", "MipsABI", "EABI",
def FeatureEABI : SubtargetFeature<"eabi", "ABI", "MipsABIInfo::EABI()",
"Enable eabi ABI">;
def FeatureNoOddSPReg : SubtargetFeature<"nooddspreg", "UseOddSPReg", "false",
"Disable odd numbered single-precision "
@@ -241,10 +241,9 @@ Mips16TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
}
}

bool Mips16TargetLowering::
isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
unsigned NextStackOffset,
const MipsFunctionInfo& FI) const {
bool Mips16TargetLowering::isEligibleForTailCallOptimization(
const CCState &CCInfo, unsigned NextStackOffset,
const MipsFunctionInfo &FI) const {
// No tail call optimization for mips16.
return false;
}
@@ -30,9 +30,9 @@ namespace llvm {
MachineBasicBlock *MBB) const override;

private:
bool isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
unsigned NextStackOffset,
const MipsFunctionInfo& FI) const override;
bool isEligibleForTailCallOptimization(
const CCState &CCInfo, unsigned NextStackOffset,
const MipsFunctionInfo &FI) const override;

void setMips16HardFloatLibCalls();
@@ -419,6 +419,10 @@ defm : SetgePats<GPR64, SLT64, SLTu64>;
defm : SetgeImmPats<GPR64, SLTi64, SLTiu64>;

// truncate
def : MipsPat<(trunc (assertsext GPR64:$src)),
(EXTRACT_SUBREG GPR64:$src, sub_32)>;
def : MipsPat<(trunc (assertzext GPR64:$src)),
(EXTRACT_SUBREG GPR64:$src, sub_32)>;
def : MipsPat<(i32 (trunc GPR64:$src)),
(SLL (EXTRACT_SUBREG GPR64:$src, sub_32), 0)>;
contrib/llvm/lib/Target/Mips/MipsABIInfo.cpp (new file)
@@ -0,0 +1,45 @@
//===---- MipsABIInfo.cpp - Information about MIPS ABI's ------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MipsABIInfo.h"
#include "MipsRegisterInfo.h"

using namespace llvm;

namespace {
static const MCPhysReg O32IntRegs[4] = {Mips::A0, Mips::A1, Mips::A2, Mips::A3};

static const MCPhysReg Mips64IntRegs[8] = {
Mips::A0_64, Mips::A1_64, Mips::A2_64, Mips::A3_64,
Mips::T0_64, Mips::T1_64, Mips::T2_64, Mips::T3_64};
}

const ArrayRef<MCPhysReg> MipsABIInfo::GetByValArgRegs() const {
if (IsO32())
return makeArrayRef(O32IntRegs);
if (IsN32() || IsN64())
return makeArrayRef(Mips64IntRegs);
llvm_unreachable("Unhandled ABI");
}

const ArrayRef<MCPhysReg> MipsABIInfo::GetVarArgRegs() const {
if (IsO32())
return makeArrayRef(O32IntRegs);
if (IsN32() || IsN64())
return makeArrayRef(Mips64IntRegs);
llvm_unreachable("Unhandled ABI");
}

unsigned MipsABIInfo::GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const {
if (IsO32())
return CC != CallingConv::Fast ? 16 : 0;
if (IsN32() || IsN64() || IsEABI())
return 0;
llvm_unreachable("Unhandled ABI");
}
contrib/llvm/lib/Target/Mips/MipsABIInfo.h (new file)
@@ -0,0 +1,61 @@
//===---- MipsABIInfo.h - Information about MIPS ABI's --------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef MIPSABIINFO_H
#define MIPSABIINFO_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/IR/CallingConv.h"

namespace llvm {

class MipsABIInfo {
public:
enum class ABI { Unknown, O32, N32, N64, EABI };

protected:
ABI ThisABI;

public:
MipsABIInfo(ABI ThisABI) : ThisABI(ThisABI) {}

static MipsABIInfo Unknown() { return MipsABIInfo(ABI::Unknown); }
static MipsABIInfo O32() { return MipsABIInfo(ABI::O32); }
static MipsABIInfo N32() { return MipsABIInfo(ABI::N32); }
static MipsABIInfo N64() { return MipsABIInfo(ABI::N64); }
static MipsABIInfo EABI() { return MipsABIInfo(ABI::EABI); }

bool IsKnown() const { return ThisABI != ABI::Unknown; }
bool IsO32() const { return ThisABI == ABI::O32; }
bool IsN32() const { return ThisABI == ABI::N32; }
bool IsN64() const { return ThisABI == ABI::N64; }
bool IsEABI() const { return ThisABI == ABI::EABI; }
ABI GetEnumValue() const { return ThisABI; }

/// The registers to use for byval arguments.
const ArrayRef<MCPhysReg> GetByValArgRegs() const;

/// The registers to use for the variable argument list.
const ArrayRef<MCPhysReg> GetVarArgRegs() const;

/// Obtain the size of the area allocated by the callee for arguments.
/// CallingConv::FastCall affects the value for O32.
unsigned GetCalleeAllocdArgSizeInBytes(CallingConv::ID CC) const;

/// Ordering of ABI's
/// MipsGenSubtargetInfo.inc will use this to resolve conflicts when given
/// multiple ABI options.
bool operator<(const MipsABIInfo Other) const {
return ThisABI < Other.GetEnumValue();
}
};
}

#endif
@@ -317,11 +317,11 @@ void MipsAsmPrinter::emitFrameDirective() {

/// Emit Set directives.
const char *MipsAsmPrinter::getCurrentABIString() const {
switch (Subtarget->getTargetABI()) {
case MipsSubtarget::O32: return "abi32";
case MipsSubtarget::N32: return "abiN32";
case MipsSubtarget::N64: return "abi64";
case MipsSubtarget::EABI: return "eabi32"; // TODO: handle eabi64
switch (Subtarget->getABI().GetEnumValue()) {
case MipsABIInfo::ABI::O32: return "abi32";
case MipsABIInfo::ABI::N32: return "abiN32";
case MipsABIInfo::ABI::N64: return "abi64";
case MipsABIInfo::ABI::EABI: return "eabi32"; // TODO: handle eabi64
default: llvm_unreachable("Unknown Mips ABI");
}
}
@@ -471,14 +471,12 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
return false;
case 'z': {
// $0 if zero, regular printing otherwise
if (MO.getType() != MachineOperand::MO_Immediate)
return true;
int64_t Val = MO.getImm();
if (Val)
O << Val;
else
if (MO.getType() == MachineOperand::MO_Immediate && MO.getImm() == 0) {
O << "$0";
return false;
return false;
}
// If not, call printOperand as normal.
break;
}
case 'D': // Second part of a double word register operand
case 'L': // Low order register of a double word register operand
@@ -669,9 +667,7 @@ printFCCOperand(const MachineInstr *MI, int opNum, raw_ostream &O,
}

void MipsAsmPrinter::EmitStartOfAsmFile(Module &M) {
// TODO: Need to add -mabicalls and -mno-abicalls flags.
// Currently we assume that -mabicalls is the default.
bool IsABICalls = true;
bool IsABICalls = Subtarget->isABICalls();
if (IsABICalls) {
getTargetStreamer().emitDirectiveAbiCalls();
Reloc::Model RM = TM.getRelocationModel();
contrib/llvm/lib/Target/Mips/MipsCCState.cpp (new file)
@@ -0,0 +1,142 @@
//===---- MipsCCState.cpp - CCState with Mips specific extensions ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MipsCCState.h"
#include "MipsSubtarget.h"
#include "llvm/IR/Module.h"

using namespace llvm;

/// This function returns true if CallSym is a long double emulation routine.
static bool isF128SoftLibCall(const char *CallSym) {
const char *const LibCalls[] = {
"__addtf3", "__divtf3", "__eqtf2", "__extenddftf2",
"__extendsftf2", "__fixtfdi", "__fixtfsi", "__fixtfti",
"__fixunstfdi", "__fixunstfsi", "__fixunstfti", "__floatditf",
"__floatsitf", "__floattitf", "__floatunditf", "__floatunsitf",
"__floatuntitf", "__getf2", "__gttf2", "__letf2",
"__lttf2", "__multf3", "__netf2", "__powitf2",
"__subtf3", "__trunctfdf2", "__trunctfsf2", "__unordtf2",
"ceill", "copysignl", "cosl", "exp2l",
"expl", "floorl", "fmal", "fmodl",
"log10l", "log2l", "logl", "nearbyintl",
"powl", "rintl", "sinl", "sqrtl",
"truncl"};

const char *const *End = LibCalls + array_lengthof(LibCalls);

// Check that LibCalls is sorted alphabetically.
MipsTargetLowering::LTStr Comp;

#ifndef NDEBUG
for (const char *const *I = LibCalls; I < End - 1; ++I)
assert(Comp(*I, *(I + 1)));
#endif

return std::binary_search(LibCalls, End, CallSym, Comp);
}

/// This function returns true if Ty is fp128, {f128} or i128 which was
/// originally a fp128.
static bool originalTypeIsF128(const Type *Ty, const SDNode *CallNode) {
if (Ty->isFP128Ty())
return true;

if (Ty->isStructTy() && Ty->getStructNumElements() == 1 &&
Ty->getStructElementType(0)->isFP128Ty())
return true;

const ExternalSymbolSDNode *ES =
dyn_cast_or_null<const ExternalSymbolSDNode>(CallNode);

// If the Ty is i128 and the function being called is a long double emulation
// routine, then the original type is f128.
return (ES && Ty->isIntegerTy(128) && isF128SoftLibCall(ES->getSymbol()));
}

MipsCCState::SpecialCallingConvType
MipsCCState::getSpecialCallingConvForCallee(const SDNode *Callee,
const MipsSubtarget &Subtarget) {
MipsCCState::SpecialCallingConvType SpecialCallingConv = NoSpecialCallingConv;
if (Subtarget.inMips16HardFloat()) {
if (const GlobalAddressSDNode *G =
dyn_cast<const GlobalAddressSDNode>(Callee)) {
llvm::StringRef Sym = G->getGlobal()->getName();
Function *F = G->getGlobal()->getParent()->getFunction(Sym);
if (F && F->hasFnAttribute("__Mips16RetHelper")) {
SpecialCallingConv = Mips16RetHelperConv;
}
}
}
return SpecialCallingConv;
}

void MipsCCState::PreAnalyzeCallResultForF128(
const SmallVectorImpl<ISD::InputArg> &Ins,
const TargetLowering::CallLoweringInfo &CLI) {
for (unsigned i = 0; i < Ins.size(); ++i) {
OriginalArgWasF128.push_back(
originalTypeIsF128(CLI.RetTy, CLI.Callee.getNode()));
OriginalArgWasFloat.push_back(CLI.RetTy->isFloatingPointTy());
}
}

/// Identify lowered values that originated from f128 arguments and record
/// this for use by RetCC_MipsN.
void MipsCCState::PreAnalyzeReturnForF128(
const SmallVectorImpl<ISD::OutputArg> &Outs) {
const MachineFunction &MF = getMachineFunction();
for (unsigned i = 0; i < Outs.size(); ++i) {
OriginalArgWasF128.push_back(
originalTypeIsF128(MF.getFunction()->getReturnType(), nullptr));
OriginalArgWasFloat.push_back(
MF.getFunction()->getReturnType()->isFloatingPointTy());
}
}

/// Identify lowered values that originated from f128 arguments and record
/// this.
void MipsCCState::PreAnalyzeCallOperands(
const SmallVectorImpl<ISD::OutputArg> &Outs,
std::vector<TargetLowering::ArgListEntry> &FuncArgs,
const SDNode *CallNode) {
for (unsigned i = 0; i < Outs.size(); ++i) {
OriginalArgWasF128.push_back(
originalTypeIsF128(FuncArgs[Outs[i].OrigArgIndex].Ty, CallNode));
OriginalArgWasFloat.push_back(
FuncArgs[Outs[i].OrigArgIndex].Ty->isFloatingPointTy());
CallOperandIsFixed.push_back(Outs[i].IsFixed);
}
}

/// Identify lowered values that originated from f128 arguments and record
/// this.
void MipsCCState::PreAnalyzeFormalArgumentsForF128(
const SmallVectorImpl<ISD::InputArg> &Ins) {
const MachineFunction &MF = getMachineFunction();
for (unsigned i = 0; i < Ins.size(); ++i) {
Function::const_arg_iterator FuncArg = MF.getFunction()->arg_begin();

// SRet arguments cannot originate from f128 or {f128} returns so we just
// push false. We have to handle this specially since SRet arguments
// aren't mapped to an original argument.
if (Ins[i].Flags.isSRet()) {
OriginalArgWasF128.push_back(false);
OriginalArgWasFloat.push_back(false);
continue;
}

assert(Ins[i].OrigArgIndex < MF.getFunction()->arg_size());
std::advance(FuncArg, Ins[i].OrigArgIndex);

OriginalArgWasF128.push_back(
originalTypeIsF128(FuncArg->getType(), nullptr));
OriginalArgWasFloat.push_back(FuncArg->getType()->isFloatingPointTy());
}
}
contrib/llvm/lib/Target/Mips/MipsCCState.h (new file)
@@ -0,0 +1,137 @@
//===---- MipsCCState.h - CCState with Mips specific extensions -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef MIPSCCSTATE_H
#define MIPSCCSTATE_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "MipsISelLowering.h"

namespace llvm {
class SDNode;
class MipsSubtarget;

class MipsCCState : public CCState {
public:
enum SpecialCallingConvType { Mips16RetHelperConv, NoSpecialCallingConv };

/// Determine the SpecialCallingConvType for the given callee
static SpecialCallingConvType
getSpecialCallingConvForCallee(const SDNode *Callee,
const MipsSubtarget &Subtarget);

private:
/// Identify lowered values that originated from f128 arguments and record
/// this for use by RetCC_MipsN.
void PreAnalyzeCallResultForF128(const SmallVectorImpl<ISD::InputArg> &Ins,
const TargetLowering::CallLoweringInfo &CLI);

/// Identify lowered values that originated from f128 arguments and record
/// this for use by RetCC_MipsN.
void PreAnalyzeReturnForF128(const SmallVectorImpl<ISD::OutputArg> &Outs);

/// Identify lowered values that originated from f128 arguments and record
/// this.
void
PreAnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
std::vector<TargetLowering::ArgListEntry> &FuncArgs,
const SDNode *CallNode);

/// Identify lowered values that originated from f128 arguments and record
/// this.
void
PreAnalyzeFormalArgumentsForF128(const SmallVectorImpl<ISD::InputArg> &Ins);

/// Records whether the value has been lowered from an f128.
SmallVector<bool, 4> OriginalArgWasF128;

/// Records whether the value has been lowered from float.
SmallVector<bool, 4> OriginalArgWasFloat;

/// Records whether the value was a fixed argument.
/// See ISD::OutputArg::IsFixed,
SmallVector<bool, 4> CallOperandIsFixed;

// Used to handle MIPS16-specific calling convention tweaks.
// FIXME: This should probably be a fully fledged calling convention.
SpecialCallingConvType SpecialCallingConv;

public:
MipsCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
LLVMContext &C,
SpecialCallingConvType SpecialCC = NoSpecialCallingConv)
: CCState(CC, isVarArg, MF, TM, locs, C), SpecialCallingConv(SpecialCC) {}

void
AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn,
std::vector<TargetLowering::ArgListEntry> &FuncArgs,
const SDNode *CallNode) {
PreAnalyzeCallOperands(Outs, FuncArgs, CallNode);
CCState::AnalyzeCallOperands(Outs, Fn);
OriginalArgWasF128.clear();
OriginalArgWasFloat.clear();
CallOperandIsFixed.clear();
}

// The AnalyzeCallOperands in the base class is not usable since we must
// provide a means of accessing ArgListEntry::IsFixed. Delete them from this
// class. This doesn't stop them being used via the base class though.
void AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) LLVM_DELETED_FUNCTION;
void AnalyzeCallOperands(const SmallVectorImpl<MVT> &Outs,
SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
CCAssignFn Fn) LLVM_DELETED_FUNCTION;

void AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn) {
PreAnalyzeFormalArgumentsForF128(Ins);
CCState::AnalyzeFormalArguments(Ins, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
}

void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
CCAssignFn Fn,
const TargetLowering::CallLoweringInfo &CLI) {
PreAnalyzeCallResultForF128(Ins, CLI);
CCState::AnalyzeCallResult(Ins, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
}

void AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
CCAssignFn Fn) {
PreAnalyzeReturnForF128(Outs);
CCState::AnalyzeReturn(Outs, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
}

bool CheckReturn(const SmallVectorImpl<ISD::OutputArg> &ArgsFlags,
CCAssignFn Fn) {
PreAnalyzeReturnForF128(ArgsFlags);
bool Return = CCState::CheckReturn(ArgsFlags, Fn);
OriginalArgWasFloat.clear();
OriginalArgWasF128.clear();
return Return;
}

bool WasOriginalArgF128(unsigned ValNo) { return OriginalArgWasF128[ValNo]; }
bool WasOriginalArgFloat(unsigned ValNo) {
return OriginalArgWasFloat[ValNo];
}
bool IsCallOperandFixed(unsigned ValNo) { return CallOperandIsFixed[ValNo]; }
SpecialCallingConvType getSpecialCallingConv() { return SpecialCallingConv; }
};
}

#endif
@ -10,8 +10,42 @@
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>:
CCIf<!strconcat("State.getTarget().getSubtarget<MipsSubtarget>().", F), A>;
class CCIfSubtarget<string F, CCAction A, string Invert = "">
: CCIf<!strconcat(Invert,
"State.getMachineFunction().getTarget()."
"getSubtarget<const MipsSubtarget>().",
F),
A>;

// The inverse of CCIfSubtarget
class CCIfSubtargetNot<string F, CCAction A> : CCIfSubtarget<F, A, "!">;

// For soft-float, f128 values are returned in A0_64 rather than V1_64.
def RetCC_F128SoftFloat : CallingConv<[
CCAssignToReg<[V0_64, A0_64]>
]>;

// For hard-float, f128 values are returned as a pair of f64's rather than a
// pair of i64's.
def RetCC_F128HardFloat : CallingConv<[
CCBitConvertToType<f64>,

// Contrary to the ABI documentation, a struct containing a long double is
// returned in $f0, and $f1 instead of the usual $f0, and $f2. This is to
// match the de facto ABI as implemented by GCC.
CCIfInReg<CCAssignToReg<[D0_64, D1_64]>>,

CCAssignToReg<[D0_64, D2_64]>
]>;

// Handle F128 specially since we can't identify the original type during the
// tablegen-erated code.
def RetCC_F128 : CallingConv<[
CCIfSubtarget<"abiUsesSoftFloat()",
CCIfType<[i64], CCDelegateTo<RetCC_F128SoftFloat>>>,
CCIfSubtargetNot<"abiUsesSoftFloat()",
CCIfType<[i64], CCDelegateTo<RetCC_F128HardFloat>>>
]>;

//===----------------------------------------------------------------------===//
// Mips O32 Calling Convention
@ -29,23 +63,43 @@ def RetCC_MipsO32 : CallingConv<[
// f64 arguments are returned in D0_64 and D2_64 in FP64bit mode or
// in D0 and D1 in FP32bit mode.
CCIfType<[f64], CCIfSubtarget<"isFP64bit()", CCAssignToReg<[D0_64, D2_64]>>>,
CCIfType<[f64], CCIfSubtarget<"isNotFP64bit()", CCAssignToReg<[D0, D1]>>>
CCIfType<[f64], CCIfSubtargetNot<"isFP64bit()", CCAssignToReg<[D0, D1]>>>
]>;

def CC_MipsO32_FP32 : CustomCallingConv;
def CC_MipsO32_FP64 : CustomCallingConv;

def CC_MipsO32_FP : CallingConv<[
CCIfSubtargetNot<"isFP64bit()", CCDelegateTo<CC_MipsO32_FP32>>,
CCIfSubtarget<"isFP64bit()", CCDelegateTo<CC_MipsO32_FP64>>
]>;

//===----------------------------------------------------------------------===//
// Mips N32/64 Calling Convention
//===----------------------------------------------------------------------===//

def CC_MipsN_SoftFloat : CallingConv<[
CCAssignToRegWithShadow<[A0, A1, A2, A3,
T0, T1, T2, T3],
[D12_64, D13_64, D14_64, D15_64,
D16_64, D17_64, D18_64, D19_64]>,
CCAssignToStack<4, 8>
]>;

def CC_MipsN : CallingConv<[
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
CCIfType<[i8, i16, i32],
CCIfSubtargetNot<"isLittle()",
CCIfInReg<CCPromoteToUpperBitsInType<i64>>>>,

// All integers (except soft-float integers) are promoted to 64-bit.
CCIfType<[i8, i16, i32],
CCIf<"!static_cast<MipsCCState *>(&State)->WasOriginalArgFloat(ValNo)",
CCPromoteToType<i64>>>,

// The only i32's we have left are soft-float arguments.
CCIfSubtarget<"abiUsesSoftFloat()", CCIfType<[i32], CCDelegateTo<CC_MipsN_SoftFloat>>>,

// Integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToRegWithShadow<[A0, A1, A2, A3,
T0, T1, T2, T3],
[F12, F13, F14, F15,
F16, F17, F18, F19]>>,

CCIfType<[i64], CCAssignToRegWithShadow<[A0_64, A1_64, A2_64, A3_64,
T0_64, T1_64, T2_64, T3_64],
[D12_64, D13_64, D14_64, D15_64,
@ -64,29 +118,49 @@ def CC_MipsN : CallingConv<[
T0_64, T1_64, T2_64, T3_64]>>,

// All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
CCIfType<[i32, f32], CCAssignToStack<4, 8>>,
CCIfType<[f32], CCAssignToStack<4, 8>>,
CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

// N32/64 variable arguments.
// All arguments are passed in integer registers.
def CC_MipsN_VarArg : CallingConv<[
// Promote i8/i16 arguments to i32.
CCIfType<[i8, i16], CCPromoteToType<i32>>,
// All integers are promoted to 64-bit.
CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

CCIfType<[i32, f32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,
CCIfType<[f32], CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3]>>,

CCIfType<[i64, f64], CCAssignToReg<[A0_64, A1_64, A2_64, A3_64,
T0_64, T1_64, T2_64, T3_64]>>,

// All stack parameter slots become 64-bit doublewords and are 8-byte aligned.
CCIfType<[i32, f32], CCAssignToStack<4, 8>>,
CCIfType<[f32], CCAssignToStack<4, 8>>,
CCIfType<[i64, f64], CCAssignToStack<8, 8>>
]>;

def RetCC_MipsN : CallingConv<[
// i32 are returned in registers V0, V1
CCIfType<[i32], CCAssignToReg<[V0, V1]>>,
// f128 needs to be handled similarly to f32 and f64. However, f128 is not
// legal and is lowered to i128 which is further lowered to a pair of i64's.
// This presents us with a problem for the calling convention since hard-float
// still needs to pass them in FPU registers, and soft-float needs to use $v0,
// and $a0 instead of the usual $v0, and $v1. We therefore resort to a
// pre-analyze (see PreAnalyzeReturnForF128()) step to pass information on
// whether the result was originally an f128 into the tablegen-erated code.
//
// f128 should only occur for the N64 ABI where long double is 128-bit. On
// N32, long double is equivalent to double.
CCIfType<[i64],
CCIf<"static_cast<MipsCCState *>(&State)->WasOriginalArgF128(ValNo)",
CCDelegateTo<RetCC_F128>>>,

// Aggregate returns are positioned at the lowest address in the slot for
// both little and big-endian targets. When passing in registers, this
// requires that big-endian targets shift the value into the upper bits.
CCIfSubtarget<"isLittle()",
CCIfType<[i8, i16, i32, i64], CCIfInReg<CCPromoteToType<i64>>>>,
CCIfSubtargetNot<"isLittle()",
CCIfType<[i8, i16, i32, i64],
CCIfInReg<CCPromoteToUpperBitsInType<i64>>>>,

// i64 are returned in registers V0_64, V1_64
CCIfType<[i64], CCAssignToReg<[V0_64, V1_64]>>,
@ -98,12 +172,6 @@ def RetCC_MipsN : CallingConv<[
CCIfType<[f64], CCAssignToReg<[D0_64, D2_64]>>
]>;

// In soft-mode, register A0_64, instead of V1_64, is used to return a long
// double value.
def RetCC_F128Soft : CallingConv<[
CCIfType<[i64], CCAssignToReg<[V0_64, A0_64]>>
]>;

//===----------------------------------------------------------------------===//
// Mips EABI Calling Convention
//===----------------------------------------------------------------------===//
@ -119,11 +187,11 @@ def CC_MipsEABI : CallingConv<[
CCIfType<[f32], CCIfSubtarget<"isSingleFloat()",
CCAssignToReg<[F12, F13, F14, F15, F16, F17, F18, F19]>>>,

CCIfType<[f32], CCIfSubtarget<"isNotSingleFloat()",
CCIfType<[f32], CCIfSubtargetNot<"isSingleFloat()",
CCAssignToReg<[F12, F14, F16, F18]>>>,

// The first 4 double fp arguments are passed in single fp registers.
CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()",
CCIfType<[f64], CCIfSubtargetNot<"isSingleFloat()",
CCAssignToReg<[D6, D7, D8, D9]>>>,

// Integer values get stored in stack slots that are 4 bytes in
@ -132,7 +200,7 @@ def CC_MipsEABI : CallingConv<[

// Integer values get stored in stack slots that are 8 bytes in
// size and 8-byte aligned.
CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToStack<8, 8>>>
CCIfType<[f64], CCIfSubtargetNot<"isSingleFloat()", CCAssignToStack<8, 8>>>
]>;

def RetCC_MipsEABI : CallingConv<[
@ -143,7 +211,7 @@ def RetCC_MipsEABI : CallingConv<[
CCIfType<[f32], CCAssignToReg<[F0, F1]>>,

// f64 are returned in register D0
CCIfType<[f64], CCIfSubtarget<"isNotSingleFloat()", CCAssignToReg<[D0]>>>
CCIfType<[f64], CCIfSubtargetNot<"isSingleFloat()", CCAssignToReg<[D0]>>>
]>;

//===----------------------------------------------------------------------===//
@ -151,16 +219,20 @@ def RetCC_MipsEABI : CallingConv<[
//===----------------------------------------------------------------------===//
def CC_MipsO32_FastCC : CallingConv<[
// f64 arguments are passed in double-precision floating pointer registers.
CCIfType<[f64], CCIfSubtarget<"isNotFP64bit()",
CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7,
D8, D9]>>>,
CCIfType<[f64], CCIfSubtarget<"isFP64bit()",
CCIfType<[f64], CCIfSubtargetNot<"isFP64bit()",
CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6,
D7, D8, D9]>>>,
CCIfType<[f64], CCIfSubtarget<"isFP64bit()", CCIfSubtarget<"useOddSPReg()",
CCAssignToReg<[D0_64, D1_64, D2_64, D3_64,
D4_64, D5_64, D6_64, D7_64,
D8_64, D9_64, D10_64, D11_64,
D12_64, D13_64, D14_64, D15_64,
D16_64, D17_64, D18_64,
D19_64]>>>,
D19_64]>>>>,
CCIfType<[f64], CCIfSubtarget<"isFP64bit()", CCIfSubtarget<"noOddSPReg()",
CCAssignToReg<[D0_64, D2_64, D4_64, D6_64,
D8_64, D10_64, D12_64, D14_64,
D16_64, D18_64]>>>>,

// Stack parameter slots for f64 are 64-bit doublewords and 8-byte aligned.
CCIfType<[f64], CCAssignToStack<8, 8>>
@ -192,7 +264,7 @@ def CC_Mips_FastCC : CallingConv<[

// Integer arguments are passed in integer registers. All scratch registers,
// except for AT, V0 and T9, are available to be used as argument registers.
CCIfType<[i32], CCIfSubtarget<"isNotTargetNaCl()",
CCIfType<[i32], CCIfSubtargetNot<"isTargetNaCl()",
CCAssignToReg<[A0, A1, A2, A3, T0, T1, T2, T3, T4, T5, T6, T7, T8, V1]>>>,

// In NaCl, T6, T7 and T8 are reserved and not available as argument
@ -219,13 +291,6 @@ def CC_Mips_FastCC : CallingConv<[
CCDelegateTo<CC_MipsN_FastCC>
]>;

//==

def CC_Mips16RetHelper : CallingConv<[
// Integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>
]>;

//===----------------------------------------------------------------------===//
// Mips Calling Convention Dispatch
//===----------------------------------------------------------------------===//
@ -237,6 +302,66 @@ def RetCC_Mips : CallingConv<[
CCDelegateTo<RetCC_MipsO32>
]>;

def CC_Mips_ByVal : CallingConv<[
CCIfSubtarget<"isABI_O32()", CCIfByVal<CCPassByVal<4, 4>>>,
CCIfByVal<CCPassByVal<8, 8>>
]>;

def CC_Mips16RetHelper : CallingConv<[
CCIfByVal<CCDelegateTo<CC_Mips_ByVal>>,

// Integer arguments are passed in integer registers.
CCIfType<[i32], CCAssignToReg<[V0, V1, A0, A1]>>
]>;

def CC_Mips_FixedArg : CallingConv<[
// Mips16 needs special handling on some functions.
CCIf<"State.getCallingConv() != CallingConv::Fast",
CCIf<"static_cast<MipsCCState *>(&State)->getSpecialCallingConv() == "
"MipsCCState::Mips16RetHelperConv",
CCDelegateTo<CC_Mips16RetHelper>>>,

CCIfByVal<CCDelegateTo<CC_Mips_ByVal>>,

// f128 needs to be handled similarly to f32 and f64 on hard-float. However,
// f128 is not legal and is lowered to i128 which is further lowered to a pair
// of i64's.
// This presents us with a problem for the calling convention since hard-float
// still needs to pass them in FPU registers. We therefore resort to a
// pre-analyze (see PreAnalyzeFormalArgsForF128()) step to pass information on
// whether the argument was originally an f128 into the tablegen-erated code.
//
// f128 should only occur for the N64 ABI where long double is 128-bit. On
// N32, long double is equivalent to double.
CCIfType<[i64],
CCIfSubtargetNot<"abiUsesSoftFloat()",
CCIf<"static_cast<MipsCCState *>(&State)->WasOriginalArgF128(ValNo)",
CCBitConvertToType<f64>>>>,

CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_Mips_FastCC>>,

// FIXME: There wasn't an EABI case in the original code and it seems unlikely
// that it's the same as CC_MipsN
CCIfSubtarget<"isABI_O32()", CCDelegateTo<CC_MipsO32_FP>>,
CCDelegateTo<CC_MipsN>
]>;

def CC_Mips_VarArg : CallingConv<[
CCIfByVal<CCDelegateTo<CC_Mips_ByVal>>,

// FIXME: There wasn't an EABI case in the original code and it seems unlikely
// that it's the same as CC_MipsN_VarArg
CCIfSubtarget<"isABI_O32()", CCDelegateTo<CC_MipsO32_FP>>,
CCDelegateTo<CC_MipsN_VarArg>
]>;

def CC_Mips : CallingConv<[
CCIfVarArg<
CCIf<"!static_cast<MipsCCState *>(&State)->IsCallOperandFixed(ValNo)",
CCDelegateTo<CC_Mips_VarArg>>>,
CCDelegateTo<CC_Mips_FixedArg>
]>;

//===----------------------------------------------------------------------===//
// Callee-saved register lists.
//===----------------------------------------------------------------------===//
@ -343,7 +343,6 @@ namespace {

const TargetMachine &TM;
bool IsPIC;
unsigned ABI;
const MipsSubtarget *STI;
const Mips16InstrInfo *TII;
MipsFunctionInfo *MFI;
@ -366,8 +365,7 @@ namespace {
static char ID;
MipsConstantIslands(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm),
IsPIC(TM.getRelocationModel() == Reloc::PIC_),
ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()), STI(nullptr),
IsPIC(TM.getRelocationModel() == Reloc::PIC_), STI(nullptr),
MF(nullptr), MCP(nullptr), PrescannedForConstants(false) {}

const char *getPassName() const override {
File diff suppressed because it is too large
@ -210,6 +210,7 @@ namespace llvm {
|
||||
//===--------------------------------------------------------------------===//
|
||||
class MipsFunctionInfo;
|
||||
class MipsSubtarget;
|
||||
class MipsCCState;
|
||||
|
||||
class MipsTargetLowering : public TargetLowering {
|
||||
bool isMicroMips;
|
||||
@ -259,6 +260,8 @@ namespace llvm {
|
||||
}
|
||||
};
|
||||
|
||||
void HandleByVal(CCState *, unsigned &, unsigned) const override;
|
||||
|
||||
protected:
|
||||
SDValue getGlobalReg(SelectionDAG &DAG, EVT Ty) const;
|
||||
|
||||
@ -338,101 +341,6 @@ namespace llvm {
|
||||
bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
|
||||
CallLoweringInfo &CLI, SDValue Callee, SDValue Chain) const;
|
||||
|
||||
/// ByValArgInfo - Byval argument information.
|
||||
struct ByValArgInfo {
|
||||
unsigned FirstIdx; // Index of the first register used.
|
||||
unsigned NumRegs; // Number of registers used for this argument.
|
||||
unsigned Address; // Offset of the stack area used to pass this argument.
|
||||
|
||||
ByValArgInfo() : FirstIdx(0), NumRegs(0), Address(0) {}
|
||||
};
|
||||
|
||||
/// MipsCC - This class provides methods used to analyze formal and call
|
||||
/// arguments and inquire about calling convention information.
|
||||
class MipsCC {
|
||||
public:
|
||||
enum SpecialCallingConvType {
|
||||
Mips16RetHelperConv, NoSpecialCallingConv
|
||||
};
|
||||
|
||||
MipsCC(CallingConv::ID CallConv, bool IsO32, bool IsFP64, CCState &Info,
|
||||
SpecialCallingConvType SpecialCallingConv = NoSpecialCallingConv);
|
||||
|
||||
|
||||
void analyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
|
||||
bool IsVarArg, bool IsSoftFloat,
|
||||
const SDNode *CallNode,
|
||||
std::vector<ArgListEntry> &FuncArgs);
|
||||
void analyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
|
||||
bool IsSoftFloat,
|
||||
Function::const_arg_iterator FuncArg);
|
||||
|
||||
void analyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
|
||||
bool IsSoftFloat, const SDNode *CallNode,
|
||||
const Type *RetTy) const;
|
||||
|
||||
void analyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
|
||||
bool IsSoftFloat, const Type *RetTy) const;
|
||||
|
||||
const CCState &getCCInfo() const { return CCInfo; }
|
||||
|
||||
/// hasByValArg - Returns true if function has byval arguments.
|
||||
bool hasByValArg() const { return !ByValArgs.empty(); }
|
||||
|
||||
/// regSize - Size (in number of bits) of integer registers.
|
||||
unsigned regSize() const { return IsO32 ? 4 : 8; }
|
||||
|
||||
/// numIntArgRegs - Number of integer registers available for calls.
|
||||
unsigned numIntArgRegs() const;
|
||||
|
||||
/// reservedArgArea - The size of the area the caller reserves for
|
||||
/// register arguments. This is 16-byte if ABI is O32.
|
||||
unsigned reservedArgArea() const;
|
||||
|
||||
/// Return pointer to array of integer argument registers.
|
||||
const MCPhysReg *intArgRegs() const;
|
||||
|
||||
typedef SmallVectorImpl<ByValArgInfo>::const_iterator byval_iterator;
|
||||
byval_iterator byval_begin() const { return ByValArgs.begin(); }
|
||||
byval_iterator byval_end() const { return ByValArgs.end(); }
|
||||
|
||||
private:
|
||||
void handleByValArg(unsigned ValNo, MVT ValVT, MVT LocVT,
|
||||
CCValAssign::LocInfo LocInfo,
|
||||
ISD::ArgFlagsTy ArgFlags);
|
||||
|
||||
/// useRegsForByval - Returns true if the calling convention allows the
|
||||
/// use of registers to pass byval arguments.
|
||||
bool useRegsForByval() const { return CallConv != CallingConv::Fast; }
|
||||
|
||||
/// Return the function that analyzes fixed argument list functions.
|
||||
llvm::CCAssignFn *fixedArgFn() const;
|
||||
|
||||
/// Return the function that analyzes variable argument list functions.
|
||||
llvm::CCAssignFn *varArgFn() const;
|
||||
|
||||
const MCPhysReg *shadowRegs() const;
|
||||
|
||||
void allocateRegs(ByValArgInfo &ByVal, unsigned ByValSize,
|
||||
unsigned Align);
|
||||
|
||||
/// Return the type of the register which is used to pass an argument or
|
||||
/// return a value. This function returns f64 if the argument is an i64
|
||||
/// value which has been generated as a result of softening an f128 value.
|
||||
/// Otherwise, it just returns VT.
|
||||
MVT getRegVT(MVT VT, const Type *OrigTy, const SDNode *CallNode,
|
||||
bool IsSoftFloat) const;
|
||||
|
||||
template<typename Ty>
|
||||
void analyzeReturn(const SmallVectorImpl<Ty> &RetVals, bool IsSoftFloat,
|
||||
const SDNode *CallNode, const Type *RetTy) const;
|
||||
|
||||
CCState &CCInfo;
|
||||
CallingConv::ID CallConv;
|
||||
bool IsO32, IsFP64;
|
||||
SpecialCallingConvType SpecialCallingConv;
|
||||
SmallVector<ByValArgInfo, 2> ByValArgs;
|
||||
};
|
||||
protected:
|
||||
SDValue lowerLOAD(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue lowerSTORE(SDValue Op, SelectionDAG &DAG) const;
|
||||
@ -461,14 +369,12 @@ namespace llvm {
|
||||
SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
|
||||
unsigned Flag) const;
|
||||
|
||||
MipsCC::SpecialCallingConvType getSpecialCallingConv(SDValue Callee) const;
|
||||
// Lower Operand helpers
|
||||
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
|
||||
CallingConv::ID CallConv, bool isVarArg,
|
||||
const SmallVectorImpl<ISD::InputArg> &Ins,
|
||||
SDLoc dl, SelectionDAG &DAG,
|
||||
SmallVectorImpl<SDValue> &InVals,
|
||||
const SDNode *CallNode, const Type *RetTy) const;
|
||||
const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl,
|
||||
SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
|
||||
TargetLowering::CallLoweringInfo &CLI) const;
|
||||
|
||||
// Lower Operand specifics
|
||||
SDValue lowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
|
||||
@ -482,6 +388,7 @@ namespace llvm {
|
||||
SDValue lowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue lowerSETCC(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue lowerVAARG(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue lowerFABS(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
|
||||
@ -497,33 +404,34 @@ namespace llvm {
|
||||
/// isEligibleForTailCallOptimization - Check whether the call is eligible
|
||||
/// for tail call optimization.
|
||||
virtual bool
|
||||
isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
|
||||
isEligibleForTailCallOptimization(const CCState &CCInfo,
|
||||
unsigned NextStackOffset,
|
||||
const MipsFunctionInfo& FI) const = 0;
|
||||
const MipsFunctionInfo &FI) const = 0;
|
||||
|
||||
/// copyByValArg - Copy argument registers which were used to pass a byval
|
||||
/// argument to the stack. Create a stack frame object for the byval
|
||||
/// argument.
|
||||
void copyByValRegs(SDValue Chain, SDLoc DL,
|
||||
std::vector<SDValue> &OutChains, SelectionDAG &DAG,
|
||||
const ISD::ArgFlagsTy &Flags,
|
||||
void copyByValRegs(SDValue Chain, SDLoc DL, std::vector<SDValue> &OutChains,
|
||||
SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
|
||||
SmallVectorImpl<SDValue> &InVals,
|
||||
const Argument *FuncArg,
|
||||
const MipsCC &CC, const ByValArgInfo &ByVal) const;
|
||||
const Argument *FuncArg, unsigned FirstReg,
|
||||
unsigned LastReg, const CCValAssign &VA,
|
||||
MipsCCState &State) const;
|
||||
|
||||
/// passByValArg - Pass a byval argument in registers or on stack.
|
||||
void passByValArg(SDValue Chain, SDLoc DL,
|
||||
std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
|
||||
std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
|
||||
SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
|
||||
MachineFrameInfo *MFI, SelectionDAG &DAG, SDValue Arg,
|
||||
const MipsCC &CC, const ByValArgInfo &ByVal,
|
||||
const ISD::ArgFlagsTy &Flags, bool isLittle) const;
|
||||
unsigned FirstReg, unsigned LastReg,
|
||||
const ISD::ArgFlagsTy &Flags, bool isLittle,
|
||||
const CCValAssign &VA) const;
|
||||
|
||||
/// writeVarArgRegs - Write variable function arguments passed in registers
|
||||
/// to the stack. Also create a stack frame object for the first variable
|
||||
/// argument.
|
||||
void writeVarArgRegs(std::vector<SDValue> &OutChains, const MipsCC &CC,
|
||||
SDValue Chain, SDLoc DL, SelectionDAG &DAG) const;
|
||||
void writeVarArgRegs(std::vector<SDValue> &OutChains, SDValue Chain,
|
||||
SDLoc DL, SelectionDAG &DAG, CCState &State) const;
|
||||
|
||||
SDValue
|
||||
LowerFormalArguments(SDValue Chain,
|
||||
|
@ -178,6 +178,38 @@ class SW_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
let mayStore = 1;
}

class SW_FT2<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem2";
let mayStore = 1;
}

class LW_FT2<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem2";
let mayLoad = 1;
}

class SW_FT3<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs), (ins RC:$rt, mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(OpNode RC:$rt, addrDefault:$addr)], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem3";
let mayStore = 1;
}

class LW_FT3<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode= null_frag> :
InstSE<(outs RC:$rt), (ins mem:$addr), !strconcat(opstr, "\t$rt, $addr"),
[(set RC:$rt, (OpNode addrDefault:$addr))], Itin, FrmFI, opstr> {
let DecoderMethod = "DecodeFMem3";
let mayLoad = 1;
}

class MADDS_FT<string opstr, RegisterOperand RC, InstrItinClass Itin,
SDPatternOperator OpNode = null_frag> :
InstSE<(outs RC:$fd), (ins RC:$fr, RC:$fs, RC:$ft),
@ -407,24 +439,24 @@ def SDC1 : MMRel, SW_FT<"sdc1", AFGR64Opnd, II_SDC1, store>, LW_FM<0x3d>,
// Cop2 Memory Instructions
// FIXME: These aren't really FPU instructions and as such don't belong in this
// file
def LWC2 : LW_FT<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>,
def LWC2 : LW_FT2<"lwc2", COP2Opnd, NoItinerary, load>, LW_FM<0x32>,
ISA_MIPS1_NOT_32R6_64R6;
def SWC2 : SW_FT<"swc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3a>,
def SWC2 : SW_FT2<"swc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3a>,
ISA_MIPS1_NOT_32R6_64R6;
def LDC2 : LW_FT<"ldc2", COP2Opnd, NoItinerary, load>, LW_FM<0x36>,
def LDC2 : LW_FT2<"ldc2", COP2Opnd, NoItinerary, load>, LW_FM<0x36>,
ISA_MIPS2_NOT_32R6_64R6;
def SDC2 : SW_FT<"sdc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3e>,
def SDC2 : SW_FT2<"sdc2", COP2Opnd, NoItinerary, store>, LW_FM<0x3e>,
ISA_MIPS2_NOT_32R6_64R6;

// Cop3 Memory Instructions
// FIXME: These aren't really FPU instructions and as such don't belong in this
// file
let DecoderNamespace = "COP3_" in {
def LWC3 : LW_FT<"lwc3", COP3Opnd, NoItinerary, load>, LW_FM<0x33>;
def SWC3 : SW_FT<"swc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3b>;
def LDC3 : LW_FT<"ldc3", COP3Opnd, NoItinerary, load>, LW_FM<0x37>,
def LWC3 : LW_FT3<"lwc3", COP3Opnd, NoItinerary, load>, LW_FM<0x33>;
def SWC3 : SW_FT3<"swc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3b>;
def LDC3 : LW_FT3<"ldc3", COP3Opnd, NoItinerary, load>, LW_FM<0x37>,
ISA_MIPS2;
def SDC3 : SW_FT<"sdc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3f>,
def SDC3 : SW_FT3<"sdc3", COP3Opnd, NoItinerary, store>, LW_FM<0x3f>,
ISA_MIPS2;
}
@ -1414,13 +1414,15 @@ def TLBR : TLB<"tlbr">, COP0_TLB_FM<0x01>;
def TLBWI : TLB<"tlbwi">, COP0_TLB_FM<0x02>;
def TLBWR : TLB<"tlbwr">, COP0_TLB_FM<0x06>;

class CacheOp<string instr_asm, Operand MemOpnd, RegisterOperand GPROpnd> :
class CacheOp<string instr_asm, Operand MemOpnd> :
InstSE<(outs), (ins MemOpnd:$addr, uimm5:$hint),
!strconcat(instr_asm, "\t$hint, $addr"), [], NoItinerary, FrmOther>;
!strconcat(instr_asm, "\t$hint, $addr"), [], NoItinerary, FrmOther> {
let DecoderMethod = "DecodeCacheOp";
}

def CACHE : CacheOp<"cache", mem, GPR32Opnd>, CACHEOP_FM<0b101111>,
def CACHE : CacheOp<"cache", mem>, CACHEOP_FM<0b101111>,
INSN_MIPS3_32_NOT_32R6_64R6;
def PREF : CacheOp<"pref", mem, GPR32Opnd>, CACHEOP_FM<0b110011>,
def PREF : CacheOp<"pref", mem>, CACHEOP_FM<0b110011>,
INSN_MIPS3_32_NOT_32R6_64R6;

//===----------------------------------------------------------------------===//
@ -64,8 +64,8 @@ namespace {
MipsLongBranch(TargetMachine &tm)
: MachineFunctionPass(ID), TM(tm),
IsPIC(TM.getRelocationModel() == Reloc::PIC_),
ABI(TM.getSubtarget<MipsSubtarget>().getTargetABI()),
LongBranchSeqSize(!IsPIC ? 2 : (ABI == MipsSubtarget::N64 ? 10 :
ABI(TM.getSubtarget<MipsSubtarget>().getABI()),
LongBranchSeqSize(!IsPIC ? 2 : (ABI.IsN64() ? 10 :
(!TM.getSubtarget<MipsSubtarget>().isTargetNaCl() ? 9 : 10))) {}

const char *getPassName() const override {
@ -86,7 +86,7 @@ namespace {
MachineFunction *MF;
SmallVector<MBBInfo, 16> MBBInfos;
bool IsPIC;
unsigned ABI;
MipsABIInfo ABI;
unsigned LongBranchSeqSize;
};

@ -273,7 +273,7 @@ void MipsLongBranch::expandToLongBranch(MBBInfo &I) {
const MipsSubtarget &Subtarget = TM.getSubtarget<MipsSubtarget>();
unsigned BalOp = Subtarget.hasMips32r6() ? Mips::BAL : Mips::BAL_BR;

if (ABI != MipsSubtarget::N64) {
if (!ABI.IsN64()) {
// $longbr:
// addiu $sp, $sp, -8
// sw $ra, 0($sp)
@ -149,6 +149,12 @@ getReservedRegs(const MachineFunction &MF) const {
for (unsigned I = 0; I < array_lengthof(ReservedGPR64); ++I)
Reserved.set(ReservedGPR64[I]);

// For mno-abicalls, GP is a program invariant!
if (!Subtarget.isABICalls()) {
Reserved.set(Mips::GP);
Reserved.set(Mips::GP_64);
}

if (Subtarget.isFP64bit()) {
// Reserve all registers in AFGR64.
for (RegIter Reg = Mips::AFGR64RegClass.begin(),
@ -325,6 +325,8 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB,
|
||||
// We re-use the same spill slot each time so that the stack frame doesn't
|
||||
// grow too much in functions with a large number of moves.
|
||||
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC2);
|
||||
if (!Subtarget.isLittle())
|
||||
std::swap(LoReg, HiReg);
|
||||
TII.storeRegToStack(MBB, I, LoReg, I->getOperand(1).isKill(), FI, RC, &TRI,
|
||||
0);
|
||||
TII.storeRegToStack(MBB, I, HiReg, I->getOperand(2).isKill(), FI, RC, &TRI,
|
||||
@ -369,6 +371,7 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
|
||||
unsigned DstReg = I->getOperand(0).getReg();
|
||||
unsigned SrcReg = I->getOperand(1).getReg();
|
||||
unsigned N = I->getOperand(2).getImm();
|
||||
int64_t Offset = 4 * (Subtarget.isLittle() ? N : (1 - N));
|
||||
|
||||
// It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
|
||||
// the cases where mfhc1 is not available). 64-bit architectures and
|
||||
@ -385,7 +388,7 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB,
|
||||
int FI = MF.getInfo<MipsFunctionInfo>()->getMoveF64ViaSpillFI(RC);
|
||||
TII.storeRegToStack(MBB, I, SrcReg, I->getOperand(1).isKill(), FI, RC, &TRI,
|
||||
0);
|
||||
TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, N * 4);
|
||||
TII.loadRegFromStack(MBB, I, DstReg, FI, RC2, &TRI, Offset);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -1167,15 +1167,14 @@ MipsSETargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
}
}

bool MipsSETargetLowering::
isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
unsigned NextStackOffset,
const MipsFunctionInfo& FI) const {
bool MipsSETargetLowering::isEligibleForTailCallOptimization(
const CCState &CCInfo, unsigned NextStackOffset,
const MipsFunctionInfo &FI) const {
if (!EnableMipsTailCalls)
return false;

// Return false if either the callee or caller has a byval argument.
if (MipsCCInfo.hasByValArg() || FI.hasByvalArg())
if (CCInfo.getInRegsParamsCount() > 0 || FI.hasByvalArg())
return false;

// Return true if the callee's argument area is no larger than the
@ -50,9 +50,9 @@ namespace llvm {
const TargetRegisterClass *getRepRegClassFor(MVT VT) const override;

private:
bool isEligibleForTailCallOptimization(const MipsCC &MipsCCInfo,
unsigned NextStackOffset,
const MipsFunctionInfo& FI) const override;
bool isEligibleForTailCallOptimization(
const CCState &CCInfo, unsigned NextStackOffset,
const MipsFunctionInfo &FI) const override;

void
getOpndList(SmallVectorImpl<SDValue> &Ops,
@ -106,13 +106,14 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
|
||||
const std::string &FS, bool little,
|
||||
MipsTargetMachine *_TM)
|
||||
: MipsGenSubtargetInfo(TT, CPU, FS), MipsArchVersion(Mips32),
|
||||
MipsABI(UnknownABI), IsLittle(little), IsSingleFloat(false),
|
||||
IsFPXX(false), IsFP64bit(false), UseOddSPReg(true), IsNaN2008bit(false),
|
||||
IsGP64bit(false), HasVFPU(false), HasCnMips(false), IsLinux(true),
|
||||
HasMips3_32(false), HasMips3_32r2(false), HasMips4_32(false),
|
||||
HasMips4_32r2(false), HasMips5_32r2(false), InMips16Mode(false),
|
||||
InMips16HardFloat(Mips16HardFloat), InMicroMipsMode(false), HasDSP(false),
|
||||
HasDSPR2(false), AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
|
||||
ABI(MipsABIInfo::Unknown()), IsLittle(little), IsSingleFloat(false),
|
||||
IsFPXX(false), NoABICalls(false), IsFP64bit(false), UseOddSPReg(true),
|
||||
IsNaN2008bit(false), IsGP64bit(false), HasVFPU(false), HasCnMips(false),
|
||||
IsLinux(true), HasMips3_32(false), HasMips3_32r2(false),
|
||||
HasMips4_32(false), HasMips4_32r2(false), HasMips5_32r2(false),
|
||||
InMips16Mode(false), InMips16HardFloat(Mips16HardFloat),
|
||||
InMicroMipsMode(false), HasDSP(false), HasDSPR2(false),
|
||||
AllowMixed16_32(Mixed16_32 | Mips_Os16), Os16(Mips_Os16),
|
||||
HasMSA(false), TM(_TM), TargetTriple(TT),
|
||||
DL(computeDataLayout(initializeSubtargetDependencies(CPU, FS, TM))),
|
||||
TSInfo(DL), JITInfo(), InstrInfo(MipsInstrInfo::create(*this)),
|
||||
@ -135,7 +136,7 @@ MipsSubtarget::MipsSubtarget(const std::string &TT, const std::string &CPU,
|
||||
report_fatal_error("Code generation for MIPS-V is not implemented", false);
|
||||
|
||||
// Assert exactly one ABI was chosen.
|
||||
assert(MipsABI != UnknownABI);
|
||||
assert(ABI.IsKnown());
|
||||
assert((((getFeatureBits() & Mips::FeatureO32) != 0) +
|
||||
((getFeatureBits() & Mips::FeatureEABI) != 0) +
|
||||
((getFeatureBits() & Mips::FeatureN32) != 0) +
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include "llvm/MC/MCInstrItineraries.h"
|
||||
#include "llvm/Support/ErrorHandling.h"
|
||||
#include "llvm/Target/TargetSubtargetInfo.h"
|
||||
#include "MipsABIInfo.h"
|
||||
#include <string>
|
||||
|
||||
#define GET_SUBTARGETINFO_HEADER
|
||||
@ -36,13 +37,6 @@ class MipsTargetMachine;
|
||||
class MipsSubtarget : public MipsGenSubtargetInfo {
|
||||
virtual void anchor();
|
||||
|
||||
public:
|
||||
// NOTE: O64 will not be supported.
|
||||
enum MipsABIEnum {
|
||||
UnknownABI, O32, N32, N64, EABI
|
||||
};
|
||||
|
||||
protected:
|
||||
enum MipsArchEnum {
|
||||
Mips1, Mips2, Mips32, Mips32r2, Mips32r6, Mips3, Mips4, Mips5, Mips64,
|
||||
Mips64r2, Mips64r6
|
||||
@ -51,8 +45,8 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
|
||||
// Mips architecture version
|
||||
MipsArchEnum MipsArchVersion;
|
||||
|
||||
// Mips supported ABIs
|
||||
MipsABIEnum MipsABI;
|
||||
// Selected ABI
|
||||
MipsABIInfo ABI;
|
||||
|
||||
// IsLittle - The target is Little Endian
|
||||
bool IsLittle;
|
||||
@ -65,6 +59,9 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
|
||||
// IsFPXX - MIPS O32 modeless ABI.
|
||||
bool IsFPXX;
|
||||
|
||||
// NoABICalls - Disable SVR4-style position-independent code.
|
||||
bool NoABICalls;
|
||||
|
||||
// IsFP64bit - The target processor has 64-bit floating point registers.
|
||||
bool IsFP64bit;
|
||||
|
||||
@ -157,12 +154,12 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
|
||||
CodeGenOpt::Level getOptLevelToEnablePostRAScheduler() const override;
|
||||
|
||||
/// Only O32 and EABI supported right now.
|
||||
bool isABI_EABI() const { return MipsABI == EABI; }
|
||||
bool isABI_N64() const { return MipsABI == N64; }
|
||||
bool isABI_N32() const { return MipsABI == N32; }
|
||||
bool isABI_O32() const { return MipsABI == O32; }
|
||||
bool isABI_EABI() const { return ABI.IsEABI(); }
|
||||
bool isABI_N64() const { return ABI.IsN64(); }
|
||||
bool isABI_N32() const { return ABI.IsN32(); }
|
||||
bool isABI_O32() const { return ABI.IsO32(); }
|
||||
bool isABI_FPXX() const { return isABI_O32() && IsFPXX; }
|
||||
unsigned getTargetABI() const { return MipsABI; }
|
||||
const MipsABIInfo &getABI() const { return ABI; }
|
||||
|
||||
/// This constructor initializes the data members to match that
|
||||
/// of the specified triple.
|
||||
@ -200,16 +197,16 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
|
||||
bool hasCnMips() const { return HasCnMips; }
|
||||
|
||||
bool isLittle() const { return IsLittle; }
|
||||
bool isABICalls() const { return !NoABICalls; }
|
||||
bool isFPXX() const { return IsFPXX; }
|
||||
bool isFP64bit() const { return IsFP64bit; }
|
||||
bool useOddSPReg() const { return UseOddSPReg; }
|
||||
bool noOddSPReg() const { return !UseOddSPReg; }
|
||||
bool isNaN2008() const { return IsNaN2008bit; }
|
||||
bool isNotFP64bit() const { return !IsFP64bit; }
|
||||
bool isGP64bit() const { return IsGP64bit; }
|
||||
bool isGP32bit() const { return !IsGP64bit; }
|
||||
unsigned getGPRSizeInBytes() const { return isGP64bit() ? 8 : 4; }
|
||||
bool isSingleFloat() const { return IsSingleFloat; }
|
||||
bool isNotSingleFloat() const { return !IsSingleFloat; }
|
||||
bool hasVFPU() const { return HasVFPU; }
|
||||
bool inMips16Mode() const { return InMips16Mode; }
|
||||
bool inMips16ModeDefault() const {
|
||||
@ -248,7 +245,6 @@ class MipsSubtarget : public MipsGenSubtargetInfo {
|
||||
bool os16() const { return Os16;};
|
||||
|
||||
bool isTargetNaCl() const { return TargetTriple.isOSNaCl(); }
|
||||
bool isNotTargetNaCl() const { return !TargetTriple.isOSNaCl(); }
|
||||
|
||||
// for now constant islands are on for the whole compilation unit but we only
|
||||
// really use them if in addition we are in mips16 mode
|
||||
|
@ -261,7 +261,7 @@ void PPCInstPrinter::printAbsBranchOperand(const MCInst *MI, unsigned OpNo,
if (!MI->getOperand(OpNo).isImm())
return printOperand(MI, OpNo, O);

O << (int)MI->getOperand(OpNo).getImm()*4;
O << SignExtend32<32>((unsigned)MI->getOperand(OpNo).getImm() << 2);
}
@ -184,6 +184,23 @@ class PPCTargetELFStreamer : public PPCTargetStreamer {
|
||||
if ((Flags & ELF::EF_PPC64_ABI) == 0)
|
||||
MCA.setELFHeaderEFlags(Flags | 2);
|
||||
}
|
||||
void emitAssignment(MCSymbol *Symbol, const MCExpr *Value) override {
|
||||
// When encoding an assignment to set symbol A to symbol B, also copy
|
||||
// the st_other bits encoding the local entry point offset.
|
||||
if (Value->getKind() != MCExpr::SymbolRef)
|
||||
return;
|
||||
const MCSymbol &RhsSym =
|
||||
static_cast<const MCSymbolRefExpr *>(Value)->getSymbol();
|
||||
MCSymbolData &Data = getStreamer().getOrCreateSymbolData(&RhsSym);
|
||||
MCSymbolData &SymbolData = getStreamer().getOrCreateSymbolData(Symbol);
|
||||
// The "other" values are stored in the last 6 bits of the second byte.
|
||||
// The traditional defines for STO values assume the full byte and thus
|
||||
// the shift to pack it.
|
||||
unsigned Other = MCELF::getOther(SymbolData) << 2;
|
||||
Other &= ~ELF::STO_PPC64_LOCAL_MASK;
|
||||
Other |= (MCELF::getOther(Data) << 2) & ELF::STO_PPC64_LOCAL_MASK;
|
||||
MCELF::setOther(SymbolData, Other >> 2);
|
||||
}
|
||||
};
|
||||
|
||||
class PPCTargetMachOStreamer : public PPCTargetStreamer {
|
||||
|
@ -400,7 +400,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
const MachineOperand &MO = MI->getOperand(1);
|
||||
|
||||
// Map symbol -> label of TOC entry
|
||||
assert(MO.isGlobal() || MO.isCPI() || MO.isJTI());
|
||||
assert(MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isBlockAddress());
|
||||
MCSymbol *MOSymbol = nullptr;
|
||||
if (MO.isGlobal())
|
||||
MOSymbol = getSymbol(MO.getGlobal());
|
||||
@ -408,6 +408,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
MOSymbol = GetCPISymbol(MO.getIndex());
|
||||
else if (MO.isJTI())
|
||||
MOSymbol = GetJTISymbol(MO.getIndex());
|
||||
else if (MO.isBlockAddress())
|
||||
MOSymbol = GetBlockAddressSymbol(MO.getBlockAddress());
|
||||
|
||||
if (PL == PICLevel::Small) {
|
||||
const MCExpr *Exp =
|
||||
@ -431,6 +433,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
}
|
||||
case PPC::LDtocJTI:
|
||||
case PPC::LDtocCPT:
|
||||
case PPC::LDtocBA:
|
||||
case PPC::LDtoc: {
|
||||
// Transform %X3 = LDtoc <ga:@min1>, %X2
|
||||
LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, isDarwin);
|
||||
@ -441,7 +444,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
const MachineOperand &MO = MI->getOperand(1);
|
||||
|
||||
// Map symbol -> label of TOC entry
|
||||
assert(MO.isGlobal() || MO.isCPI() || MO.isJTI());
|
||||
assert(MO.isGlobal() || MO.isCPI() || MO.isJTI() || MO.isBlockAddress());
|
||||
MCSymbol *MOSymbol = nullptr;
|
||||
if (MO.isGlobal())
|
||||
MOSymbol = getSymbol(MO.getGlobal());
|
||||
@ -449,6 +452,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
MOSymbol = GetCPISymbol(MO.getIndex());
|
||||
else if (MO.isJTI())
|
||||
MOSymbol = GetJTISymbol(MO.getIndex());
|
||||
else if (MO.isBlockAddress())
|
||||
MOSymbol = GetBlockAddressSymbol(MO.getBlockAddress());
|
||||
|
||||
MCSymbol *TOCEntry = lookUpOrCreateTOCEntry(MOSymbol);
|
||||
|
||||
@ -470,7 +475,8 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
// reference the symbol directly.
|
||||
TmpInst.setOpcode(PPC::ADDIS8);
|
||||
const MachineOperand &MO = MI->getOperand(2);
|
||||
assert((MO.isGlobal() || MO.isCPI() || MO.isJTI()) &&
|
||||
assert((MO.isGlobal() || MO.isCPI() || MO.isJTI() ||
|
||||
MO.isBlockAddress()) &&
|
||||
"Invalid operand for ADDIStocHA!");
|
||||
MCSymbol *MOSymbol = nullptr;
|
||||
bool IsExternal = false;
|
||||
@ -490,9 +496,12 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
MOSymbol = GetCPISymbol(MO.getIndex());
|
||||
else if (MO.isJTI())
|
||||
MOSymbol = GetJTISymbol(MO.getIndex());
|
||||
else if (MO.isBlockAddress())
|
||||
MOSymbol = GetBlockAddressSymbol(MO.getBlockAddress());
|
||||
|
||||
if (IsExternal || IsNonLocalFunction || IsCommon || IsAvailExt ||
|
||||
MO.isJTI() || TM.getCodeModel() == CodeModel::Large)
|
||||
MO.isJTI() || MO.isBlockAddress() ||
|
||||
TM.getCodeModel() == CodeModel::Large)
|
||||
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
|
||||
|
||||
const MCExpr *Exp =
|
||||
@ -511,12 +520,17 @@ void PPCAsmPrinter::EmitInstruction(const MachineInstr *MI) {
|
||||
// associated TOC entry. Otherwise reference the symbol directly.
|
||||
TmpInst.setOpcode(PPC::LD);
|
||||
const MachineOperand &MO = MI->getOperand(1);
|
||||
assert((MO.isGlobal() || MO.isJTI() || MO.isCPI()) &&
|
||||
assert((MO.isGlobal() || MO.isCPI() || MO.isJTI() ||
|
||||
MO.isBlockAddress()) &&
|
||||
"Invalid operand for LDtocL!");
|
||||
MCSymbol *MOSymbol = nullptr;
|
||||
|
||||
if (MO.isJTI())
|
||||
MOSymbol = lookUpOrCreateTOCEntry(GetJTISymbol(MO.getIndex()));
|
||||
else if (MO.isBlockAddress()) {
|
||||
MOSymbol = GetBlockAddressSymbol(MO.getBlockAddress());
|
||||
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol);
|
||||
}
|
||||
else if (MO.isCPI()) {
|
||||
MOSymbol = GetCPISymbol(MO.getIndex());
|
||||
if (TM.getCodeModel() == CodeModel::Large)
|
||||
@ -941,7 +955,7 @@ bool PPCLinuxAsmPrinter::doFinalization(Module &M) {
|
||||
for (MapVector<MCSymbol*, MCSymbol*>::iterator I = TOC.begin(),
|
||||
E = TOC.end(); I != E; ++I) {
|
||||
OutStreamer.EmitLabel(I->second);
|
||||
MCSymbol *S = OutContext.GetOrCreateSymbol(I->first->getName());
|
||||
MCSymbol *S = I->first;
|
||||
if (isPPC64)
|
||||
TS.emitTCEntry(*S);
|
||||
else
|
||||
|
@ -153,7 +153,7 @@ class PPCFastISel final : public FastISel {
|
||||
unsigned DestReg, bool IsZExt);
|
||||
unsigned PPCMaterializeFP(const ConstantFP *CFP, MVT VT);
|
||||
unsigned PPCMaterializeGV(const GlobalValue *GV, MVT VT);
|
||||
unsigned PPCMaterializeInt(const Constant *C, MVT VT);
|
||||
unsigned PPCMaterializeInt(const Constant *C, MVT VT, bool UseSExt = true);
|
||||
unsigned PPCMaterialize32BitInt(int64_t Imm,
|
||||
const TargetRegisterClass *RC);
|
||||
unsigned PPCMaterialize64BitInt(int64_t Imm,
|
||||
@ -865,7 +865,7 @@ bool PPCFastISel::SelectFPTrunc(const Instruction *I) {
|
||||
}
|
||||
|
||||
// Move an i32 or i64 value in a GPR to an f64 value in an FPR.
|
||||
// FIXME: When direct register moves are implemented (see PowerISA 2.08),
|
||||
// FIXME: When direct register moves are implemented (see PowerISA 2.07),
|
||||
// those should be used instead of moving via a stack slot when the
|
||||
// subtarget permits.
|
||||
// FIXME: The code here is sloppy for the 4-byte case. Can use a 4-byte
|
||||
@ -898,10 +898,10 @@ unsigned PPCFastISel::PPCMoveToFPReg(MVT SrcVT, unsigned SrcReg,
|
||||
if (SrcVT == MVT::i32) {
|
||||
if (!IsSigned) {
|
||||
LoadOpc = PPC::LFIWZX;
|
||||
Addr.Offset = 4;
|
||||
Addr.Offset = (PPCSubTarget->isLittleEndian()) ? 0 : 4;
|
||||
} else if (PPCSubTarget->hasLFIWAX()) {
|
||||
LoadOpc = PPC::LFIWAX;
|
||||
Addr.Offset = 4;
|
||||
Addr.Offset = (PPCSubTarget->isLittleEndian()) ? 0 : 4;
|
||||
}
|
||||
}
|
||||
|
||||
@ -985,7 +985,7 @@ bool PPCFastISel::SelectIToFP(const Instruction *I, bool IsSigned) {
|
||||
|
||||
// Move the floating-point value in SrcReg into an integer destination
|
||||
// register, and return the register (or zero if we can't handle it).
|
||||
// FIXME: When direct register moves are implemented (see PowerISA 2.08),
|
||||
// FIXME: When direct register moves are implemented (see PowerISA 2.07),
|
||||
// those should be used instead of moving via a stack slot when the
|
||||
// subtarget permits.
|
||||
unsigned PPCFastISel::PPCMoveToIntReg(const Instruction *I, MVT VT,
|
||||
@ -1548,13 +1548,23 @@ bool PPCFastISel::SelectRet(const Instruction *I) {
|
||||
|
||||
// Special case for returning a constant integer of any size.
|
||||
// Materialize the constant as an i64 and copy it to the return
|
||||
// register. This avoids an unnecessary extend or truncate.
|
||||
// register. We still need to worry about properly extending the sign. E.g:
|
||||
// If the constant has only one bit, it means it is a boolean. Therefore
|
||||
// we can't use PPCMaterializeInt because it extends the sign which will
|
||||
// cause negations of the returned value to be incorrect as they are
|
||||
// implemented as the flip of the least significant bit.
|
||||
if (isa<ConstantInt>(*RV)) {
|
||||
const Constant *C = cast<Constant>(RV);
|
||||
unsigned SrcReg = PPCMaterializeInt(C, MVT::i64);
|
||||
unsigned RetReg = ValLocs[0].getLocReg();
|
||||
|
||||
CCValAssign &VA = ValLocs[0];
|
||||
|
||||
unsigned RetReg = VA.getLocReg();
|
||||
unsigned SrcReg = PPCMaterializeInt(C, MVT::i64,
|
||||
VA.getLocInfo() == CCValAssign::SExt);
|
||||
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
|
||||
TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg);
|
||||
TII.get(TargetOpcode::COPY), RetReg).addReg(SrcReg);
|
||||
|
||||
RetRegs.push_back(RetReg);
|
||||
|
||||
} else {
|
||||
@ -2014,7 +2024,8 @@ unsigned PPCFastISel::PPCMaterialize64BitInt(int64_t Imm,
|
||||
|
||||
// Materialize an integer constant into a register, and return
|
||||
// the register number (or zero if we failed to handle it).
|
||||
unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
|
||||
unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT,
|
||||
bool UseSExt) {
|
||||
// If we're using CR bit registers for i1 values, handle that as a special
|
||||
// case first.
|
||||
if (VT == MVT::i1 && PPCSubTarget->useCRBits()) {
|
||||
@ -2038,7 +2049,7 @@ unsigned PPCFastISel::PPCMaterializeInt(const Constant *C, MVT VT) {
|
||||
unsigned Opc = (VT == MVT::i64) ? PPC::LI8 : PPC::LI;
|
||||
unsigned ImmReg = createResultReg(RC);
|
||||
BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ImmReg)
|
||||
.addImm(CI->getSExtValue());
|
||||
.addImm( (UseSExt) ? CI->getSExtValue() : CI->getZExtValue() );
|
||||
return ImmReg;
|
||||
}
|
||||
|
||||
|
@ -505,7 +505,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
|
||||
MachineModuleInfo &MMI = MF.getMMI();
|
||||
const MCRegisterInfo *MRI = MMI.getContext().getRegisterInfo();
|
||||
DebugLoc dl;
|
||||
bool needsFrameMoves = MMI.hasDebugInfo() ||
|
||||
bool needsCFI = MMI.hasDebugInfo() ||
|
||||
MF.getFunction()->needsUnwindTableEntry();
|
||||
bool isPIC = MF.getTarget().getRelocationModel() == Reloc::PIC_;
|
||||
|
||||
@ -726,17 +726,28 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
|
||||
.addReg(ScratchReg);
|
||||
}
|
||||
|
||||
// Add the "machine moves" for the instructions we generated above, but in
|
||||
// reverse order.
|
||||
if (needsFrameMoves) {
|
||||
// Show update of SP.
|
||||
assert(NegFrameSize);
|
||||
unsigned CFIIndex = MMI.addFrameInst(
|
||||
MCCFIInstruction::createDefCfaOffset(nullptr, NegFrameSize));
|
||||
// Add Call Frame Information for the instructions we generated above.
|
||||
if (needsCFI) {
|
||||
unsigned CFIIndex;
|
||||
|
||||
if (HasBP) {
|
||||
// Define CFA in terms of BP. Do this in preference to using FP/SP,
|
||||
// because if the stack needed aligning then CFA won't be at a fixed
|
||||
// offset from FP/SP.
|
||||
unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
|
||||
CFIIndex = MMI.addFrameInst(
|
||||
MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
|
||||
} else {
|
||||
// Adjust the definition of CFA to account for the change in SP.
|
||||
assert(NegFrameSize);
|
||||
CFIIndex = MMI.addFrameInst(
|
||||
MCCFIInstruction::createDefCfaOffset(nullptr, NegFrameSize));
|
||||
}
|
||||
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
|
||||
.addCFIIndex(CFIIndex);
|
||||
|
||||
if (HasFP) {
|
||||
// Describe where FP was saved, at a fixed offset from CFA.
|
||||
unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
|
||||
CFIIndex = MMI.addFrameInst(
|
||||
MCCFIInstruction::createOffset(nullptr, Reg, FPOffset));
|
||||
@ -745,6 +756,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
|
||||
}
|
||||
|
||||
if (HasBP) {
|
||||
// Describe where BP was saved, at a fixed offset from CFA.
|
||||
unsigned Reg = MRI->getDwarfRegNum(BPReg, true);
|
||||
CFIIndex = MMI.addFrameInst(
|
||||
MCCFIInstruction::createOffset(nullptr, Reg, BPOffset));
|
||||
@ -753,6 +765,7 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
|
||||
}
|
||||
|
||||
if (MustSaveLR) {
|
||||
// Describe where LR was saved, at a fixed offset from CFA.
|
||||
unsigned Reg = MRI->getDwarfRegNum(LRReg, true);
|
||||
CFIIndex = MMI.addFrameInst(
|
||||
MCCFIInstruction::createOffset(nullptr, Reg, LROffset));
|
||||
@ -767,8 +780,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
|
||||
.addReg(SPReg)
|
||||
.addReg(SPReg);
|
||||
|
||||
if (needsFrameMoves) {
|
||||
// Mark effective beginning of when frame pointer is ready.
|
||||
if (!HasBP && needsCFI) {
|
||||
// Change the definition of CFA from SP+offset to FP+offset, because SP
|
||||
// will change at every alloca.
|
||||
unsigned Reg = MRI->getDwarfRegNum(FPReg, true);
|
||||
unsigned CFIIndex = MMI.addFrameInst(
|
||||
MCCFIInstruction::createDefCfaRegister(nullptr, Reg));
|
||||
@ -778,8 +792,9 @@ void PPCFrameLowering::emitPrologue(MachineFunction &MF) const {
|
||||
}
|
||||
}
|
||||
|
||||
if (needsFrameMoves) {
|
||||
// Add callee saved registers to move list.
|
||||
if (needsCFI) {
|
||||
// Describe where callee saved registers were saved, at fixed offsets from
|
||||
// CFA.
|
||||
const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
|
||||
for (unsigned I = 0, E = CSI.size(); I != E; ++I) {
|
||||
unsigned Reg = CSI[I].getReg();
|
||||
|
@ -173,10 +173,20 @@ namespace {
|
||||
/// a register. The case of adding a (possibly relocatable) constant to a
|
||||
/// register can be improved, but it is wrong to substitute Reg+Reg for
|
||||
/// Reg in an asm, because the load or store opcode would have to change.
|
||||
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
|
||||
bool SelectInlineAsmMemoryOperand(const SDValue &Op,
|
||||
char ConstraintCode,
|
||||
std::vector<SDValue> &OutOps) override {
|
||||
OutOps.push_back(Op);
|
||||
// We need to make sure that this one operand does not end up in r0
|
||||
// (because we might end up lowering this as 0(%op)).
|
||||
const TargetRegisterInfo *TRI = TM.getRegisterInfo();
|
||||
const TargetRegisterClass *TRC = TRI->getPointerRegClass(*MF, /*Kind=*/1);
|
||||
SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
|
||||
SDValue NewOp =
|
||||
SDValue(CurDAG->getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
|
||||
SDLoc(Op), Op.getValueType(),
|
||||
Op, RC), 0);
|
||||
|
||||
OutOps.push_back(NewOp);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1446,7 +1456,7 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
|
||||
|
||||
// For medium and large code model, we generate two instructions as
|
||||
// described below. Otherwise we allow SelectCodeCommon to handle this,
|
||||
// selecting one of LDtoc, LDtocJTI, and LDtocCPT.
|
||||
// selecting one of LDtoc, LDtocJTI, LDtocCPT, and LDtocBA.
|
||||
CodeModel::Model CModel = TM.getCodeModel();
|
||||
if (CModel != CodeModel::Medium && CModel != CodeModel::Large)
|
||||
break;
|
||||
@ -1463,7 +1473,8 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
|
||||
SDNode *Tmp = CurDAG->getMachineNode(PPC::ADDIStocHA, dl, MVT::i64,
|
||||
TOCbase, GA);
|
||||
|
||||
if (isa<JumpTableSDNode>(GA) || CModel == CodeModel::Large)
|
||||
if (isa<JumpTableSDNode>(GA) || isa<BlockAddressSDNode>(GA) ||
|
||||
CModel == CodeModel::Large)
|
||||
return CurDAG->getMachineNode(PPC::LDtocL, dl, MVT::i64, GA,
|
||||
SDValue(Tmp, 0));
|
||||
|
||||
|
@ -1631,8 +1631,16 @@ SDValue PPCTargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
|
||||
SDValue PPCTargetLowering::LowerBlockAddress(SDValue Op,
|
||||
SelectionDAG &DAG) const {
|
||||
EVT PtrVT = Op.getValueType();
|
||||
BlockAddressSDNode *BASDN = cast<BlockAddressSDNode>(Op);
|
||||
const BlockAddress *BA = BASDN->getBlockAddress();
|
||||
|
||||
const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
|
||||
// 64-bit SVR4 ABI code is always position-independent.
|
||||
// The actual BlockAddress is stored in the TOC.
|
||||
if (Subtarget.isSVR4ABI() && Subtarget.isPPC64()) {
|
||||
SDValue GA = DAG.getTargetBlockAddress(BA, PtrVT, BASDN->getOffset());
|
||||
return DAG.getNode(PPCISD::TOC_ENTRY, SDLoc(BASDN), MVT::i64, GA,
|
||||
DAG.getRegister(PPC::X2, MVT::i64));
|
||||
}
|
||||
|
||||
unsigned MOHiFlag, MOLoFlag;
|
||||
bool isPIC = GetLabelAccessInfo(DAG.getTarget(), MOHiFlag, MOLoFlag);
|
||||
@ -2695,7 +2703,7 @@ PPCTargetLowering::LowerFormalArguments_64SVR4(
|
||||
int FI;
|
||||
if (HasParameterArea ||
|
||||
ArgSize + ArgOffset > LinkageSize + Num_GPR_Regs * PtrByteSize)
|
||||
FI = MFI->CreateFixedObject(ArgSize, ArgOffset, true);
|
||||
FI = MFI->CreateFixedObject(ArgSize, ArgOffset, false);
|
||||
else
|
||||
FI = MFI->CreateStackObject(ArgSize, Align, false);
|
||||
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
|
||||
@ -3061,7 +3069,7 @@ PPCTargetLowering::LowerFormalArguments_Darwin(
|
||||
CurArgOffset = CurArgOffset + (4 - ObjSize);
|
||||
}
|
||||
// The value of the object is its address.
|
||||
int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, true);
|
||||
int FI = MFI->CreateFixedObject(ObjSize, CurArgOffset, false);
|
||||
SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
|
||||
InVals.push_back(FIN);
|
||||
if (ObjSize==1 || ObjSize==2) {
|
||||
@ -8974,6 +8982,12 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
|
||||
&PPC::G8RCRegClass);
|
||||
}
|
||||
|
||||
// GCC accepts 'cc' as an alias for 'cr0', and we need to do the same.
|
||||
if (!R.second && StringRef("{cc}").equals_lower(Constraint)) {
|
||||
R.first = PPC::CR0;
|
||||
R.second = &PPC::CRRCRegClass;
|
||||
}
|
||||
|
||||
return R;
|
||||
}
|
||||
|
||||
@ -9002,37 +9016,42 @@ void PPCTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
|
||||
case 'P': {
|
||||
ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op);
|
||||
if (!CST) return; // Must be an immediate to match.
|
||||
unsigned Value = CST->getZExtValue();
|
||||
int64_t Value = CST->getSExtValue();
|
||||
EVT TCVT = MVT::i64; // All constants taken to be 64 bits so that negative
|
||||
// numbers are printed as such.
|
||||
switch (Letter) {
|
||||
default: llvm_unreachable("Unknown constraint letter!");
|
||||
case 'I': // "I" is a signed 16-bit constant.
|
||||
if ((short)Value == (int)Value)
|
||||
Result = DAG.getTargetConstant(Value, Op.getValueType());
|
||||
if (isInt<16>(Value))
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
case 'J': // "J" is a constant with only the high-order 16 bits nonzero.
|
||||
if (isShiftedUInt<16, 16>(Value))
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
case 'L': // "L" is a signed 16-bit constant shifted left 16 bits.
|
||||
if ((short)Value == 0)
|
||||
Result = DAG.getTargetConstant(Value, Op.getValueType());
|
||||
if (isShiftedInt<16, 16>(Value))
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
case 'K': // "K" is a constant with only the low-order 16 bits nonzero.
|
||||
if ((Value >> 16) == 0)
|
||||
Result = DAG.getTargetConstant(Value, Op.getValueType());
|
||||
if (isUInt<16>(Value))
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
case 'M': // "M" is a constant that is greater than 31.
|
||||
if (Value > 31)
|
||||
Result = DAG.getTargetConstant(Value, Op.getValueType());
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
case 'N': // "N" is a positive constant that is an exact power of two.
|
||||
if ((int)Value > 0 && isPowerOf2_32(Value))
|
||||
Result = DAG.getTargetConstant(Value, Op.getValueType());
|
||||
if (Value > 0 && isPowerOf2_64(Value))
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
case 'O': // "O" is the constant zero.
|
||||
if (Value == 0)
|
||||
Result = DAG.getTargetConstant(Value, Op.getValueType());
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
case 'P': // "P" is a constant whose negation is a signed 16-bit constant.
|
||||
if ((short)-Value == (int)-Value)
|
||||
Result = DAG.getTargetConstant(Value, Op.getValueType());
|
||||
if (isInt<16>(-Value))
|
||||
Result = DAG.getTargetConstant(Value, TCVT);
|
||||
break;
|
||||
}
|
||||
break;
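The rewrite above re-validates the PowerPC immediate constraint letters against a 64-bit value. A small sketch of how such a constraint is used from C or C++; the code is an assumed example, not from the tree:

    // Hypothetical user code: "I" asks for a signed 16-bit immediate, which is
    // exactly the isInt<16>(Value) check introduced above. "b" keeps the base
    // register away from r0, since addi treats r0 as the literal zero.
    static inline long add_small_imm(long x) {
      long r;
      asm("addi %0, %1, %2" : "=r"(r) : "b"(x), "I"(1000));
      return r;
    }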
@ -789,7 +789,7 @@ let canFoldAsLoad = 1, PPC970_Unit = 2 in {
|
||||
def LD : DSForm_1<58, 0, (outs g8rc:$rD), (ins memrix:$src),
|
||||
"ld $rD, $src", IIC_LdStLD,
|
||||
[(set i64:$rD, (aligned4load ixaddr:$src))]>, isPPC64;
|
||||
// The following three definitions are selected for small code model only.
|
||||
// The following four definitions are selected for small code model only.
|
||||
// Otherwise, we need to create two instructions to form a 32-bit offset,
|
||||
// so we have a custom matcher for TOC_ENTRY in PPCDAGToDAGIsel::Select().
|
||||
def LDtoc: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
|
||||
@ -804,6 +804,10 @@ def LDtocCPT: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
|
||||
"#LDtocCPT",
|
||||
[(set i64:$rD,
|
||||
(PPCtoc_entry tconstpool:$disp, i64:$reg))]>, isPPC64;
|
||||
def LDtocBA: Pseudo<(outs g8rc:$rD), (ins tocentry:$disp, g8rc:$reg),
|
||||
"#LDtocCPT",
|
||||
[(set i64:$rD,
|
||||
(PPCtoc_entry tblockaddress:$disp, i64:$reg))]>, isPPC64;
|
||||
|
||||
let hasSideEffects = 1, isCodeGenOnly = 1, RST = 2, Defs = [X2] in
|
||||
def LDinto_toc: DSForm_1<58, 0, (outs), (ins memrix:$src),
|
||||
|
@ -188,13 +188,6 @@ def CR6 : CR<6, "cr6", [CR6LT, CR6GT, CR6EQ, CR6UN]>, DwarfRegNum<[74, 74]>;
|
||||
def CR7 : CR<7, "cr7", [CR7LT, CR7GT, CR7EQ, CR7UN]>, DwarfRegNum<[75, 75]>;
|
||||
}
|
||||
|
||||
// The full condition-code register. This is not modeled fully, but defined
|
||||
// here primarily, for compatibility with gcc, to allow the inline asm "cc"
|
||||
// clobber specification to work.
|
||||
def CC : PPCReg<"cc">, DwarfRegAlias<CR0> {
|
||||
let Aliases = [CR0, CR1, CR2, CR3, CR4, CR5, CR6, CR7];
|
||||
}
|
||||
|
||||
// Link register
|
||||
def LR : SPR<8, "lr">, DwarfRegNum<[-2, 65]>;
|
||||
//let Aliases = [LR] in
|
||||
@ -308,7 +301,3 @@ def CARRYRC : RegisterClass<"PPC", [i32], 32, (add CARRY)> {
|
||||
let CopyCost = -1;
|
||||
}
|
||||
|
||||
def CCRC : RegisterClass<"PPC", [i32], 32, (add CC)> {
|
||||
let isAllocatable = 0;
|
||||
}
|
||||
|
||||
|
@ -2829,6 +2829,9 @@ bool X86FastISel::FastLowerCall(CallLoweringInfo &CLI) {
       // VExt has not been implemented, so this should be impossible to reach
       // for now. However, fallback to Selection DAG isel once implemented.
       return false;
+    case CCValAssign::AExtUpper:
+    case CCValAssign::SExtUpper:
+    case CCValAssign::ZExtUpper:
     case CCValAssign::FPExt:
       llvm_unreachable("Unexpected loc info!");
     case CCValAssign::Indirect:
@ -1623,15 +1623,30 @@ LinearFunctionTestReplace(Loop *L,
|
||||
// compare against the post-incremented value, otherwise we must compare
|
||||
// against the preincremented value.
|
||||
if (L->getExitingBlock() == L->getLoopLatch()) {
|
||||
// Add one to the "backedge-taken" count to get the trip count.
|
||||
// This addition may overflow, which is valid as long as the comparison is
|
||||
// truncated to BackedgeTakenCount->getType().
|
||||
IVCount = SE->getAddExpr(BackedgeTakenCount,
|
||||
SE->getConstant(BackedgeTakenCount->getType(), 1));
|
||||
// The BackedgeTaken expression contains the number of times that the
|
||||
// backedge branches to the loop header. This is one less than the
|
||||
// number of times the loop executes, so use the incremented indvar.
|
||||
CmpIndVar = IndVar->getIncomingValueForBlock(L->getExitingBlock());
|
||||
llvm::Value *IncrementedIndvar =
|
||||
IndVar->getIncomingValueForBlock(L->getExitingBlock());
|
||||
const auto *IncrementedIndvarSCEV =
|
||||
cast<SCEVAddRecExpr>(SE->getSCEV(IncrementedIndvar));
|
||||
// It is unsafe to use the incremented indvar if it has a wrapping flag, we
|
||||
// don't want to compare against a poison value. Check the SCEV that
|
||||
// corresponds to the incremented indvar, the SCEVExpander will only insert
|
||||
// flags in the IR if the SCEV originally had wrapping flags.
|
||||
// FIXME: In theory, SCEV could drop flags even though they exist in IR.
|
||||
// A more robust solution would involve getting a new expression for
|
||||
// CmpIndVar by applying non-NSW/NUW AddExprs.
|
||||
if (!ScalarEvolution::maskFlags(IncrementedIndvarSCEV->getNoWrapFlags(),
|
||||
SCEV::FlagNUW | SCEV::FlagNSW)) {
|
||||
// Add one to the "backedge-taken" count to get the trip count.
|
||||
// This addition may overflow, which is valid as long as the comparison is
|
||||
// truncated to BackedgeTakenCount->getType().
|
||||
IVCount =
|
||||
SE->getAddExpr(BackedgeTakenCount,
|
||||
SE->getConstant(BackedgeTakenCount->getType(), 1));
|
||||
CmpIndVar = IncrementedIndvar;
|
||||
}
|
||||
}
|
||||
|
||||
Value *ExitCnt = genLoopLimit(IndVar, IVCount, L, Rewriter, SE);
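A brief illustration of the reasoning above, using an assumed loop rather than one from the test suite: when the latch is the exiting block, the loop runs one more time than its backedge is taken, so the limit must be formed as backedge-taken count plus one and compared against the incremented induction variable, and only when that increment cannot wrap into a poison value:

    // Illustrative only: the backedge-taken count is n - 1, the trip count n.
    // indvars may rewrite the exit test to compare the incremented counter
    // against n, which is what the wrap-flag check above guards.
    void scale(float *a, unsigned n) {
      for (unsigned i = 0; i != n; ++i)
        a[i] *= 2.0f;
    }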
@ -27,18 +27,18 @@
|
||||
//
|
||||
// header:
|
||||
// br %cond, label %if.then, label %if.else
|
||||
// / \
|
||||
// / \
|
||||
// / \
|
||||
// + +
|
||||
// + +
|
||||
// + +
|
||||
// if.then: if.else:
|
||||
// %lt = load %addr_l %le = load %addr_l
|
||||
// <use %lt> <use %le>
|
||||
// <...> <...>
|
||||
// store %st, %addr_s store %se, %addr_s
|
||||
// br label %if.end br label %if.end
|
||||
// \ /
|
||||
// \ /
|
||||
// \ /
|
||||
// + +
|
||||
// + +
|
||||
// + +
|
||||
// if.end ("footer"):
|
||||
// <...>
|
||||
//
|
||||
@ -47,16 +47,16 @@
|
||||
// header:
|
||||
// %l = load %addr_l
|
||||
// br %cond, label %if.then, label %if.else
|
||||
// / \
|
||||
// / \
|
||||
// / \
|
||||
// + +
|
||||
// + +
|
||||
// + +
|
||||
// if.then: if.else:
|
||||
// <use %l> <use %l>
|
||||
// <...> <...>
|
||||
// br label %if.end br label %if.end
|
||||
// \ /
|
||||
// \ /
|
||||
// \ /
|
||||
// + +
|
||||
// + +
|
||||
// + +
|
||||
// if.end ("footer"):
|
||||
// %s.sink = phi [%st, if.then], [%se, if.else]
|
||||
// <...>
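The before and after diagrams above describe MergedLoadStoreMotion. A rough C++ equivalent of the diagrammed shape, assumed for illustration: both arms load the same address and store to the same address, so the load can be hoisted above the branch and the stores sunk below the join behind a phi:

    // Hypothetical source matching the diagrams: one load hoisted into the
    // header, the two stores sunk into the footer as a single store.
    int diamond(bool cond, const int *addr_l, int *addr_s) {
      int result;
      if (cond) {
        result = *addr_l + 1;   // load and store in if.then
        *addr_s = result;
      } else {
        result = *addr_l - 1;   // load and store in if.else
        *addr_s = result;
      }
      return result;            // if.end ("footer")
    }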
@ -2697,7 +2697,10 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
     // the old pointer, which necessarily must be in the right position to
     // dominate the PHI.
     IRBuilderTy PtrBuilder(IRB);
-    PtrBuilder.SetInsertPoint(OldPtr);
+    if (isa<PHINode>(OldPtr))
+      PtrBuilder.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt());
+    else
+      PtrBuilder.SetInsertPoint(OldPtr);
     PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());

     Value *NewPtr = getNewAllocaSlicePtr(PtrBuilder, OldPtr->getType());
@ -3357,7 +3357,7 @@ void InnerLoopVectorizer::updateAnalysis() {
   DT->addNewBlock(LoopMiddleBlock, LoopBypassBlocks[1]);
   DT->addNewBlock(LoopScalarPreHeader, LoopBypassBlocks[0]);
   DT->changeImmediateDominator(LoopScalarBody, LoopScalarPreHeader);
-  DT->changeImmediateDominator(LoopExitBlock, LoopMiddleBlock);
+  DT->changeImmediateDominator(LoopExitBlock, LoopBypassBlocks[0]);

   DEBUG(DT->verifyDomTree());
 }
@ -5201,7 +5201,13 @@ LoopVectorizationLegality::isInductionVariable(PHINode *Phi) {
     return IK_NoInduction;

   assert(PhiTy->isPointerTy() && "The PHI must be a pointer");
-  uint64_t Size = DL->getTypeAllocSize(PhiTy->getPointerElementType());
+  Type *PointerElementType = PhiTy->getPointerElementType();
+  // The pointer stride cannot be determined if the pointer element type is not
+  // sized.
+  if (!PointerElementType->isSized())
+    return IK_NoInduction;
+
+  uint64_t Size = DL->getTypeAllocSize(PointerElementType);
   if (C->getValue()->equalsInt(Size))
     return IK_PtrInduction;
   else if (C->getValue()->equalsInt(0 - Size))
@ -1,11 +1,11 @@
 This is a set of individual patches, which contain all the customizations to
 llvm/clang currently in the FreeBSD base system. These can be applied in
-alphabetical order to a pristine llvm/clang 3.5.0 release source tree, for
+alphabetical order to a pristine llvm/clang 3.5.1 release source tree, for
 example by doing:
 
-svn co https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_350/final llvm-3.5.0-final
-svn co https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_350/final llvm-3.5.0-final/tools/clang
-cd llvm-3.5.0-final
+svn co https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_351/final llvm-3.5.1-final
+svn co https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_351/final llvm-3.5.1-final/tools/clang
+cd llvm-3.5.1-final
 for p in /usr/src/contrib/llvm/patches/patch-*.diff; do
     patch -p0 -f -F0 -E -i $p -s || break
 done
@ -1,547 +0,0 @@
Pull in r213960 from upstream llvm trunk (by Hal Finkel):

  [PowerPC] Support TLS on PPC32/ELF

  Patch by Justin Hibbits!

Introduced here: http://svnweb.freebsd.org/changeset/base/270147

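For orientation, an assumed example rather than part of the patch: the new 32-bit paths fire for code like the following when built as position-independent code for powerpc, where a general-dynamic access becomes an addi of sym@got@tlsgd followed by bl __tls_get_addr(sym@tlsgd)@PLT, as the updated tls-pic.ll checks below expect:

    // Hypothetical translation unit compiled with -fPIC for 32-bit PowerPC/ELF.
    extern thread_local int counter;   // defined in another shared object

    int bump() {
      return ++counter;                // lowered via __tls_get_addr on PPC32 PIC
    }
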
Index: lib/Target/PowerPC/PPCISelLowering.h
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCISelLowering.h
|
||||
+++ lib/Target/PowerPC/PPCISelLowering.h
|
||||
@@ -181,6 +181,10 @@ namespace llvm {
|
||||
/// on PPC32.
|
||||
PPC32_GOT,
|
||||
|
||||
+ /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
|
||||
+ /// local dynamic TLS on PPC32.
|
||||
+ PPC32_PICGOT,
|
||||
+
|
||||
/// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
|
||||
/// TLS model, produces an ADDIS8 instruction that adds the GOT
|
||||
/// base to sym\@got\@tprel\@ha.
|
||||
Index: lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
|
||||
+++ lib/Target/PowerPC/InstPrinter/PPCInstPrinter.cpp
|
||||
@@ -17,6 +17,7 @@
|
||||
#include "llvm/MC/MCExpr.h"
|
||||
#include "llvm/MC/MCInst.h"
|
||||
#include "llvm/MC/MCInstrInfo.h"
|
||||
+#include "llvm/MC/MCSymbol.h"
|
||||
#include "llvm/Support/CommandLine.h"
|
||||
#include "llvm/Support/raw_ostream.h"
|
||||
#include "llvm/Target/TargetOpcodes.h"
|
||||
@@ -308,10 +309,16 @@ void PPCInstPrinter::printMemRegReg(const MCInst *
|
||||
|
||||
void PPCInstPrinter::printTLSCall(const MCInst *MI, unsigned OpNo,
|
||||
raw_ostream &O) {
|
||||
- printBranchOperand(MI, OpNo, O);
|
||||
+ // On PPC64, VariantKind is VK_None, but on PPC32, it's VK_PLT, and it must
|
||||
+ // come at the _end_ of the expression.
|
||||
+ const MCOperand &Op = MI->getOperand(OpNo);
|
||||
+ const MCSymbolRefExpr &refExp = cast<MCSymbolRefExpr>(*Op.getExpr());
|
||||
+ O << refExp.getSymbol().getName();
|
||||
O << '(';
|
||||
printOperand(MI, OpNo+1, O);
|
||||
O << ')';
|
||||
+ if (refExp.getKind() != MCSymbolRefExpr::VK_None)
|
||||
+ O << '@' << MCSymbolRefExpr::getVariantKindName(refExp.getKind());
|
||||
}
|
||||
|
||||
|
||||
Index: lib/Target/PowerPC/PPCInstrInfo.td
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCInstrInfo.td
|
||||
+++ lib/Target/PowerPC/PPCInstrInfo.td
|
||||
@@ -588,6 +588,12 @@ def tlsreg32 : Operand<i32> {
|
||||
let EncoderMethod = "getTLSRegEncoding";
|
||||
let ParserMatchClass = PPCTLSRegOperand;
|
||||
}
|
||||
+def tlsgd32 : Operand<i32> {}
|
||||
+def tlscall32 : Operand<i32> {
|
||||
+ let PrintMethod = "printTLSCall";
|
||||
+ let MIOperandInfo = (ops calltarget:$func, tlsgd32:$sym);
|
||||
+ let EncoderMethod = "getTLSCallEncoding";
|
||||
+}
|
||||
|
||||
// PowerPC Predicate operand.
|
||||
def pred : Operand<OtherVT> {
|
||||
@@ -1071,6 +1077,8 @@ let isCall = 1, PPC970_Unit = 7, Defs = [LR] in {
|
||||
"bla $func", IIC_BrB, [(PPCcall (i32 imm:$func))]>;
|
||||
|
||||
let isCodeGenOnly = 1 in {
|
||||
+ def BL_TLS : IForm<18, 0, 1, (outs), (ins tlscall32:$func),
|
||||
+ "bl $func", IIC_BrB, []>;
|
||||
def BCCL : BForm<16, 0, 1, (outs), (ins pred:$cond, condbrtarget:$dst),
|
||||
"b${cond:cc}l${cond:pm} ${cond:reg}, $dst">;
|
||||
def BCCLA : BForm<16, 1, 1, (outs), (ins pred:$cond, abscondbrtarget:$dst),
|
||||
@@ -2396,13 +2404,45 @@ def : Pat<(add i32:$in, (PPChi tblockaddress:$g, 0
|
||||
def PPC32GOT: Pseudo<(outs gprc:$rD), (ins), "#PPC32GOT",
|
||||
[(set i32:$rD, (PPCppc32GOT))]>;
|
||||
|
||||
+// Get the _GLOBAL_OFFSET_TABLE_ in PIC mode.
|
||||
+// This uses two output registers, the first as the real output, the second as a
|
||||
+// temporary register, used internally in code generation.
|
||||
+def PPC32PICGOT: Pseudo<(outs gprc:$rD, gprc:$rT), (ins), "#PPC32PICGOT",
|
||||
+ []>, NoEncode<"$rT">;
|
||||
+
|
||||
def LDgotTprelL32: Pseudo<(outs gprc:$rD), (ins s16imm:$disp, gprc_nor0:$reg),
|
||||
- "#LDgotTprelL32",
|
||||
- [(set i32:$rD,
|
||||
- (PPCldGotTprelL tglobaltlsaddr:$disp, i32:$reg))]>;
|
||||
+ "#LDgotTprelL32",
|
||||
+ [(set i32:$rD,
|
||||
+ (PPCldGotTprelL tglobaltlsaddr:$disp, i32:$reg))]>;
|
||||
def : Pat<(PPCaddTls i32:$in, tglobaltlsaddr:$g),
|
||||
(ADD4TLS $in, tglobaltlsaddr:$g)>;
|
||||
|
||||
+def ADDItlsgdL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
|
||||
+ "#ADDItlsgdL32",
|
||||
+ [(set i32:$rD,
|
||||
+ (PPCaddiTlsgdL i32:$reg, tglobaltlsaddr:$disp))]>;
|
||||
+def GETtlsADDR32 : Pseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
|
||||
+ "#GETtlsADDR32",
|
||||
+ [(set i32:$rD,
|
||||
+ (PPCgetTlsAddr i32:$reg, tglobaltlsaddr:$sym))]>;
|
||||
+def ADDItlsldL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
|
||||
+ "#ADDItlsldL32",
|
||||
+ [(set i32:$rD,
|
||||
+ (PPCaddiTlsldL i32:$reg, tglobaltlsaddr:$disp))]>;
|
||||
+def GETtlsldADDR32 : Pseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
|
||||
+ "#GETtlsldADDR32",
|
||||
+ [(set i32:$rD,
|
||||
+ (PPCgetTlsldAddr i32:$reg, tglobaltlsaddr:$sym))]>;
|
||||
+def ADDIdtprelL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
|
||||
+ "#ADDIdtprelL32",
|
||||
+ [(set i32:$rD,
|
||||
+ (PPCaddiDtprelL i32:$reg, tglobaltlsaddr:$disp))]>;
|
||||
+def ADDISdtprelHA32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
|
||||
+ "#ADDISdtprelHA32",
|
||||
+ [(set i32:$rD,
|
||||
+ (PPCaddisDtprelHA i32:$reg,
|
||||
+ tglobaltlsaddr:$disp))]>;
|
||||
+
|
||||
// Support for Position-independent code
|
||||
def LWZtoc: Pseudo<(outs gprc:$rD), (ins tocentry32:$disp, gprc:$reg),
|
||||
"#LWZtoc",
|
||||
Index: lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
+++ lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
@@ -1685,47 +1685,61 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
|
||||
if (Model == TLSModel::GeneralDynamic) {
|
||||
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
|
||||
- SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
|
||||
- SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
|
||||
- GOTReg, TGA);
|
||||
+ SDValue GOTPtr;
|
||||
+ if (is64bit) {
|
||||
+ SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
|
||||
+ GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
|
||||
+ GOTReg, TGA);
|
||||
+ } else {
|
||||
+ GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
|
||||
+ }
|
||||
SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
|
||||
- GOTEntryHi, TGA);
|
||||
+ GOTPtr, TGA);
|
||||
|
||||
// We need a chain node, and don't have one handy. The underlying
|
||||
// call has no side effects, so using the function entry node
|
||||
// suffices.
|
||||
SDValue Chain = DAG.getEntryNode();
|
||||
- Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
|
||||
- SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64);
|
||||
+ Chain = DAG.getCopyToReg(Chain, dl,
|
||||
+ is64bit ? PPC::X3 : PPC::R3, GOTEntry);
|
||||
+ SDValue ParmReg = DAG.getRegister(is64bit ? PPC::X3 : PPC::R3,
|
||||
+ is64bit ? MVT::i64 : MVT::i32);
|
||||
SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLS_ADDR, dl,
|
||||
PtrVT, ParmReg, TGA);
|
||||
// The return value from GET_TLS_ADDR really is in X3 already, but
|
||||
// some hacks are needed here to tie everything together. The extra
|
||||
// copies dissolve during subsequent transforms.
|
||||
- Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr);
|
||||
- return DAG.getCopyFromReg(Chain, dl, PPC::X3, PtrVT);
|
||||
+ Chain = DAG.getCopyToReg(Chain, dl, is64bit ? PPC::X3 : PPC::R3, TLSAddr);
|
||||
+ return DAG.getCopyFromReg(Chain, dl, is64bit ? PPC::X3 : PPC::R3, PtrVT);
|
||||
}
|
||||
|
||||
if (Model == TLSModel::LocalDynamic) {
|
||||
SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
|
||||
- SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
|
||||
- SDValue GOTEntryHi = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
|
||||
- GOTReg, TGA);
|
||||
+ SDValue GOTPtr;
|
||||
+ if (is64bit) {
|
||||
+ SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
|
||||
+ GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
|
||||
+ GOTReg, TGA);
|
||||
+ } else {
|
||||
+ GOTPtr = DAG.getNode(PPCISD::PPC32_PICGOT, dl, PtrVT);
|
||||
+ }
|
||||
SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT,
|
||||
- GOTEntryHi, TGA);
|
||||
+ GOTPtr, TGA);
|
||||
|
||||
// We need a chain node, and don't have one handy. The underlying
|
||||
// call has no side effects, so using the function entry node
|
||||
// suffices.
|
||||
SDValue Chain = DAG.getEntryNode();
|
||||
- Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, GOTEntry);
|
||||
- SDValue ParmReg = DAG.getRegister(PPC::X3, MVT::i64);
|
||||
+ Chain = DAG.getCopyToReg(Chain, dl,
|
||||
+ is64bit ? PPC::X3 : PPC::R3, GOTEntry);
|
||||
+ SDValue ParmReg = DAG.getRegister(is64bit ? PPC::X3 : PPC::R3,
|
||||
+ is64bit ? MVT::i64 : MVT::i32);
|
||||
SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLSLD_ADDR, dl,
|
||||
PtrVT, ParmReg, TGA);
|
||||
// The return value from GET_TLSLD_ADDR really is in X3 already, but
|
||||
// some hacks are needed here to tie everything together. The extra
|
||||
// copies dissolve during subsequent transforms.
|
||||
- Chain = DAG.getCopyToReg(Chain, dl, PPC::X3, TLSAddr);
|
||||
+ Chain = DAG.getCopyToReg(Chain, dl, is64bit ? PPC::X3 : PPC::R3, TLSAddr);
|
||||
SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT,
|
||||
Chain, ParmReg, TGA);
|
||||
return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
|
||||
Index: lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
|
||||
+++ lib/Target/PowerPC/MCTargetDesc/PPCELFObjectWriter.cpp
|
||||
@@ -236,7 +236,10 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(con
|
||||
Type = ELF::R_PPC64_DTPREL16_HIGHESTA;
|
||||
break;
|
||||
case MCSymbolRefExpr::VK_PPC_GOT_TLSGD:
|
||||
- Type = ELF::R_PPC64_GOT_TLSGD16;
|
||||
+ if (is64Bit())
|
||||
+ Type = ELF::R_PPC64_GOT_TLSGD16;
|
||||
+ else
|
||||
+ Type = ELF::R_PPC_GOT_TLSGD16;
|
||||
break;
|
||||
case MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO:
|
||||
Type = ELF::R_PPC64_GOT_TLSGD16_LO;
|
||||
@@ -248,7 +251,10 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(con
|
||||
Type = ELF::R_PPC64_GOT_TLSGD16_HA;
|
||||
break;
|
||||
case MCSymbolRefExpr::VK_PPC_GOT_TLSLD:
|
||||
- Type = ELF::R_PPC64_GOT_TLSLD16;
|
||||
+ if (is64Bit())
|
||||
+ Type = ELF::R_PPC64_GOT_TLSLD16;
|
||||
+ else
|
||||
+ Type = ELF::R_PPC_GOT_TLSLD16;
|
||||
break;
|
||||
case MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO:
|
||||
Type = ELF::R_PPC64_GOT_TLSLD16_LO;
|
||||
@@ -344,13 +350,22 @@ unsigned PPCELFObjectWriter::getRelocTypeInner(con
|
||||
switch (Modifier) {
|
||||
default: llvm_unreachable("Unsupported Modifier");
|
||||
case MCSymbolRefExpr::VK_PPC_TLSGD:
|
||||
- Type = ELF::R_PPC64_TLSGD;
|
||||
+ if (is64Bit())
|
||||
+ Type = ELF::R_PPC64_TLSGD;
|
||||
+ else
|
||||
+ Type = ELF::R_PPC_TLSGD;
|
||||
break;
|
||||
case MCSymbolRefExpr::VK_PPC_TLSLD:
|
||||
- Type = ELF::R_PPC64_TLSLD;
|
||||
+ if (is64Bit())
|
||||
+ Type = ELF::R_PPC64_TLSLD;
|
||||
+ else
|
||||
+ Type = ELF::R_PPC_TLSLD;
|
||||
break;
|
||||
case MCSymbolRefExpr::VK_PPC_TLS:
|
||||
- Type = ELF::R_PPC64_TLS;
|
||||
+ if (is64Bit())
|
||||
+ Type = ELF::R_PPC64_TLS;
|
||||
+ else
|
||||
+ Type = ELF::R_PPC_TLS;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
+++ lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
@@ -573,6 +573,34 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
return;
|
||||
}
|
||||
|
||||
+ case PPC::PPC32PICGOT: {
|
||||
+ MCSymbol *GOTSymbol = OutContext.GetOrCreateSymbol(StringRef("_GLOBAL_OFFSET_TABLE_"));
|
||||
+ MCSymbol *GOTRef = OutContext.CreateTempSymbol();
|
||||
+ MCSymbol *NextInstr = OutContext.CreateTempSymbol();
|
||||
+
|
||||
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BL)
|
||||
+ // FIXME: We would like an efficient form for this, so we don't have to do
|
||||
+ // a lot of extra uniquing.
|
||||
+ .addExpr(MCSymbolRefExpr::Create(NextInstr, OutContext)));
|
||||
+ const MCExpr *OffsExpr =
|
||||
+ MCBinaryExpr::CreateSub(MCSymbolRefExpr::Create(GOTSymbol, OutContext),
|
||||
+ MCSymbolRefExpr::Create(GOTRef, OutContext),
|
||||
+ OutContext);
|
||||
+ OutStreamer.EmitLabel(GOTRef);
|
||||
+ OutStreamer.EmitValue(OffsExpr, 4);
|
||||
+ OutStreamer.EmitLabel(NextInstr);
|
||||
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::MFLR)
|
||||
+ .addReg(MI->getOperand(0).getReg()));
|
||||
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::LWZ)
|
||||
+ .addReg(MI->getOperand(1).getReg())
|
||||
+ .addImm(0)
|
||||
+ .addReg(MI->getOperand(0).getReg()));
|
||||
+ EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADD4)
|
||||
+ .addReg(MI->getOperand(0).getReg())
|
||||
+ .addReg(MI->getOperand(1).getReg())
|
||||
+ .addReg(MI->getOperand(0).getReg()));
|
||||
+ return;
|
||||
+ }
|
||||
case PPC::PPC32GOT: {
|
||||
MCSymbol *GOTSymbol = OutContext.GetOrCreateSymbol(StringRef("_GLOBAL_OFFSET_TABLE_"));
|
||||
const MCExpr *SymGotTlsL =
|
||||
@@ -606,31 +634,43 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
.addExpr(SymGotTlsGD));
|
||||
return;
|
||||
}
|
||||
- case PPC::ADDItlsgdL: {
|
||||
+ case PPC::ADDItlsgdL:
|
||||
// Transform: %Xd = ADDItlsgdL %Xs, <ga:@sym>
|
||||
// Into: %Xd = ADDI8 %Xs, sym@got@tlsgd@l
|
||||
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
|
||||
+ case PPC::ADDItlsgdL32: {
|
||||
+ // Transform: %Rd = ADDItlsgdL32 %Rs, <ga:@sym>
|
||||
+ // Into: %Rd = ADDI %Rs, sym@got@tlsgd
|
||||
const MachineOperand &MO = MI->getOperand(2);
|
||||
const GlobalValue *GValue = MO.getGlobal();
|
||||
MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
const MCExpr *SymGotTlsGD =
|
||||
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO,
|
||||
+ MCSymbolRefExpr::Create(MOSymbol, Subtarget.isPPC64() ?
|
||||
+ MCSymbolRefExpr::VK_PPC_GOT_TLSGD_LO :
|
||||
+ MCSymbolRefExpr::VK_PPC_GOT_TLSGD,
|
||||
OutContext);
|
||||
- EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDI8)
|
||||
- .addReg(MI->getOperand(0).getReg())
|
||||
- .addReg(MI->getOperand(1).getReg())
|
||||
- .addExpr(SymGotTlsGD));
|
||||
+ EmitToStreamer(OutStreamer,
|
||||
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
|
||||
+ .addReg(MI->getOperand(0).getReg())
|
||||
+ .addReg(MI->getOperand(1).getReg())
|
||||
+ .addExpr(SymGotTlsGD));
|
||||
return;
|
||||
}
|
||||
- case PPC::GETtlsADDR: {
|
||||
+ case PPC::GETtlsADDR:
|
||||
// Transform: %X3 = GETtlsADDR %X3, <ga:@sym>
|
||||
// Into: BL8_NOP_TLS __tls_get_addr(sym@tlsgd)
|
||||
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
|
||||
+ case PPC::GETtlsADDR32: {
|
||||
+ // Transform: %R3 = GETtlsADDR32 %R3, <ga:@sym>
|
||||
+ // Into: BL_TLS __tls_get_addr(sym@tlsgd)@PLT
|
||||
|
||||
StringRef Name = "__tls_get_addr";
|
||||
MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol(Name);
|
||||
+ MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
|
||||
+
|
||||
+ if (!Subtarget.isPPC64() && !Subtarget.isDarwin() &&
|
||||
+ TM.getRelocationModel() == Reloc::PIC_)
|
||||
+ Kind = MCSymbolRefExpr::VK_PLT;
|
||||
const MCSymbolRefExpr *TlsRef =
|
||||
- MCSymbolRefExpr::Create(TlsGetAddr, MCSymbolRefExpr::VK_None, OutContext);
|
||||
+ MCSymbolRefExpr::Create(TlsGetAddr, Kind, OutContext);
|
||||
const MachineOperand &MO = MI->getOperand(2);
|
||||
const GlobalValue *GValue = MO.getGlobal();
|
||||
MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
@@ -637,9 +677,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
const MCExpr *SymVar =
|
||||
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSGD,
|
||||
OutContext);
|
||||
- EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BL8_NOP_TLS)
|
||||
- .addExpr(TlsRef)
|
||||
- .addExpr(SymVar));
|
||||
+ EmitToStreamer(OutStreamer,
|
||||
+ MCInstBuilder(Subtarget.isPPC64() ?
|
||||
+ PPC::BL8_NOP_TLS : PPC::BL_TLS)
|
||||
+ .addExpr(TlsRef)
|
||||
+ .addExpr(SymVar));
|
||||
return;
|
||||
}
|
||||
case PPC::ADDIStlsldHA: {
|
||||
@@ -658,31 +700,44 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
.addExpr(SymGotTlsLD));
|
||||
return;
|
||||
}
|
||||
- case PPC::ADDItlsldL: {
|
||||
+ case PPC::ADDItlsldL:
|
||||
// Transform: %Xd = ADDItlsldL %Xs, <ga:@sym>
|
||||
// Into: %Xd = ADDI8 %Xs, sym@got@tlsld@l
|
||||
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
|
||||
+ case PPC::ADDItlsldL32: {
|
||||
+ // Transform: %Rd = ADDItlsldL32 %Rs, <ga:@sym>
|
||||
+ // Into: %Rd = ADDI %Rs, sym@got@tlsld
|
||||
const MachineOperand &MO = MI->getOperand(2);
|
||||
const GlobalValue *GValue = MO.getGlobal();
|
||||
MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
const MCExpr *SymGotTlsLD =
|
||||
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO,
|
||||
+ MCSymbolRefExpr::Create(MOSymbol, Subtarget.isPPC64() ?
|
||||
+ MCSymbolRefExpr::VK_PPC_GOT_TLSLD_LO :
|
||||
+ MCSymbolRefExpr::VK_PPC_GOT_TLSLD,
|
||||
OutContext);
|
||||
- EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDI8)
|
||||
- .addReg(MI->getOperand(0).getReg())
|
||||
- .addReg(MI->getOperand(1).getReg())
|
||||
- .addExpr(SymGotTlsLD));
|
||||
+ EmitToStreamer(OutStreamer,
|
||||
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
|
||||
+ .addReg(MI->getOperand(0).getReg())
|
||||
+ .addReg(MI->getOperand(1).getReg())
|
||||
+ .addExpr(SymGotTlsLD));
|
||||
return;
|
||||
}
|
||||
- case PPC::GETtlsldADDR: {
|
||||
+ case PPC::GETtlsldADDR:
|
||||
// Transform: %X3 = GETtlsldADDR %X3, <ga:@sym>
|
||||
// Into: BL8_NOP_TLS __tls_get_addr(sym@tlsld)
|
||||
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
|
||||
+ case PPC::GETtlsldADDR32: {
|
||||
+ // Transform: %R3 = GETtlsldADDR32 %R3, <ga:@sym>
|
||||
+ // Into: BL_TLS __tls_get_addr(sym@tlsld)@PLT
|
||||
|
||||
StringRef Name = "__tls_get_addr";
|
||||
MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol(Name);
|
||||
+ MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
|
||||
+
|
||||
+ if (!Subtarget.isPPC64() && !Subtarget.isDarwin() &&
|
||||
+ TM.getRelocationModel() == Reloc::PIC_)
|
||||
+ Kind = MCSymbolRefExpr::VK_PLT;
|
||||
+
|
||||
const MCSymbolRefExpr *TlsRef =
|
||||
- MCSymbolRefExpr::Create(TlsGetAddr, MCSymbolRefExpr::VK_None, OutContext);
|
||||
+ MCSymbolRefExpr::Create(TlsGetAddr, Kind, OutContext);
|
||||
const MachineOperand &MO = MI->getOperand(2);
|
||||
const GlobalValue *GValue = MO.getGlobal();
|
||||
MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
@@ -689,15 +744,19 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
const MCExpr *SymVar =
|
||||
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSLD,
|
||||
OutContext);
|
||||
- EmitToStreamer(OutStreamer, MCInstBuilder(PPC::BL8_NOP_TLS)
|
||||
- .addExpr(TlsRef)
|
||||
- .addExpr(SymVar));
|
||||
+ EmitToStreamer(OutStreamer,
|
||||
+ MCInstBuilder(Subtarget.isPPC64() ?
|
||||
+ PPC::BL8_NOP_TLS : PPC::BL_TLS)
|
||||
+ .addExpr(TlsRef)
|
||||
+ .addExpr(SymVar));
|
||||
return;
|
||||
}
|
||||
- case PPC::ADDISdtprelHA: {
|
||||
+ case PPC::ADDISdtprelHA:
|
||||
// Transform: %Xd = ADDISdtprelHA %X3, <ga:@sym>
|
||||
// Into: %Xd = ADDIS8 %X3, sym@dtprel@ha
|
||||
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
|
||||
+ case PPC::ADDISdtprelHA32: {
|
||||
+ // Transform: %Rd = ADDISdtprelHA32 %R3, <ga:@sym>
|
||||
+ // Into: %Rd = ADDIS %R3, sym@dtprel@ha
|
||||
const MachineOperand &MO = MI->getOperand(2);
|
||||
const GlobalValue *GValue = MO.getGlobal();
|
||||
MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
@@ -704,16 +763,19 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
const MCExpr *SymDtprel =
|
||||
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_HA,
|
||||
OutContext);
|
||||
- EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDIS8)
|
||||
- .addReg(MI->getOperand(0).getReg())
|
||||
- .addReg(PPC::X3)
|
||||
- .addExpr(SymDtprel));
|
||||
+ EmitToStreamer(OutStreamer,
|
||||
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDIS8 : PPC::ADDIS)
|
||||
+ .addReg(MI->getOperand(0).getReg())
|
||||
+ .addReg(Subtarget.isPPC64() ? PPC::X3 : PPC::R3)
|
||||
+ .addExpr(SymDtprel));
|
||||
return;
|
||||
}
|
||||
- case PPC::ADDIdtprelL: {
|
||||
+ case PPC::ADDIdtprelL:
|
||||
// Transform: %Xd = ADDIdtprelL %Xs, <ga:@sym>
|
||||
// Into: %Xd = ADDI8 %Xs, sym@dtprel@l
|
||||
- assert(Subtarget.isPPC64() && "Not supported for 32-bit PowerPC");
|
||||
+ case PPC::ADDIdtprelL32: {
|
||||
+ // Transform: %Rd = ADDIdtprelL32 %Rs, <ga:@sym>
|
||||
+ // Into: %Rd = ADDI %Rs, sym@dtprel@l
|
||||
const MachineOperand &MO = MI->getOperand(2);
|
||||
const GlobalValue *GValue = MO.getGlobal();
|
||||
MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
@@ -720,10 +782,11 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
const MCExpr *SymDtprel =
|
||||
MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_DTPREL_LO,
|
||||
OutContext);
|
||||
- EmitToStreamer(OutStreamer, MCInstBuilder(PPC::ADDI8)
|
||||
- .addReg(MI->getOperand(0).getReg())
|
||||
- .addReg(MI->getOperand(1).getReg())
|
||||
- .addExpr(SymDtprel));
|
||||
+ EmitToStreamer(OutStreamer,
|
||||
+ MCInstBuilder(Subtarget.isPPC64() ? PPC::ADDI8 : PPC::ADDI)
|
||||
+ .addReg(MI->getOperand(0).getReg())
|
||||
+ .addReg(MI->getOperand(1).getReg())
|
||||
+ .addExpr(SymDtprel));
|
||||
return;
|
||||
}
|
||||
case PPC::MFOCRF:
|
||||
Index: lib/Target/PowerPC/PPCISelDAGToDAG.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCISelDAGToDAG.cpp
|
||||
+++ lib/Target/PowerPC/PPCISelDAGToDAG.cpp
|
||||
@@ -1473,6 +1473,12 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
|
||||
return CurDAG->getMachineNode(PPC::ADDItocL, dl, MVT::i64,
|
||||
SDValue(Tmp, 0), GA);
|
||||
}
|
||||
+ case PPCISD::PPC32_PICGOT: {
|
||||
+ // Generate a PIC-safe GOT reference.
|
||||
+ assert(!PPCSubTarget->isPPC64() && PPCSubTarget->isSVR4ABI() &&
|
||||
+ "PPCISD::PPC32_PICGOT is only supported for 32-bit SVR4");
|
||||
+ return CurDAG->SelectNodeTo(N, PPC::PPC32PICGOT, PPCLowering->getPointerTy(), MVT::i32);
|
||||
+ }
|
||||
case PPCISD::VADD_SPLAT: {
|
||||
// This expands into one of three sequences, depending on whether
|
||||
// the first operand is odd or even, positive or negative.
|
||||
Index: test/CodeGen/PowerPC/tls-pic.ll
|
||||
===================================================================
|
||||
--- test/CodeGen/PowerPC/tls-pic.ll
|
||||
+++ test/CodeGen/PowerPC/tls-pic.ll
|
||||
@@ -1,5 +1,7 @@
|
||||
; RUN: llc -march=ppc64 -mcpu=pwr7 -O0 -relocation-model=pic < %s | FileCheck -check-prefix=OPT0 %s
|
||||
; RUN: llc -march=ppc64 -mcpu=pwr7 -O1 -relocation-model=pic < %s | FileCheck -check-prefix=OPT1 %s
|
||||
+; RUN: llc -march=ppc32 -O0 -relocation-model=pic < %s | FileCheck -check-prefix=OPT0-32 %s
|
||||
+; RUN: llc -march=ppc32 -O1 -relocation-model=pic < %s | FileCheck -check-prefix=OPT1-32 %s
|
||||
|
||||
target triple = "powerpc64-unknown-linux-gnu"
|
||||
; Test correct assembly code generation for thread-local storage using
|
||||
@@ -22,6 +24,16 @@ entry:
|
||||
; OPT0-NEXT: nop
|
||||
; OPT0: addis [[REG2:[0-9]+]], 3, a@dtprel@ha
|
||||
; OPT0-NEXT: addi {{[0-9]+}}, [[REG2]], a@dtprel@l
|
||||
+; OPT0-32-LABEL: main
|
||||
+; OPT0-32: addi {{[0-9]+}}, {{[0-9]+}}, a@got@tlsld
|
||||
+; OPT0-32: bl __tls_get_addr(a@tlsld)@PLT
|
||||
+; OPT0-32: addis [[REG:[0-9]+]], 3, a@dtprel@ha
|
||||
+; OPT0-32-NEXT: addi {{[0-9]+}}, [[REG]], a@dtprel@l
|
||||
+; OPT1-32-LABEL: main
|
||||
+; OPT1-32: addi 3, {{[0-9]+}}, a@got@tlsld
|
||||
+; OPT1-32: bl __tls_get_addr(a@tlsld)@PLT
|
||||
+; OPT1-32: addis [[REG:[0-9]+]], 3, a@dtprel@ha
|
||||
+; OPT1-32-NEXT: addi {{[0-9]+}}, [[REG]], a@dtprel@l
|
||||
|
||||
; Test peephole optimization for thread-local storage using the
|
||||
; local dynamic model.
|
||||
@@ -52,4 +64,6 @@ entry:
|
||||
; OPT1-NEXT: addi 3, [[REG]], a2@got@tlsgd@l
|
||||
; OPT1: bl __tls_get_addr(a2@tlsgd)
|
||||
; OPT1-NEXT: nop
|
||||
-
|
||||
+; OPT1-32-LABEL: main2
|
||||
+; OPT1-32: addi 3, {{[0-9]+}}, a2@got@tlsgd
|
||||
+; OPT1-32: bl __tls_get_addr(a2@tlsgd)@PLT
|
@ -1,617 +0,0 @@
Pull in r223170 from upstream llvm trunk (by Michael Zolotukhin):

  Apply loop-rotate to several vectorizer tests.

  Such loops shouldn't be vectorized due to the loops form.
  After applying loop-rotate (+simplifycfg) the tests again start to check
  what they are intended to check.

Pull in r223171 from upstream llvm trunk (by Michael Zolotukhin):

  PR21302. Vectorize only bottom-tested loops.

  rdar://problem/18886083

This fixes a bug in the llvm vectorizer, which could sometimes cause
vectorized loops to perform an additional iteration, leading to possible
buffer overruns. Symptoms of this, which are usually segfaults, were
first noticed when building gcc ports, here:

https://lists.freebsd.org/pipermail/freebsd-ports/2014-September/095466.html
https://lists.freebsd.org/pipermail/freebsd-toolchain/2014-September/001211.html

Note: because this is applied on top of llvm/clang 3.5.0, this fix is
slightly different from the one just checked into head in r275633.

Introduced here: http://svnweb.freebsd.org/changeset/base/275635

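A sketch of the loop shapes involved, with both functions assumed for illustration: the vectorizer now requires the latch to be the exiting block. The first loop is bottom-tested and remains vectorizable; a loop whose exit test stays in the header, like the reduced loop-form.ll case added below, is refused instead of being run for an extra iteration:

    // Illustrative shapes only; whether a given C++ loop ends up top- or
    // bottom-tested in IR depends on earlier passes such as loop-rotate.
    void bottom_tested(float *a, long n) {
      if (0 < n) {
        long i = 0;
        do {                 // exit test in the latch: still vectorized
          a[i] *= 2.0f;
        } while (++i < n);
      }
    }

    void top_tested(float *a, long n) {
      long i = 0;
      while (true) {
        if (i >= n)          // exit test in the header: now rejected by
          break;             // the vectorizer rather than miscounted
        a[i] *= 2.0f;
        ++i;
      }
    }
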
Index: lib/Transforms/Vectorize/LoopVectorize.cpp
|
||||
===================================================================
|
||||
--- lib/Transforms/Vectorize/LoopVectorize.cpp
|
||||
+++ lib/Transforms/Vectorize/LoopVectorize.cpp
|
||||
@@ -3466,6 +3466,15 @@ bool LoopVectorizationLegality::canVectorize() {
|
||||
return false;
|
||||
}
|
||||
|
||||
+ // We only handle bottom-tested loops, i.e. loop in which the condition is
|
||||
+ // checked at the end of each iteration. With that we can assume that all
|
||||
+ // instructions in the loop are executed the same number of times.
|
||||
+ if (TheLoop->getExitingBlock() != TheLoop->getLoopLatch()) {
|
||||
+ emitAnalysis(
|
||||
+ Report() << "loop control flow is not understood by vectorizer");
|
||||
+ return false;
|
||||
+ }
|
||||
+
|
||||
// We need to have a loop header.
|
||||
DEBUG(dbgs() << "LV: Found a loop: " <<
|
||||
TheLoop->getHeader()->getName() << '\n');
|
||||
Index: test/Transforms/LoopVectorize/vect.stats.ll
|
||||
===================================================================
|
||||
--- test/Transforms/LoopVectorize/vect.stats.ll
|
||||
+++ test/Transforms/LoopVectorize/vect.stats.ll
|
||||
@@ -13,53 +13,47 @@ target triple = "x86_64-unknown-linux-gnu"
|
||||
|
||||
define void @vectorized(float* nocapture %a, i64 %size) {
|
||||
entry:
|
||||
- %cmp1 = icmp sgt i64 %size, 0
|
||||
- br i1 %cmp1, label %for.header, label %for.end
|
||||
+ %cmp1 = icmp sle i64 %size, 0
|
||||
+ %cmp21 = icmp sgt i64 0, %size
|
||||
+ %or.cond = or i1 %cmp1, %cmp21
|
||||
+ br i1 %or.cond, label %for.end, label %for.body
|
||||
|
||||
-for.header:
|
||||
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
- %cmp2 = icmp sgt i64 %indvars.iv, %size
|
||||
- br i1 %cmp2, label %for.end, label %for.body
|
||||
-
|
||||
-for.body:
|
||||
-
|
||||
- %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
+ %arrayidx = getelementptr inbounds float* %a, i64 %indvars.iv2
|
||||
%0 = load float* %arrayidx, align 4
|
||||
%mul = fmul float %0, %0
|
||||
store float %mul, float* %arrayidx, align 4
|
||||
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv2, 1
|
||||
+ %cmp2 = icmp sgt i64 %indvars.iv.next, %size
|
||||
+ br i1 %cmp2, label %for.end, label %for.body
|
||||
|
||||
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
|
||||
- br label %for.header
|
||||
-
|
||||
-for.end:
|
||||
+for.end: ; preds = %entry, %for.body
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @not_vectorized(float* nocapture %a, i64 %size) {
|
||||
entry:
|
||||
- %cmp1 = icmp sgt i64 %size, 0
|
||||
- br i1 %cmp1, label %for.header, label %for.end
|
||||
+ %cmp1 = icmp sle i64 %size, 0
|
||||
+ %cmp21 = icmp sgt i64 0, %size
|
||||
+ %or.cond = or i1 %cmp1, %cmp21
|
||||
+ br i1 %or.cond, label %for.end, label %for.body
|
||||
|
||||
-for.header:
|
||||
- %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
- %cmp2 = icmp sgt i64 %indvars.iv, %size
|
||||
- br i1 %cmp2, label %for.end, label %for.body
|
||||
-
|
||||
-for.body:
|
||||
-
|
||||
- %0 = add nsw i64 %indvars.iv, -5
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %indvars.iv2 = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
|
||||
+ %0 = add nsw i64 %indvars.iv2, -5
|
||||
%arrayidx = getelementptr inbounds float* %a, i64 %0
|
||||
%1 = load float* %arrayidx, align 4
|
||||
- %2 = add nsw i64 %indvars.iv, 2
|
||||
+ %2 = add nsw i64 %indvars.iv2, 2
|
||||
%arrayidx2 = getelementptr inbounds float* %a, i64 %2
|
||||
%3 = load float* %arrayidx2, align 4
|
||||
%mul = fmul float %1, %3
|
||||
- %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv
|
||||
+ %arrayidx4 = getelementptr inbounds float* %a, i64 %indvars.iv2
|
||||
store float %mul, float* %arrayidx4, align 4
|
||||
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv2, 1
|
||||
+ %cmp2 = icmp sgt i64 %indvars.iv.next, %size
|
||||
+ br i1 %cmp2, label %for.end, label %for.body
|
||||
|
||||
- %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
|
||||
- br label %for.header
|
||||
-
|
||||
-for.end:
|
||||
+for.end: ; preds = %entry, %for.body
|
||||
ret void
|
||||
-}
|
||||
\ No newline at end of file
|
||||
+}
|
||||
Index: test/Transforms/LoopVectorize/loop-form.ll
|
||||
===================================================================
|
||||
--- test/Transforms/LoopVectorize/loop-form.ll
|
||||
+++ test/Transforms/LoopVectorize/loop-form.ll
|
||||
@@ -0,0 +1,31 @@
|
||||
+; RUN: opt -S -loop-vectorize < %s | FileCheck %s
|
||||
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
|
||||
+
|
||||
+; Check that we vectorize only bottom-tested loops.
|
||||
+; This is a reduced testcase from PR21302.
|
||||
+;
|
||||
+; rdar://problem/18886083
|
||||
+
|
||||
+%struct.X = type { i32, i16 }
|
||||
+; CHECK-LABEL: @foo(
|
||||
+; CHECK-NOT: vector.body
|
||||
+
|
||||
+define void @foo(i32 %n) {
|
||||
+entry:
|
||||
+ br label %for.cond
|
||||
+
|
||||
+for.cond:
|
||||
+ %i = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
+ %cmp = icmp slt i32 %i, %n
|
||||
+ br i1 %cmp, label %for.body, label %if.end
|
||||
+
|
||||
+for.body:
|
||||
+ %iprom = sext i32 %i to i64
|
||||
+ %b = getelementptr inbounds %struct.X* undef, i64 %iprom, i32 1
|
||||
+ store i16 0, i16* %b, align 4
|
||||
+ %inc = add nsw i32 %i, 1
|
||||
+ br label %for.cond
|
||||
+
|
||||
+if.end:
|
||||
+ ret void
|
||||
+}
|
||||
Index: test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
|
||||
===================================================================
|
||||
--- test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
|
||||
+++ test/Transforms/LoopVectorize/runtime-check-readonly-address-space.ll
|
||||
@@ -8,26 +8,24 @@ define void @add_ints_1_1_1(i32 addrspace(1)* %a,
|
||||
; CHECK-LABEL: @add_ints_1_1_1(
|
||||
; CHECK: <4 x i32>
|
||||
; CHECK: ret
|
||||
+
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ br label %for.body
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp ult i64 %i.0, 200
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
+ %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
- %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.0
|
||||
+ %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.01
|
||||
%1 = load i32 addrspace(1)* %arrayidx1, align 4
|
||||
%add = add nsw i32 %0, %1
|
||||
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.0
|
||||
+ %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.01
|
||||
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
|
||||
- %inc = add i64 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add i64 %i.01, 1
|
||||
+ %cmp = icmp ult i64 %inc, 200
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -35,26 +33,24 @@ define void @add_ints_as_1_0_0(i32 addrspace(1)* %
|
||||
; CHECK-LABEL: @add_ints_as_1_0_0(
|
||||
; CHECK-NOT: <4 x i32>
|
||||
; CHECK: ret
|
||||
+
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ br label %for.body
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp ult i64 %i.0, 200
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %arrayidx = getelementptr inbounds i32* %b, i64 %i.0
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
+ %arrayidx = getelementptr inbounds i32* %b, i64 %i.01
|
||||
%0 = load i32* %arrayidx, align 4
|
||||
- %arrayidx1 = getelementptr inbounds i32* %c, i64 %i.0
|
||||
+ %arrayidx1 = getelementptr inbounds i32* %c, i64 %i.01
|
||||
%1 = load i32* %arrayidx1, align 4
|
||||
%add = add nsw i32 %0, %1
|
||||
- %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.0
|
||||
+ %arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %i.01
|
||||
store i32 %add, i32 addrspace(1)* %arrayidx2, align 4
|
||||
- %inc = add i64 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add i64 %i.01, 1
|
||||
+ %cmp = icmp ult i64 %inc, 200
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -62,26 +58,24 @@ define void @add_ints_as_0_1_0(i32* %a, i32 addrsp
|
||||
; CHECK-LABEL: @add_ints_as_0_1_0(
|
||||
; CHECK-NOT: <4 x i32>
|
||||
; CHECK: ret
|
||||
+
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ br label %for.body
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp ult i64 %i.0, 200
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
+ %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
- %arrayidx1 = getelementptr inbounds i32* %c, i64 %i.0
|
||||
+ %arrayidx1 = getelementptr inbounds i32* %c, i64 %i.01
|
||||
%1 = load i32* %arrayidx1, align 4
|
||||
%add = add nsw i32 %0, %1
|
||||
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.0
|
||||
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.01
|
||||
store i32 %add, i32* %arrayidx2, align 4
|
||||
- %inc = add i64 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add i64 %i.01, 1
|
||||
+ %cmp = icmp ult i64 %inc, 200
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -89,26 +83,24 @@ define void @add_ints_as_0_1_1(i32* %a, i32 addrsp
|
||||
; CHECK-LABEL: @add_ints_as_0_1_1(
|
||||
; CHECK-NOT: <4 x i32>
|
||||
; CHECK: ret
|
||||
+
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ br label %for.body
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp ult i64 %i.0, 200
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
+ %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
- %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.0
|
||||
+ %arrayidx1 = getelementptr inbounds i32 addrspace(1)* %c, i64 %i.01
|
||||
%1 = load i32 addrspace(1)* %arrayidx1, align 4
|
||||
%add = add nsw i32 %0, %1
|
||||
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.0
|
||||
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.01
|
||||
store i32 %add, i32* %arrayidx2, align 4
|
||||
- %inc = add i64 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add i64 %i.01, 1
|
||||
+ %cmp = icmp ult i64 %inc, 200
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -116,26 +108,24 @@ define void @add_ints_as_0_1_2(i32* %a, i32 addrsp
|
||||
; CHECK-LABEL: @add_ints_as_0_1_2(
|
||||
; CHECK-NOT: <4 x i32>
|
||||
; CHECK: ret
|
||||
+
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ br label %for.body
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp ult i64 %i.0, 200
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.0
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.01 = phi i64 [ 0, %entry ], [ %inc, %for.body ]
|
||||
+ %arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %i.01
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
- %arrayidx1 = getelementptr inbounds i32 addrspace(2)* %c, i64 %i.0
|
||||
+ %arrayidx1 = getelementptr inbounds i32 addrspace(2)* %c, i64 %i.01
|
||||
%1 = load i32 addrspace(2)* %arrayidx1, align 4
|
||||
%add = add nsw i32 %0, %1
|
||||
- %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.0
|
||||
+ %arrayidx2 = getelementptr inbounds i32* %a, i64 %i.01
|
||||
store i32 %add, i32* %arrayidx2, align 4
|
||||
- %inc = add i64 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add i64 %i.01, 1
|
||||
+ %cmp = icmp ult i64 %inc, 200
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body
|
||||
ret void
|
||||
}
|
||||
|
||||
Index: test/Transforms/LoopVectorize/runtime-check-address-space.ll
|
||||
===================================================================
|
||||
--- test/Transforms/LoopVectorize/runtime-check-address-space.ll
|
||||
+++ test/Transforms/LoopVectorize/runtime-check-address-space.ll
|
||||
@@ -31,25 +31,23 @@ define void @foo(i32 addrspace(1)* %a, i32 addrspa
|
||||
; CHECK: ret
|
||||
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ %cmp1 = icmp slt i32 0, %n
|
||||
+ br i1 %cmp1, label %for.body, label %for.end
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp slt i32 %i.0, %n
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %idxprom = sext i32 %i.0 to i64
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
|
||||
+ %idxprom = sext i32 %i.02 to i64
|
||||
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
%mul = mul nsw i32 %0, 3
|
||||
- %idxprom1 = sext i32 %i.0 to i64
|
||||
+ %idxprom1 = sext i32 %i.02 to i64
|
||||
%arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %idxprom1
|
||||
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
|
||||
- %inc = add nsw i32 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add nsw i32 %i.02, 1
|
||||
+ %cmp = icmp slt i32 %inc, %n
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -60,25 +58,23 @@ define void @bar0(i32* %a, i32 addrspace(1)* %b, i
|
||||
; CHECK: ret
|
||||
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ %cmp1 = icmp slt i32 0, %n
|
||||
+ br i1 %cmp1, label %for.body, label %for.end
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp slt i32 %i.0, %n
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %idxprom = sext i32 %i.0 to i64
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
|
||||
+ %idxprom = sext i32 %i.02 to i64
|
||||
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
%mul = mul nsw i32 %0, 3
|
||||
- %idxprom1 = sext i32 %i.0 to i64
|
||||
+ %idxprom1 = sext i32 %i.02 to i64
|
||||
%arrayidx2 = getelementptr inbounds i32* %a, i64 %idxprom1
|
||||
store i32 %mul, i32* %arrayidx2, align 4
|
||||
- %inc = add nsw i32 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add nsw i32 %i.02, 1
|
||||
+ %cmp = icmp slt i32 %inc, %n
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -89,25 +85,23 @@ define void @bar1(i32 addrspace(1)* %a, i32* %b, i
|
||||
; CHECK: ret
|
||||
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ %cmp1 = icmp slt i32 0, %n
|
||||
+ br i1 %cmp1, label %for.body, label %for.end
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp slt i32 %i.0, %n
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %idxprom = sext i32 %i.0 to i64
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
|
||||
+ %idxprom = sext i32 %i.02 to i64
|
||||
%arrayidx = getelementptr inbounds i32* %b, i64 %idxprom
|
||||
%0 = load i32* %arrayidx, align 4
|
||||
%mul = mul nsw i32 %0, 3
|
||||
- %idxprom1 = sext i32 %i.0 to i64
|
||||
+ %idxprom1 = sext i32 %i.02 to i64
|
||||
%arrayidx2 = getelementptr inbounds i32 addrspace(1)* %a, i64 %idxprom1
|
||||
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
|
||||
- %inc = add nsw i32 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add nsw i32 %i.02, 1
|
||||
+ %cmp = icmp slt i32 %inc, %n
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -119,25 +113,23 @@ define void @bar2(i32* noalias %a, i32 addrspace(1
|
||||
; CHECK: ret
|
||||
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ %cmp1 = icmp slt i32 0, %n
|
||||
+ br i1 %cmp1, label %for.body, label %for.end
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp slt i32 %i.0, %n
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %idxprom = sext i32 %i.0 to i64
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
|
||||
+ %idxprom = sext i32 %i.02 to i64
|
||||
%arrayidx = getelementptr inbounds i32 addrspace(1)* %b, i64 %idxprom
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
%mul = mul nsw i32 %0, 3
|
||||
- %idxprom1 = sext i32 %i.0 to i64
|
||||
+ %idxprom1 = sext i32 %i.02 to i64
|
||||
%arrayidx2 = getelementptr inbounds i32* %a, i64 %idxprom1
|
||||
store i32 %mul, i32* %arrayidx2, align 4
|
||||
- %inc = add nsw i32 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add nsw i32 %i.02, 1
|
||||
+ %cmp = icmp slt i32 %inc, %n
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -149,25 +141,23 @@ define void @arst0(i32* %b, i32 %n) #0 {
|
||||
; CHECK: ret
|
||||
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ %cmp1 = icmp slt i32 0, %n
|
||||
+ br i1 %cmp1, label %for.body, label %for.end
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp slt i32 %i.0, %n
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %idxprom = sext i32 %i.0 to i64
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
|
||||
+ %idxprom = sext i32 %i.02 to i64
|
||||
%arrayidx = getelementptr inbounds i32* %b, i64 %idxprom
|
||||
%0 = load i32* %arrayidx, align 4
|
||||
%mul = mul nsw i32 %0, 3
|
||||
- %idxprom1 = sext i32 %i.0 to i64
|
||||
+ %idxprom1 = sext i32 %i.02 to i64
|
||||
%arrayidx2 = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
|
||||
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
|
||||
- %inc = add nsw i32 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add nsw i32 %i.02, 1
|
||||
+ %cmp = icmp slt i32 %inc, %n
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -180,25 +170,23 @@ define void @arst1(i32* %b, i32 %n) #0 {
|
||||
; CHECK: ret
|
||||
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ %cmp1 = icmp slt i32 0, %n
|
||||
+ br i1 %cmp1, label %for.body, label %for.end
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp slt i32 %i.0, %n
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %idxprom = sext i32 %i.0 to i64
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
|
||||
+ %idxprom = sext i32 %i.02 to i64
|
||||
%arrayidx = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom
|
||||
%0 = load i32 addrspace(1)* %arrayidx, align 4
|
||||
%mul = mul nsw i32 %0, 3
|
||||
- %idxprom1 = sext i32 %i.0 to i64
|
||||
+ %idxprom1 = sext i32 %i.02 to i64
|
||||
%arrayidx2 = getelementptr inbounds i32* %b, i64 %idxprom1
|
||||
store i32 %mul, i32* %arrayidx2, align 4
|
||||
- %inc = add nsw i32 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add nsw i32 %i.02, 1
|
||||
+ %cmp = icmp slt i32 %inc, %n
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
||||
@@ -210,25 +198,23 @@ define void @aoeu(i32 %n) #0 {
|
||||
; CHECK: ret
|
||||
|
||||
entry:
|
||||
- br label %for.cond
|
||||
+ %cmp1 = icmp slt i32 0, %n
|
||||
+ br i1 %cmp1, label %for.body, label %for.end
|
||||
|
||||
-for.cond: ; preds = %for.body, %entry
|
||||
- %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
|
||||
- %cmp = icmp slt i32 %i.0, %n
|
||||
- br i1 %cmp, label %for.body, label %for.end
|
||||
-
|
||||
-for.body: ; preds = %for.cond
|
||||
- %idxprom = sext i32 %i.0 to i64
|
||||
+for.body: ; preds = %entry, %for.body
|
||||
+ %i.02 = phi i32 [ %inc, %for.body ], [ 0, %entry ]
|
||||
+ %idxprom = sext i32 %i.02 to i64
|
||||
%arrayidx = getelementptr inbounds [1024 x i32] addrspace(2)* @q_as2, i64 0, i64 %idxprom
|
||||
%0 = load i32 addrspace(2)* %arrayidx, align 4
|
||||
%mul = mul nsw i32 %0, 3
|
||||
- %idxprom1 = sext i32 %i.0 to i64
|
||||
+ %idxprom1 = sext i32 %i.02 to i64
|
||||
%arrayidx2 = getelementptr inbounds [1024 x i32] addrspace(1)* @g_as1, i64 0, i64 %idxprom1
|
||||
store i32 %mul, i32 addrspace(1)* %arrayidx2, align 4
|
||||
- %inc = add nsw i32 %i.0, 1
|
||||
- br label %for.cond
|
||||
+ %inc = add nsw i32 %i.02, 1
|
||||
+ %cmp = icmp slt i32 %inc, %n
|
||||
+ br i1 %cmp, label %for.body, label %for.end
|
||||
|
||||
-for.end: ; preds = %for.cond
|
||||
+for.end: ; preds = %for.body, %entry
|
||||
ret void
|
||||
}
|
||||
|
@ -318,9 +318,9 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
|
||||
// Change the opcode to LWZ, and the global address operand to be a
|
||||
// reference to the GOT entry we will synthesize later.
|
||||
@@ -382,16 +409,23 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
else if (MO.isJTI())
|
||||
MOSymbol = GetJTISymbol(MO.getIndex());
|
||||
@@ -384,16 +411,23 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
else if (MO.isBlockAddress())
|
||||
MOSymbol = GetBlockAddressSymbol(MO.getBlockAddress());
|
||||
|
||||
- MCSymbol *TOCEntry = lookUpOrCreateTOCEntry(MOSymbol);
|
||||
+ if (PL == PICLevel::Small) {
|
||||
@ -351,8 +351,8 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
EmitToStreamer(OutStreamer, TmpInst);
|
||||
return;
|
||||
}
|
||||
@@ -399,7 +433,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
case PPC::LDtocCPT:
|
||||
@@ -402,7 +436,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
case PPC::LDtocBA:
|
||||
case PPC::LDtoc: {
|
||||
// Transform %X3 = LDtoc <ga:@min1>, %X2
|
||||
- LowerPPCMachineInstrToMCInst(MI, TmpInst, *this, Subtarget.isDarwin());
|
||||
@ -360,7 +360,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
|
||||
// Change the opcode to LD, and the global address operand to be a
|
||||
// reference to the TOC entry we will synthesize later.
|
||||
@@ -428,7 +462,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
@@ -433,7 +467,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
|
||||
case PPC::ADDIStocHA: {
|
||||
// Transform %Xd = ADDIStocHA %X2, <ga:@sym>
|
||||
@ -369,7 +369,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
|
||||
// Change the opcode to ADDIS8. If the global address is external, has
|
||||
// common linkage, is a non-local function address, or is a jump table
|
||||
@@ -470,7 +504,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
@@ -479,7 +513,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
}
|
||||
case PPC::LDtocL: {
|
||||
// Transform %Xd = LDtocL <ga:@sym>, %Xs
|
||||
@ -378,7 +378,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
|
||||
// Change the opcode to LD. If the global address is external, has
|
||||
// common linkage, or is a jump table address, then reference the
|
||||
@@ -507,7 +541,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
@@ -521,7 +555,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
}
|
||||
case PPC::ADDItocL: {
|
||||
// Transform %Xd = ADDItocL %Xs, <ga:@sym>
|
||||
@ -387,7 +387,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
|
||||
// Change the opcode to ADDI8. If the global address is external, then
|
||||
// generate a TOC entry and reference that. Otherwise reference the
|
||||
@@ -558,7 +592,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
@@ -572,7 +606,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
case PPC::LDgotTprelL:
|
||||
case PPC::LDgotTprelL32: {
|
||||
// Transform %Xd = LDgotTprelL <ga:@sym>, %Xs
|
||||
@ -396,7 +396,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
|
||||
// Change the opcode to LD.
|
||||
TmpInst.setOpcode(isPPC64 ? PPC::LD : PPC::LWZ);
|
||||
@@ -841,7 +875,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
@@ -796,7 +830,7 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
}
|
||||
}
|
||||
|
||||
@ -405,7 +405,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
EmitToStreamer(OutStreamer, TmpInst);
|
||||
}
|
||||
|
||||
@@ -857,16 +891,14 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module
|
||||
@@ -812,16 +846,14 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module
|
||||
if (Subtarget.isPPC64() || TM.getRelocationModel() != Reloc::PIC_)
|
||||
return AsmPrinter::EmitStartOfAsmFile(M);
|
||||
|
||||
@ -426,7 +426,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
MCSymbol *CurrentPos = OutContext.CreateTempSymbol();
|
||||
|
||||
OutStreamer.EmitLabel(CurrentPos);
|
||||
@@ -885,7 +917,9 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module
|
||||
@@ -840,7 +872,9 @@ void PPCLinuxAsmPrinter::EmitStartOfAsmFile(Module
|
||||
|
||||
void PPCLinuxAsmPrinter::EmitFunctionEntryLabel() {
|
||||
// linux/ppc32 - Normal entry label.
|
||||
@ -437,7 +437,7 @@ Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
return AsmPrinter::EmitFunctionEntryLabel();
|
||||
|
||||
if (!Subtarget.isPPC64()) {
|
||||
@@ -897,7 +931,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel()
|
||||
@@ -852,7 +886,7 @@ void PPCLinuxAsmPrinter::EmitFunctionEntryLabel()
|
||||
|
||||
const MCExpr *OffsExpr =
|
||||
MCBinaryExpr::CreateSub(
|
||||
@ -458,7 +458,7 @@ Index: lib/Target/PowerPC/PPCISelDAGToDAG.cpp
|
||||
#include "llvm/Support/CommandLine.h"
|
||||
#include "llvm/Support/Debug.h"
|
||||
#include "llvm/Support/ErrorHandling.h"
|
||||
@@ -273,23 +274,29 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
|
||||
@@ -283,23 +284,29 @@ SDNode *PPCDAGToDAGISel::getGlobalBaseReg() {
|
||||
// Insert the set of GlobalBaseReg into the first MBB of the function
|
||||
MachineBasicBlock &FirstMBB = MF->front();
|
||||
MachineBasicBlock::iterator MBBI = FirstMBB.begin();
|
||||
@ -499,7 +499,7 @@ Index: lib/Target/PowerPC/PPCISelDAGToDAG.cpp
|
||||
}
|
||||
} else {
|
||||
GlobalBaseReg = RegInfo->createVirtualRegister(&PPC::G8RC_NOX0RegClass);
|
||||
@@ -1429,13 +1436,13 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
|
||||
@@ -1439,13 +1446,13 @@ SDNode *PPCDAGToDAGISel::Select(SDNode *N) {
|
||||
return CurDAG->SelectNodeTo(N, Reg, MVT::Other, Chain);
|
||||
}
|
||||
case PPCISD::TOC_ENTRY: {
|
||||
@ -519,7 +519,7 @@ Index: lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
+++ lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
@@ -1653,6 +1653,8 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
@@ -1682,6 +1682,8 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
const GlobalValue *GV = GA->getGlobal();
|
||||
EVT PtrVT = getPointerTy();
|
||||
bool is64bit = Subtarget.isPPC64();
|
||||
@ -528,7 +528,7 @@ Index: lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
|
||||
TLSModel::Model Model = getTargetMachine().getTLSModel(GV);
|
||||
|
||||
@@ -1691,7 +1693,10 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
@@ -1721,7 +1723,10 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSGD_HA, dl, PtrVT,
|
||||
GOTReg, TGA);
|
||||
} else {
|
||||
@ -540,7 +540,7 @@ Index: lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
}
|
||||
SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
|
||||
GOTPtr, TGA);
|
||||
@@ -1721,7 +1726,10 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
@@ -1738,7 +1743,10 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
GOTPtr = DAG.getNode(PPCISD::ADDIS_TLSLD_HA, dl, PtrVT,
|
||||
GOTReg, TGA);
|
||||
} else {
|
||||
@ -556,7 +556,7 @@ Index: lib/Target/PowerPC/PPCInstrInfo.td
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCInstrInfo.td
|
||||
+++ lib/Target/PowerPC/PPCInstrInfo.td
|
||||
@@ -976,6 +976,9 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit =
|
||||
@@ -980,6 +980,9 @@ let isTerminator = 1, isBarrier = 1, PPC970_Unit =
|
||||
let Defs = [LR] in
|
||||
def MovePCtoLR : Pseudo<(outs), (ins), "#MovePCtoLR", []>,
|
||||
PPC970_Unit_BRU;
|
||||
@ -566,7 +566,7 @@ Index: lib/Target/PowerPC/PPCInstrInfo.td
|
||||
|
||||
let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in {
|
||||
let isBarrier = 1 in {
|
||||
@@ -2444,15 +2447,13 @@ def ADDISdtprelHA32 : Pseudo<(outs gprc:$rD), (ins
|
||||
@@ -2442,15 +2445,13 @@ def ADDISdtprelHA32 : Pseudo<(outs gprc:$rD), (ins
|
||||
tglobaltlsaddr:$disp))]>;
|
||||
|
||||
// Support for Position-independent code
|
@ -1,504 +0,0 @@
Pull in r221703 from upstream llvm trunk (by Bill Schmidt):

[PowerPC] Replace foul hackery with real calls to __tls_get_addr

My original support for the general dynamic and local dynamic TLS
models contained some fairly obtuse hacks to generate calls to
__tls_get_addr when lowering a TargetGlobalAddress. Rather than
generating real calls, special GET_TLS_ADDR nodes were used to wrap
the calls and only reveal them at assembly time. I attempted to
provide correct parameter and return values by chaining CopyToReg and
CopyFromReg nodes onto the GET_TLS_ADDR nodes, but this was also not
fully correct. Problems were seen with two back-to-back stores to TLS
variables, where the call sequences ended up overlapping with unhappy
results. Additionally, since these weren't real calls, the proper
register side effects of a call were not recorded, so clobbered values
were kept live across the calls.

The proper thing to do is to lower these into calls in the first
place. This is relatively straightforward; see the changes to
PPCTargetLowering::LowerGlobalTLSAddress() in PPCISelLowering.cpp.
The changes here are standard call lowering, except that we need to
track the fact that these calls will require a relocation. This is
done by adding a machine operand flag of MO_TLSLD or MO_TLSGD to the
TargetGlobalAddress operand that appears earlier in the sequence.

The calls to LowerCallTo() eventually find their way to
LowerCall_64SVR4() or LowerCall_32SVR4(), which call FinishCall(),
which calls PrepareCall(). In PrepareCall(), we detect the calls to
__tls_get_addr and immediately snag the TargetGlobalTLSAddress with
the annotated relocation information. This becomes an extra operand
on the call following the callee, which is expected for nodes of type
tlscall. We change the call opcode to CALL_TLS for this case. Back
in FinishCall(), we change it again to CALL_NOP_TLS for 64-bit only,
since we require a TOC-restore nop following the call for the 64-bit
ABIs.

During selection, patterns in PPCInstrInfo.td and PPCInstr64Bit.td
convert the CALL_TLS nodes into BL_TLS nodes, and convert the
CALL_NOP_TLS nodes into BL8_NOP_TLS nodes. This replaces the code
removed from PPCAsmPrinter.cpp, as the BL_TLS or BL8_NOP_TLS
nodes can now be emitted normally using their patterns and the
associated printTLSCall print method.

Finally, as a result of these changes, all references to get-tls-addr
in its various guises are no longer used, so they have been removed.

There are existing TLS tests to verify the changes haven't messed
anything up. I've added one new test that verifies that the problem
with the original code has been fixed.

This fixes a fatal "Bad machine code" error when compiling parts of
libgomp for 32-bit PowerPC.

Introduced here: http://svnweb.freebsd.org/changeset/base/276301

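In code terms, the fix is ordinary call lowering: the GOT-entry node becomes the single argument of a real C call to __tls_get_addr, and the generic LowerCallTo() models the argument copies, register clobbers and chain correctly. Below is a condensed, commented sketch of the lowerTLSCall() helper that appears in the PPCISelLowering.cpp portion of this patch (llvm 3.5 API names; the comments are added here for illustration only):

// Generate a call to __tls_get_addr for the given GOT entry Op.
std::pair<SDValue, SDValue>
PPCTargetLowering::lowerTLSCall(SDValue Op, SDLoc dl,
                                SelectionDAG &DAG) const {
  // The argument and the return value are pointer-sized integers.
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());

  // Single argument: the GOT entry produced by ADDI_TLSGD_L / ADDI_TLSLD_L.
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  Entry.Node = Op;
  Entry.Ty = IntPtrTy;
  Args.push_back(Entry);

  // Describe a plain C call to the external symbol __tls_get_addr and let
  // the target-independent LowerCallTo() do the actual call lowering.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
    .setCallee(CallingConv::C, IntPtrTy,
               DAG.getTargetExternalSymbol("__tls_get_addr", getPointerTy()),
               std::move(Args), 0);

  // first = the return value (the TLS address), second = the output chain.
  return LowerCallTo(CLI);
}
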
Index: lib/Target/PowerPC/PPC.h
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPC.h
|
||||
+++ lib/Target/PowerPC/PPC.h
|
||||
@@ -96,7 +96,12 @@ namespace llvm {
|
||||
MO_TOC_LO = 7 << 4,
|
||||
|
||||
// Symbol for VK_PPC_TLS fixup attached to an ADD instruction
|
||||
- MO_TLS = 8 << 4
|
||||
+ MO_TLS = 8 << 4,
|
||||
+
|
||||
+ // Symbols for VK_PPC_TLSGD and VK_PPC_TLSLD in __tls_get_addr
|
||||
+ // call sequences.
|
||||
+ MO_TLSLD = 9 << 4,
|
||||
+ MO_TLSGD = 10 << 4
|
||||
};
|
||||
} // end namespace PPCII
|
||||
|
||||
Index: lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
+++ lib/Target/PowerPC/PPCAsmPrinter.cpp
|
||||
@@ -689,35 +689,6 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
.addExpr(SymGotTlsGD));
|
||||
return;
|
||||
}
|
||||
- case PPC::GETtlsADDR:
|
||||
- // Transform: %X3 = GETtlsADDR %X3, <ga:@sym>
|
||||
- // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsgd)
|
||||
- case PPC::GETtlsADDR32: {
|
||||
- // Transform: %R3 = GETtlsADDR32 %R3, <ga:@sym>
|
||||
- // Into: BL_TLS __tls_get_addr(sym@tlsgd)@PLT
|
||||
-
|
||||
- StringRef Name = "__tls_get_addr";
|
||||
- MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol(Name);
|
||||
- MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
|
||||
-
|
||||
- if (!Subtarget.isPPC64() && !Subtarget.isDarwin() &&
|
||||
- TM.getRelocationModel() == Reloc::PIC_)
|
||||
- Kind = MCSymbolRefExpr::VK_PLT;
|
||||
- const MCSymbolRefExpr *TlsRef =
|
||||
- MCSymbolRefExpr::Create(TlsGetAddr, Kind, OutContext);
|
||||
- const MachineOperand &MO = MI->getOperand(2);
|
||||
- const GlobalValue *GValue = MO.getGlobal();
|
||||
- MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
- const MCExpr *SymVar =
|
||||
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSGD,
|
||||
- OutContext);
|
||||
- EmitToStreamer(OutStreamer,
|
||||
- MCInstBuilder(Subtarget.isPPC64() ?
|
||||
- PPC::BL8_NOP_TLS : PPC::BL_TLS)
|
||||
- .addExpr(TlsRef)
|
||||
- .addExpr(SymVar));
|
||||
- return;
|
||||
- }
|
||||
case PPC::ADDIStlsldHA: {
|
||||
// Transform: %Xd = ADDIStlsldHA %X2, <ga:@sym>
|
||||
// Into: %Xd = ADDIS8 %X2, sym@got@tlsld@ha
|
||||
@@ -755,36 +726,6 @@ void PPCAsmPrinter::EmitInstruction(const MachineI
|
||||
.addExpr(SymGotTlsLD));
|
||||
return;
|
||||
}
|
||||
- case PPC::GETtlsldADDR:
|
||||
- // Transform: %X3 = GETtlsldADDR %X3, <ga:@sym>
|
||||
- // Into: BL8_NOP_TLS __tls_get_addr(sym@tlsld)
|
||||
- case PPC::GETtlsldADDR32: {
|
||||
- // Transform: %R3 = GETtlsldADDR32 %R3, <ga:@sym>
|
||||
- // Into: BL_TLS __tls_get_addr(sym@tlsld)@PLT
|
||||
-
|
||||
- StringRef Name = "__tls_get_addr";
|
||||
- MCSymbol *TlsGetAddr = OutContext.GetOrCreateSymbol(Name);
|
||||
- MCSymbolRefExpr::VariantKind Kind = MCSymbolRefExpr::VK_None;
|
||||
-
|
||||
- if (!Subtarget.isPPC64() && !Subtarget.isDarwin() &&
|
||||
- TM.getRelocationModel() == Reloc::PIC_)
|
||||
- Kind = MCSymbolRefExpr::VK_PLT;
|
||||
-
|
||||
- const MCSymbolRefExpr *TlsRef =
|
||||
- MCSymbolRefExpr::Create(TlsGetAddr, Kind, OutContext);
|
||||
- const MachineOperand &MO = MI->getOperand(2);
|
||||
- const GlobalValue *GValue = MO.getGlobal();
|
||||
- MCSymbol *MOSymbol = getSymbol(GValue);
|
||||
- const MCExpr *SymVar =
|
||||
- MCSymbolRefExpr::Create(MOSymbol, MCSymbolRefExpr::VK_PPC_TLSLD,
|
||||
- OutContext);
|
||||
- EmitToStreamer(OutStreamer,
|
||||
- MCInstBuilder(Subtarget.isPPC64() ?
|
||||
- PPC::BL8_NOP_TLS : PPC::BL_TLS)
|
||||
- .addExpr(TlsRef)
|
||||
- .addExpr(SymVar));
|
||||
- return;
|
||||
- }
|
||||
case PPC::ADDISdtprelHA:
|
||||
// Transform: %Xd = ADDISdtprelHA %X3, <ga:@sym>
|
||||
// Into: %Xd = ADDIS8 %X3, sym@dtprel@ha
|
||||
Index: lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
+++ lib/Target/PowerPC/PPCISelLowering.cpp
|
||||
@@ -781,6 +781,8 @@ const char *PPCTargetLowering::getTargetNodeName(u
|
||||
case PPCISD::SHL: return "PPCISD::SHL";
|
||||
case PPCISD::CALL: return "PPCISD::CALL";
|
||||
case PPCISD::CALL_NOP: return "PPCISD::CALL_NOP";
|
||||
+ case PPCISD::CALL_TLS: return "PPCISD::CALL_TLS";
|
||||
+ case PPCISD::CALL_NOP_TLS: return "PPCISD::CALL_NOP_TLS";
|
||||
case PPCISD::MTCTR: return "PPCISD::MTCTR";
|
||||
case PPCISD::BCTRL: return "PPCISD::BCTRL";
|
||||
case PPCISD::RET_FLAG: return "PPCISD::RET_FLAG";
|
||||
@@ -810,10 +812,8 @@ const char *PPCTargetLowering::getTargetNodeName(u
|
||||
case PPCISD::ADD_TLS: return "PPCISD::ADD_TLS";
|
||||
case PPCISD::ADDIS_TLSGD_HA: return "PPCISD::ADDIS_TLSGD_HA";
|
||||
case PPCISD::ADDI_TLSGD_L: return "PPCISD::ADDI_TLSGD_L";
|
||||
- case PPCISD::GET_TLS_ADDR: return "PPCISD::GET_TLS_ADDR";
|
||||
case PPCISD::ADDIS_TLSLD_HA: return "PPCISD::ADDIS_TLSLD_HA";
|
||||
case PPCISD::ADDI_TLSLD_L: return "PPCISD::ADDI_TLSLD_L";
|
||||
- case PPCISD::GET_TLSLD_ADDR: return "PPCISD::GET_TLSLD_ADDR";
|
||||
case PPCISD::ADDIS_DTPREL_HA: return "PPCISD::ADDIS_DTPREL_HA";
|
||||
case PPCISD::ADDI_DTPREL_L: return "PPCISD::ADDI_DTPREL_L";
|
||||
case PPCISD::VADD_SPLAT: return "PPCISD::VADD_SPLAT";
|
||||
@@ -1641,6 +1641,27 @@ SDValue PPCTargetLowering::LowerBlockAddress(SDVal
|
||||
return LowerLabelRef(TgtBAHi, TgtBALo, isPIC, DAG);
|
||||
}
|
||||
|
||||
+// Generate a call to __tls_get_addr for the given GOT entry Op.
|
||||
+std::pair<SDValue,SDValue>
|
||||
+PPCTargetLowering::lowerTLSCall(SDValue Op, SDLoc dl,
|
||||
+ SelectionDAG &DAG) const {
|
||||
+
|
||||
+ Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
|
||||
+ TargetLowering::ArgListTy Args;
|
||||
+ TargetLowering::ArgListEntry Entry;
|
||||
+ Entry.Node = Op;
|
||||
+ Entry.Ty = IntPtrTy;
|
||||
+ Args.push_back(Entry);
|
||||
+
|
||||
+ TargetLowering::CallLoweringInfo CLI(DAG);
|
||||
+ CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
|
||||
+ .setCallee(CallingConv::C, IntPtrTy,
|
||||
+ DAG.getTargetExternalSymbol("__tls_get_addr", getPointerTy()),
|
||||
+ std::move(Args), 0);
|
||||
+
|
||||
+ return LowerCallTo(CLI);
|
||||
+}
|
||||
+
|
||||
SDValue PPCTargetLowering::LowerGlobalTLSAddress(SDValue Op,
|
||||
SelectionDAG &DAG) const {
|
||||
|
||||
@@ -1686,7 +1707,8 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
}
|
||||
|
||||
if (Model == TLSModel::GeneralDynamic) {
|
||||
- SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
|
||||
+ SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
|
||||
+ PPCII::MO_TLSGD);
|
||||
SDValue GOTPtr;
|
||||
if (is64bit) {
|
||||
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
|
||||
@@ -1700,26 +1722,13 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
}
|
||||
SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSGD_L, dl, PtrVT,
|
||||
GOTPtr, TGA);
|
||||
-
|
||||
- // We need a chain node, and don't have one handy. The underlying
|
||||
- // call has no side effects, so using the function entry node
|
||||
- // suffices.
|
||||
- SDValue Chain = DAG.getEntryNode();
|
||||
- Chain = DAG.getCopyToReg(Chain, dl,
|
||||
- is64bit ? PPC::X3 : PPC::R3, GOTEntry);
|
||||
- SDValue ParmReg = DAG.getRegister(is64bit ? PPC::X3 : PPC::R3,
|
||||
- is64bit ? MVT::i64 : MVT::i32);
|
||||
- SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLS_ADDR, dl,
|
||||
- PtrVT, ParmReg, TGA);
|
||||
- // The return value from GET_TLS_ADDR really is in X3 already, but
|
||||
- // some hacks are needed here to tie everything together. The extra
|
||||
- // copies dissolve during subsequent transforms.
|
||||
- Chain = DAG.getCopyToReg(Chain, dl, is64bit ? PPC::X3 : PPC::R3, TLSAddr);
|
||||
- return DAG.getCopyFromReg(Chain, dl, is64bit ? PPC::X3 : PPC::R3, PtrVT);
|
||||
+ std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
|
||||
+ return CallResult.first;
|
||||
}
|
||||
|
||||
if (Model == TLSModel::LocalDynamic) {
|
||||
- SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, 0);
|
||||
+ SDValue TGA = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
|
||||
+ PPCII::MO_TLSLD);
|
||||
SDValue GOTPtr;
|
||||
if (is64bit) {
|
||||
SDValue GOTReg = DAG.getRegister(PPC::X2, MVT::i64);
|
||||
@@ -1733,23 +1742,11 @@ SDValue PPCTargetLowering::LowerGlobalTLSAddress(S
|
||||
}
|
||||
SDValue GOTEntry = DAG.getNode(PPCISD::ADDI_TLSLD_L, dl, PtrVT,
|
||||
GOTPtr, TGA);
|
||||
-
|
||||
- // We need a chain node, and don't have one handy. The underlying
|
||||
- // call has no side effects, so using the function entry node
|
||||
- // suffices.
|
||||
- SDValue Chain = DAG.getEntryNode();
|
||||
- Chain = DAG.getCopyToReg(Chain, dl,
|
||||
- is64bit ? PPC::X3 : PPC::R3, GOTEntry);
|
||||
- SDValue ParmReg = DAG.getRegister(is64bit ? PPC::X3 : PPC::R3,
|
||||
- is64bit ? MVT::i64 : MVT::i32);
|
||||
- SDValue TLSAddr = DAG.getNode(PPCISD::GET_TLSLD_ADDR, dl,
|
||||
- PtrVT, ParmReg, TGA);
|
||||
- // The return value from GET_TLSLD_ADDR really is in X3 already, but
|
||||
- // some hacks are needed here to tie everything together. The extra
|
||||
- // copies dissolve during subsequent transforms.
|
||||
- Chain = DAG.getCopyToReg(Chain, dl, is64bit ? PPC::X3 : PPC::R3, TLSAddr);
|
||||
+ std::pair<SDValue, SDValue> CallResult = lowerTLSCall(GOTEntry, dl, DAG);
|
||||
+ SDValue TLSAddr = CallResult.first;
|
||||
+ SDValue Chain = CallResult.second;
|
||||
SDValue DtvOffsetHi = DAG.getNode(PPCISD::ADDIS_DTPREL_HA, dl, PtrVT,
|
||||
- Chain, ParmReg, TGA);
|
||||
+ Chain, TLSAddr, TGA);
|
||||
return DAG.getNode(PPCISD::ADDI_DTPREL_L, dl, PtrVT, DtvOffsetHi, TGA);
|
||||
}
|
||||
|
||||
@@ -3712,6 +3709,23 @@ unsigned PrepareCall(SelectionDAG &DAG, SDValue &C
|
||||
if (Callee.getNode()) {
|
||||
Ops.push_back(Chain);
|
||||
Ops.push_back(Callee);
|
||||
+
|
||||
+ // If this is a call to __tls_get_addr, find the symbol whose address
|
||||
+ // is to be taken and add it to the list. This will be used to
|
||||
+ // generate __tls_get_addr(<sym>@tlsgd) or __tls_get_addr(<sym>@tlsld).
|
||||
+ // We find the symbol by walking the chain to the CopyFromReg, walking
|
||||
+ // back from the CopyFromReg to the ADDI_TLSGD_L or ADDI_TLSLD_L, and
|
||||
+ // pulling the symbol from that node.
|
||||
+ if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee))
|
||||
+ if (!strcmp(S->getSymbol(), "__tls_get_addr")) {
|
||||
+ assert(!needIndirectCall && "Indirect call to __tls_get_addr???");
|
||||
+ SDNode *AddI = Chain.getNode()->getOperand(2).getNode();
|
||||
+ SDValue TGTAddr = AddI->getOperand(1);
|
||||
+ assert(TGTAddr.getNode()->getOpcode() == ISD::TargetGlobalTLSAddress &&
|
||||
+ "Didn't find target global TLS address where we expected one");
|
||||
+ Ops.push_back(TGTAddr);
|
||||
+ CallOpc = PPCISD::CALL_TLS;
|
||||
+ }
|
||||
}
|
||||
// If this is a tail call add stack pointer delta.
|
||||
if (isTailCall)
|
||||
@@ -3863,7 +3877,9 @@ PPCTargetLowering::FinishCall(CallingConv::ID Call
|
||||
DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
|
||||
// Otherwise insert NOP for non-local calls.
|
||||
CallOpc = PPCISD::CALL_NOP;
|
||||
- }
|
||||
+ } else if (CallOpc == PPCISD::CALL_TLS)
|
||||
+ // For 64-bit SVR4, TLS calls are always non-local.
|
||||
+ CallOpc = PPCISD::CALL_NOP_TLS;
|
||||
}
|
||||
|
||||
Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
|
||||
Index: lib/Target/PowerPC/PPCISelLowering.h
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCISelLowering.h
|
||||
+++ lib/Target/PowerPC/PPCISelLowering.h
|
||||
@@ -99,6 +99,10 @@ namespace llvm {
|
||||
/// SVR4 calls.
|
||||
CALL, CALL_NOP,
|
||||
|
||||
+ /// CALL_TLS and CALL_NOP_TLS - Versions of CALL and CALL_NOP used
|
||||
+ /// to access TLS variables.
|
||||
+ CALL_TLS, CALL_NOP_TLS,
|
||||
+
|
||||
/// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
|
||||
/// MTCTR instruction.
|
||||
MTCTR,
|
||||
@@ -214,10 +218,6 @@ namespace llvm {
|
||||
/// sym\@got\@tlsgd\@l.
|
||||
ADDI_TLSGD_L,
|
||||
|
||||
- /// G8RC = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
|
||||
- /// model, produces a call to __tls_get_addr(sym\@tlsgd).
|
||||
- GET_TLS_ADDR,
|
||||
-
|
||||
/// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
|
||||
/// model, produces an ADDIS8 instruction that adds the GOT base
|
||||
/// register to sym\@got\@tlsld\@ha.
|
||||
@@ -228,10 +228,6 @@ namespace llvm {
|
||||
/// sym\@got\@tlsld\@l.
|
||||
ADDI_TLSLD_L,
|
||||
|
||||
- /// G8RC = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
|
||||
- /// model, produces a call to __tls_get_addr(sym\@tlsld).
|
||||
- GET_TLSLD_ADDR,
|
||||
-
|
||||
/// G8RC = ADDIS_DTPREL_HA %X3, Symbol, Chain - For the
|
||||
/// local-dynamic TLS model, produces an ADDIS8 instruction
|
||||
/// that adds X3 to sym\@dtprel\@ha. The Chain operand is needed
|
||||
@@ -552,6 +548,8 @@ namespace llvm {
|
||||
SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
|
||||
+ std::pair<SDValue,SDValue> lowerTLSCall(SDValue Op, SDLoc dl,
|
||||
+ SelectionDAG &DAG) const;
|
||||
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
|
||||
SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
|
||||
Index: lib/Target/PowerPC/PPCInstr64Bit.td
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCInstr64Bit.td
|
||||
+++ lib/Target/PowerPC/PPCInstr64Bit.td
|
||||
@@ -188,6 +188,9 @@ def : Pat<(PPCcall (i64 texternalsym:$dst)),
|
||||
def : Pat<(PPCcall_nop (i64 texternalsym:$dst)),
|
||||
(BL8_NOP texternalsym:$dst)>;
|
||||
|
||||
+def : Pat<(PPCcall_nop_tls texternalsym:$func, tglobaltlsaddr:$sym),
|
||||
+ (BL8_NOP_TLS texternalsym:$func, tglobaltlsaddr:$sym)>;
|
||||
+
|
||||
// Atomic operations
|
||||
let usesCustomInserter = 1 in {
|
||||
let Defs = [CR0] in {
|
||||
@@ -872,11 +875,6 @@ def ADDItlsgdL : Pseudo<(outs g8rc:$rD), (ins g8rc
|
||||
[(set i64:$rD,
|
||||
(PPCaddiTlsgdL i64:$reg, tglobaltlsaddr:$disp))]>,
|
||||
isPPC64;
|
||||
-def GETtlsADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
|
||||
- "#GETtlsADDR",
|
||||
- [(set i64:$rD,
|
||||
- (PPCgetTlsAddr i64:$reg, tglobaltlsaddr:$sym))]>,
|
||||
- isPPC64;
|
||||
def ADDIStlsldHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
|
||||
"#ADDIStlsldHA",
|
||||
[(set i64:$rD,
|
||||
@@ -887,11 +885,6 @@ def ADDItlsldL : Pseudo<(outs g8rc:$rD), (ins g8rc
|
||||
[(set i64:$rD,
|
||||
(PPCaddiTlsldL i64:$reg, tglobaltlsaddr:$disp))]>,
|
||||
isPPC64;
|
||||
-def GETtlsldADDR : Pseudo<(outs g8rc:$rD), (ins g8rc:$reg, tlsgd:$sym),
|
||||
- "#GETtlsldADDR",
|
||||
- [(set i64:$rD,
|
||||
- (PPCgetTlsldAddr i64:$reg, tglobaltlsaddr:$sym))]>,
|
||||
- isPPC64;
|
||||
def ADDISdtprelHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp),
|
||||
"#ADDISdtprelHA",
|
||||
[(set i64:$rD,
|
||||
Index: lib/Target/PowerPC/PPCInstrInfo.td
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCInstrInfo.td
|
||||
+++ lib/Target/PowerPC/PPCInstrInfo.td
|
||||
@@ -110,10 +110,8 @@ def PPCldGotTprelL : SDNode<"PPCISD::LD_GOT_TPREL_
|
||||
def PPCaddTls : SDNode<"PPCISD::ADD_TLS", SDTIntBinOp, []>;
|
||||
def PPCaddisTlsgdHA : SDNode<"PPCISD::ADDIS_TLSGD_HA", SDTIntBinOp>;
|
||||
def PPCaddiTlsgdL : SDNode<"PPCISD::ADDI_TLSGD_L", SDTIntBinOp>;
|
||||
-def PPCgetTlsAddr : SDNode<"PPCISD::GET_TLS_ADDR", SDTIntBinOp>;
|
||||
def PPCaddisTlsldHA : SDNode<"PPCISD::ADDIS_TLSLD_HA", SDTIntBinOp>;
|
||||
def PPCaddiTlsldL : SDNode<"PPCISD::ADDI_TLSLD_L", SDTIntBinOp>;
|
||||
-def PPCgetTlsldAddr : SDNode<"PPCISD::GET_TLSLD_ADDR", SDTIntBinOp>;
|
||||
def PPCaddisDtprelHA : SDNode<"PPCISD::ADDIS_DTPREL_HA", SDTIntBinOp,
|
||||
[SDNPHasChain]>;
|
||||
def PPCaddiDtprelL : SDNode<"PPCISD::ADDI_DTPREL_L", SDTIntBinOp>;
|
||||
@@ -136,9 +134,15 @@ def SDT_PPCCall : SDTypeProfile<0, -1, [SDTCisIn
|
||||
def PPCcall : SDNode<"PPCISD::CALL", SDT_PPCCall,
|
||||
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
|
||||
SDNPVariadic]>;
|
||||
+def PPCcall_tls : SDNode<"PPCISD::CALL_TLS", SDT_PPCCall,
|
||||
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
|
||||
+ SDNPVariadic]>;
|
||||
def PPCcall_nop : SDNode<"PPCISD::CALL_NOP", SDT_PPCCall,
|
||||
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
|
||||
SDNPVariadic]>;
|
||||
+def PPCcall_nop_tls : SDNode<"PPCISD::CALL_NOP_TLS", SDT_PPCCall,
|
||||
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
|
||||
+ SDNPVariadic]>;
|
||||
def PPCload : SDNode<"PPCISD::LOAD", SDTypeProfile<1, 1, []>,
|
||||
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
|
||||
def PPCload_toc : SDNode<"PPCISD::LOAD_TOC", SDTypeProfile<0, 1, []>,
|
||||
@@ -2369,6 +2373,8 @@ def : Pat<(PPCcall (i32 tglobaladdr:$dst)),
|
||||
def : Pat<(PPCcall (i32 texternalsym:$dst)),
|
||||
(BL texternalsym:$dst)>;
|
||||
|
||||
+def : Pat<(PPCcall_tls texternalsym:$func, tglobaltlsaddr:$sym),
|
||||
+ (BL_TLS texternalsym:$func, tglobaltlsaddr:$sym)>;
|
||||
|
||||
def : Pat<(PPCtc_return (i32 tglobaladdr:$dst), imm:$imm),
|
||||
(TCRETURNdi tglobaladdr:$dst, imm:$imm)>;
|
||||
@@ -2424,18 +2430,10 @@ def ADDItlsgdL32 : Pseudo<(outs gprc:$rD), (ins gp
|
||||
"#ADDItlsgdL32",
|
||||
[(set i32:$rD,
|
||||
(PPCaddiTlsgdL i32:$reg, tglobaltlsaddr:$disp))]>;
|
||||
-def GETtlsADDR32 : Pseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
|
||||
- "#GETtlsADDR32",
|
||||
- [(set i32:$rD,
|
||||
- (PPCgetTlsAddr i32:$reg, tglobaltlsaddr:$sym))]>;
|
||||
def ADDItlsldL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
|
||||
"#ADDItlsldL32",
|
||||
[(set i32:$rD,
|
||||
(PPCaddiTlsldL i32:$reg, tglobaltlsaddr:$disp))]>;
|
||||
-def GETtlsldADDR32 : Pseudo<(outs gprc:$rD), (ins gprc:$reg, tlsgd32:$sym),
|
||||
- "#GETtlsldADDR32",
|
||||
- [(set i32:$rD,
|
||||
- (PPCgetTlsldAddr i32:$reg, tglobaltlsaddr:$sym))]>;
|
||||
def ADDIdtprelL32 : Pseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, s16imm:$disp),
|
||||
"#ADDIdtprelL32",
|
||||
[(set i32:$rD,
|
||||
Index: lib/Target/PowerPC/PPCMCInstLower.cpp
|
||||
===================================================================
|
||||
--- lib/Target/PowerPC/PPCMCInstLower.cpp
|
||||
+++ lib/Target/PowerPC/PPCMCInstLower.cpp
|
||||
@@ -137,6 +137,12 @@ static MCOperand GetSymbolRef(const MachineOperand
|
||||
case PPCII::MO_TLS:
|
||||
RefKind = MCSymbolRefExpr::VK_PPC_TLS;
|
||||
break;
|
||||
+ case PPCII::MO_TLSGD:
|
||||
+ RefKind = MCSymbolRefExpr::VK_PPC_TLSGD;
|
||||
+ break;
|
||||
+ case PPCII::MO_TLSLD:
|
||||
+ RefKind = MCSymbolRefExpr::VK_PPC_TLSLD;
|
||||
+ break;
|
||||
}
|
||||
|
||||
if (MO.getTargetFlags() == PPCII::MO_PLT_OR_STUB && !isDarwin)
|
||||
Index: test/CodeGen/PowerPC/tls-store2.ll
|
||||
===================================================================
|
||||
--- test/CodeGen/PowerPC/tls-store2.ll
|
||||
+++ test/CodeGen/PowerPC/tls-store2.ll
|
||||
@@ -0,0 +1,33 @@
|
||||
+; RUN: llc -march=ppc64 -mcpu=pwr7 -O2 -relocation-model=pic < %s | FileCheck %s
|
||||
+
|
||||
+target datalayout = "e-m:e-i64:64-n32:64"
|
||||
+target triple = "powerpc64le-unknown-linux-gnu"
|
||||
+
|
||||
+; Test back-to-back stores of TLS variables to ensure call sequences no
|
||||
+; longer overlap.
|
||||
+
|
||||
+@__once_callable = external thread_local global i8**
|
||||
+@__once_call = external thread_local global void ()*
|
||||
+
|
||||
+define i64 @call_once(i64 %flag, i8* %ptr) {
|
||||
+entry:
|
||||
+ %var = alloca i8*, align 8
|
||||
+ store i8* %ptr, i8** %var, align 8
|
||||
+ store i8** %var, i8*** @__once_callable, align 8
|
||||
+ store void ()* @__once_call_impl, void ()** @__once_call, align 8
|
||||
+ ret i64 %flag
|
||||
+}
|
||||
+
|
||||
+; CHECK-LABEL: call_once:
|
||||
+; CHECK: addis 3, 2, __once_callable@got@tlsgd@ha
|
||||
+; CHECK: addi 3, 3, __once_callable@got@tlsgd@l
|
||||
+; CHECK: bl __tls_get_addr(__once_callable@tlsgd)
|
||||
+; CHECK-NEXT: nop
|
||||
+; CHECK: std {{[0-9]+}}, 0(3)
|
||||
+; CHECK: addis 3, 2, __once_call@got@tlsgd@ha
|
||||
+; CHECK: addi 3, 3, __once_call@got@tlsgd@l
|
||||
+; CHECK: bl __tls_get_addr(__once_call@tlsgd)
|
||||
+; CHECK-NEXT: nop
|
||||
+; CHECK: std {{[0-9]+}}, 0(3)
|
||||
+
|
||||
+declare void @__once_call_impl()
|
@ -2115,6 +2115,9 @@ def warn_attribute_invalid_on_definition : Warning<
|
||||
InGroup<IgnoredAttributes>;
|
||||
def err_attribute_dll_redeclaration : Error<
|
||||
"redeclaration of %q0 cannot add %q1 attribute">;
|
||||
def warn_attribute_dll_redeclaration : Warning<
|
||||
"redeclaration of %q0 should not add %q1 attribute">,
|
||||
InGroup<DiagGroup<"dll-attribute-on-redeclaration">>;
|
||||
def err_attribute_dllimport_function_definition : Error<
|
||||
"dllimport cannot be applied to non-inline function definition">;
|
||||
def err_attribute_dll_deleted : Error<
|
||||
|
@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
|
||||
|
||||
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
|
||||
// pick up a tag in an SVN export, for example.
|
||||
StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_350/final/lib/Basic/Version.cpp $");
|
||||
StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_351/final/lib/Basic/Version.cpp $");
|
||||
if (URL.empty()) {
|
||||
URL = SVNRepository.slice(SVNRepository.find(':'),
|
||||
SVNRepository.find("/lib/Basic"));
|
||||
|
@ -3216,18 +3216,26 @@ Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
|
||||
|
||||
Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
|
||||
QualType Ty = VE->getType();
|
||||
|
||||
if (Ty->isVariablyModifiedType())
|
||||
CGF.EmitVariablyModifiedType(Ty);
|
||||
|
||||
llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
|
||||
llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
|
||||
llvm::Type *ArgTy = ConvertType(VE->getType());
|
||||
|
||||
// If EmitVAArg fails, we fall back to the LLVM instruction.
|
||||
if (!ArgPtr)
|
||||
return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
|
||||
return Builder.CreateVAArg(ArgValue, ArgTy);
|
||||
|
||||
// FIXME Volatility.
|
||||
return Builder.CreateLoad(ArgPtr);
|
||||
llvm::Value *Val = Builder.CreateLoad(ArgPtr);
|
||||
|
||||
// If EmitVAArg promoted the type, we must truncate it.
|
||||
if (ArgTy != Val->getType())
|
||||
Val = Builder.CreateTrunc(Val, ArgTy);
|
||||
|
||||
return Val;
|
||||
}
|
||||
|
||||
Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
|
||||
|
@ -5544,15 +5544,19 @@ MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
|
||||
// If we have reached here, aggregates are passed directly by coercing to
|
||||
// another structure type. Padding is inserted if the offset of the
|
||||
// aggregate is unaligned.
|
||||
return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
|
||||
getPaddingType(OrigOffset, CurrOffset));
|
||||
ABIArgInfo ArgInfo =
|
||||
ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
|
||||
getPaddingType(OrigOffset, CurrOffset));
|
||||
ArgInfo.setInReg(true);
|
||||
return ArgInfo;
|
||||
}
|
||||
|
||||
// Treat an enum type as its underlying type.
|
||||
if (const EnumType *EnumTy = Ty->getAs<EnumType>())
|
||||
Ty = EnumTy->getDecl()->getIntegerType();
|
||||
|
||||
if (Ty->isPromotableIntegerType())
|
||||
// All integral types are promoted to the GPR width.
|
||||
if (Ty->isIntegralOrEnumerationType())
|
||||
return ABIArgInfo::getExtend();
|
||||
|
||||
return ABIArgInfo::getDirect(
|
||||
@ -5604,7 +5608,12 @@ MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
|
||||
ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
|
||||
uint64_t Size = getContext().getTypeSize(RetTy);
|
||||
|
||||
if (RetTy->isVoidType() || Size == 0)
|
||||
if (RetTy->isVoidType())
|
||||
return ABIArgInfo::getIgnore();
|
||||
|
||||
// O32 doesn't treat zero-sized structs differently from other structs.
|
||||
// However, N32/N64 ignores zero sized return values.
|
||||
if (!IsO32 && Size == 0)
|
||||
return ABIArgInfo::getIgnore();
|
||||
|
||||
if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
|
||||
@ -5612,12 +5621,15 @@ ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
|
||||
if (RetTy->isAnyComplexType())
|
||||
return ABIArgInfo::getDirect();
|
||||
|
||||
// O32 returns integer vectors in registers.
|
||||
if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
|
||||
return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
|
||||
|
||||
if (!IsO32)
|
||||
return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
|
||||
// O32 returns integer vectors in registers and N32/N64 returns all small
|
||||
// aggregates in registers..
|
||||
if (!IsO32 ||
|
||||
(RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
|
||||
ABIArgInfo ArgInfo =
|
||||
ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
|
||||
ArgInfo.setInReg(true);
|
||||
return ArgInfo;
|
||||
}
|
||||
}
|
||||
|
||||
return ABIArgInfo::getIndirect(0);
|
||||
@ -5647,11 +5659,20 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
|
||||
CodeGenFunction &CGF) const {
|
||||
llvm::Type *BP = CGF.Int8PtrTy;
|
||||
llvm::Type *BPP = CGF.Int8PtrPtrTy;
|
||||
|
||||
// Integer arguments are promoted 32-bit on O32 and 64-bit on N32/N64.
|
||||
unsigned SlotSizeInBits = IsO32 ? 32 : 64;
|
||||
if (Ty->isIntegerType() &&
|
||||
CGF.getContext().getIntWidth(Ty) < SlotSizeInBits) {
|
||||
Ty = CGF.getContext().getIntTypeForBitwidth(SlotSizeInBits,
|
||||
Ty->isSignedIntegerType());
|
||||
}
|
||||
|
||||
CGBuilderTy &Builder = CGF.Builder;
|
||||
llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
|
||||
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
|
||||
int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
|
||||
int64_t TypeAlign =
|
||||
std::min(getContext().getTypeAlign(Ty) / 8, StackAlignInBytes);
|
||||
llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
|
||||
llvm::Value *AddrTyped;
|
||||
unsigned PtrWidth = getTarget().getPointerWidth(0);
|
||||
@ -5670,8 +5691,8 @@ llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
|
||||
|
||||
llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
|
||||
TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
|
||||
uint64_t Offset =
|
||||
llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
|
||||
unsigned ArgSizeInBits = CGF.getContext().getTypeSize(Ty);
|
||||
uint64_t Offset = llvm::RoundUpToAlignment(ArgSizeInBits / 8, TypeAlign);
|
||||
llvm::Value *NextAddr =
|
||||
Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
|
||||
"ap.next");
|
||||
|
@ -5020,7 +5020,7 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
|
||||
NewDecl = NewTD->getTemplatedDecl();
|
||||
|
||||
if (!OldDecl || !NewDecl)
|
||||
return;
|
||||
return;
|
||||
|
||||
const DLLImportAttr *OldImportAttr = OldDecl->getAttr<DLLImportAttr>();
|
||||
const DLLExportAttr *OldExportAttr = OldDecl->getAttr<DLLExportAttr>();
|
||||
@ -5037,13 +5037,30 @@ static void checkDLLAttributeRedeclaration(Sema &S, NamedDecl *OldDecl,
|
||||
// Implicitly generated declarations are also excluded for now because there
|
||||
// is no other way to switch these to use dllimport or dllexport.
|
||||
bool AddsAttr = !(OldImportAttr || OldExportAttr) && HasNewAttr;
|
||||
|
||||
if (AddsAttr && !IsSpecialization && !OldDecl->isImplicit()) {
|
||||
S.Diag(NewDecl->getLocation(), diag::err_attribute_dll_redeclaration)
|
||||
<< NewDecl
|
||||
<< (NewImportAttr ? (const Attr *)NewImportAttr : NewExportAttr);
|
||||
// If the declaration hasn't been used yet, allow with a warning for
|
||||
// free functions and global variables.
|
||||
bool JustWarn = false;
|
||||
if (!OldDecl->isUsed() && !OldDecl->isCXXClassMember()) {
|
||||
auto *VD = dyn_cast<VarDecl>(OldDecl);
|
||||
if (VD && !VD->getDescribedVarTemplate())
|
||||
JustWarn = true;
|
||||
auto *FD = dyn_cast<FunctionDecl>(OldDecl);
|
||||
if (FD && FD->getTemplatedKind() == FunctionDecl::TK_NonTemplate)
|
||||
JustWarn = true;
|
||||
}
|
||||
|
||||
unsigned DiagID = JustWarn ? diag::warn_attribute_dll_redeclaration
|
||||
: diag::err_attribute_dll_redeclaration;
|
||||
S.Diag(NewDecl->getLocation(), DiagID)
|
||||
<< NewDecl
|
||||
<< (NewImportAttr ? (const Attr *)NewImportAttr : NewExportAttr);
|
||||
S.Diag(OldDecl->getLocation(), diag::note_previous_declaration);
|
||||
NewDecl->setInvalidDecl();
|
||||
return;
|
||||
if (!JustWarn) {
|
||||
NewDecl->setInvalidDecl();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// A redeclaration is not allowed to drop a dllimport attribute, the only
|
||||
|
@ -3692,12 +3692,12 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
|
||||
ArgumentPack.size(), Converted))
|
||||
return true;
|
||||
|
||||
if (TemplateArgs[ArgIdx].getArgument().isPackExpansion() &&
|
||||
isa<TypeAliasTemplateDecl>(Template) &&
|
||||
!(Param + 1 == ParamEnd && (*Param)->isTemplateParameterPack() &&
|
||||
!getExpandedPackSize(*Param))) {
|
||||
bool PackExpansionIntoNonPack =
|
||||
TemplateArgs[ArgIdx].getArgument().isPackExpansion() &&
|
||||
(!(*Param)->isTemplateParameterPack() || getExpandedPackSize(*Param));
|
||||
if (PackExpansionIntoNonPack && isa<TypeAliasTemplateDecl>(Template)) {
|
||||
// Core issue 1430: we have a pack expansion as an argument to an
|
||||
// alias template, and it's not part of a final parameter pack. This
|
||||
// alias template, and it's not part of a parameter pack. This
|
||||
// can't be canonicalized, so reject it now.
|
||||
Diag(TemplateArgs[ArgIdx].getLocation(),
|
||||
diag::err_alias_template_expansion_into_fixed_list)
|
||||
@ -3720,16 +3720,11 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
|
||||
++Param;
|
||||
}
|
||||
|
||||
// If we just saw a pack expansion, then directly convert the remaining
|
||||
// arguments, because we don't know what parameters they'll match up
|
||||
// with.
|
||||
if (TemplateArgs[ArgIdx-1].getArgument().isPackExpansion()) {
|
||||
bool InFinalParameterPack = Param != ParamEnd &&
|
||||
Param + 1 == ParamEnd &&
|
||||
(*Param)->isTemplateParameterPack() &&
|
||||
!getExpandedPackSize(*Param);
|
||||
|
||||
if (!InFinalParameterPack && !ArgumentPack.empty()) {
|
||||
// If we just saw a pack expansion into a non-pack, then directly convert
|
||||
// the remaining arguments, because we don't know what parameters they'll
|
||||
// match up with.
|
||||
if (PackExpansionIntoNonPack) {
|
||||
if (!ArgumentPack.empty()) {
|
||||
// If we were part way through filling in an expanded parameter pack,
|
||||
// fall back to just producing individual arguments.
|
||||
Converted.insert(Converted.end(),
|
||||
@ -3738,22 +3733,10 @@ bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
|
||||
}
|
||||
|
||||
while (ArgIdx < NumArgs) {
|
||||
if (InFinalParameterPack)
|
||||
ArgumentPack.push_back(TemplateArgs[ArgIdx].getArgument());
|
||||
else
|
||||
Converted.push_back(TemplateArgs[ArgIdx].getArgument());
|
||||
Converted.push_back(TemplateArgs[ArgIdx].getArgument());
|
||||
++ArgIdx;
|
||||
}
|
||||
|
||||
// Push the argument pack onto the list of converted arguments.
|
||||
if (InFinalParameterPack) {
|
||||
Converted.push_back(
|
||||
TemplateArgument::CreatePackCopy(Context,
|
||||
ArgumentPack.data(),
|
||||
ArgumentPack.size()));
|
||||
ArgumentPack.clear();
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -35,23 +35,26 @@ class CallingConvEmitter {
|
||||
} // End anonymous namespace
|
||||
|
||||
void CallingConvEmitter::run(raw_ostream &O) {
|
||||
|
||||
std::vector<Record*> CCs = Records.getAllDerivedDefinitions("CallingConv");
|
||||
|
||||
// Emit prototypes for all of the CC's so that they can forward ref each
|
||||
// other.
|
||||
|
||||
// Emit prototypes for all of the non-custom CC's so that they can forward ref
|
||||
// each other.
|
||||
for (unsigned i = 0, e = CCs.size(); i != e; ++i) {
|
||||
O << "static bool " << CCs[i]->getName()
|
||||
<< "(unsigned ValNo, MVT ValVT,\n"
|
||||
<< std::string(CCs[i]->getName().size()+13, ' ')
|
||||
<< "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
|
||||
<< std::string(CCs[i]->getName().size()+13, ' ')
|
||||
<< "ISD::ArgFlagsTy ArgFlags, CCState &State);\n";
|
||||
if (!CCs[i]->getValueAsBit("Custom")) {
|
||||
O << "static bool " << CCs[i]->getName()
|
||||
<< "(unsigned ValNo, MVT ValVT,\n"
|
||||
<< std::string(CCs[i]->getName().size() + 13, ' ')
|
||||
<< "MVT LocVT, CCValAssign::LocInfo LocInfo,\n"
|
||||
<< std::string(CCs[i]->getName().size() + 13, ' ')
|
||||
<< "ISD::ArgFlagsTy ArgFlags, CCState &State);\n";
|
||||
}
|
||||
}
|
||||
|
||||
// Emit each non-custom calling convention description in full.
|
||||
for (unsigned i = 0, e = CCs.size(); i != e; ++i) {
|
||||
if (!CCs[i]->getValueAsBit("Custom"))
|
||||
EmitCallingConv(CCs[i], O);
|
||||
}
|
||||
|
||||
// Emit each calling convention description in full.
|
||||
for (unsigned i = 0, e = CCs.size(); i != e; ++i)
|
||||
EmitCallingConv(CCs[i], O);
|
||||
}
|
||||
|
||||
|
||||
@ -227,6 +230,21 @@ void CallingConvEmitter::EmitAction(Record *Action,
|
||||
<< IndentStr << "else\n"
|
||||
<< IndentStr << IndentStr << "LocInfo = CCValAssign::AExt;\n";
|
||||
}
|
||||
} else if (Action->isSubClassOf("CCPromoteToUpperBitsInType")) {
|
||||
Record *DestTy = Action->getValueAsDef("DestTy");
|
||||
MVT::SimpleValueType DestVT = getValueType(DestTy);
|
||||
O << IndentStr << "LocVT = " << getEnumName(DestVT) << ";\n";
|
||||
if (MVT(DestVT).isFloatingPoint()) {
|
||||
PrintFatalError("CCPromoteToUpperBitsInType does not handle floating "
|
||||
"point");
|
||||
} else {
|
||||
O << IndentStr << "if (ArgFlags.isSExt())\n"
|
||||
<< IndentStr << IndentStr << "LocInfo = CCValAssign::SExtUpper;\n"
|
||||
<< IndentStr << "else if (ArgFlags.isZExt())\n"
|
||||
<< IndentStr << IndentStr << "LocInfo = CCValAssign::ZExtUpper;\n"
|
||||
<< IndentStr << "else\n"
|
||||
<< IndentStr << IndentStr << "LocInfo = CCValAssign::AExtUpper;\n";
|
||||
}
|
||||
} else if (Action->isSubClassOf("CCBitConvertToType")) {
|
||||
Record *DestTy = Action->getValueAsDef("DestTy");
|
||||
O << IndentStr << "LocVT = " << getEnumName(getValueType(DestTy)) <<";\n";
|
||||
|
@ -25,7 +25,7 @@
|
||||
..
|
||||
lib
|
||||
clang
|
||||
3.5.0
|
||||
3.5.1
|
||||
lib
|
||||
freebsd
|
||||
..
|
||||
|
@ -96,7 +96,7 @@
|
||||
..
|
||||
..
|
||||
clang
|
||||
3.5.0
|
||||
3.5.1
|
||||
..
|
||||
..
|
||||
crypto
|
||||
|
@ -15,7 +15,7 @@
|
||||
aout
|
||||
..
|
||||
clang
|
||||
3.5.0
|
||||
3.5.1
|
||||
lib
|
||||
freebsd
|
||||
..
|
||||
|
@ -8,7 +8,7 @@ LLVM_SRCS= ${.CURDIR}/../../../contrib/llvm
|
||||
|
||||
.PATH: ${LLVM_SRCS}/tools/clang/lib/Headers
|
||||
|
||||
INCSDIR=${INCLUDEDIR}/clang/3.5.0
|
||||
INCSDIR=${INCLUDEDIR}/clang/3.5.1
|
||||
|
||||
INCS= __wmmintrin_aes.h \
|
||||
__wmmintrin_pclmul.h \
|
||||
|
@ -1,11 +1,11 @@
 /* $FreeBSD$ */
 
-#define CLANG_VERSION 3.5.0
+#define CLANG_VERSION 3.5.1
 #define CLANG_VERSION_MAJOR 3
 #define CLANG_VERSION_MINOR 5
-#define CLANG_VERSION_PATCHLEVEL 0
+#define CLANG_VERSION_PATCHLEVEL 1
 
 #define CLANG_VENDOR "FreeBSD "
-#define CLANG_VENDOR_SUFFIX " 20141124"
+#define CLANG_VENDOR_SUFFIX " 20150115"
 
-#define SVN_REVISION "216957"
+#define SVN_REVISION "225668"

@ -28,7 +28,7 @@
|
||||
/* Define if we have libxml2 */
|
||||
/* #undef CLANG_HAVE_LIBXML */
|
||||
|
||||
#define PACKAGE_STRING "LLVM 3.5.0"
|
||||
#define PACKAGE_STRING "LLVM 3.5.1"
|
||||
|
||||
/* The LLVM product name and version */
|
||||
#define BACKEND_PACKAGE_STRING PACKAGE_STRING
|
||||
|
@ -564,13 +564,13 @@
|
||||
#define PACKAGE_NAME "LLVM"
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING "LLVM 3.5.0"
|
||||
#define PACKAGE_STRING "LLVM 3.5.1"
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME "llvm"
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION "3.5.0"
|
||||
#define PACKAGE_VERSION "3.5.1"
|
||||
|
||||
/* Define as the return type of signal handlers (`int' or `void'). */
|
||||
#define RETSIGTYPE void
|
||||
|
@ -12,8 +12,10 @@ SRCS= Mips16FrameLowering.cpp \
|
||||
Mips16ISelLowering.cpp \
|
||||
Mips16InstrInfo.cpp \
|
||||
Mips16RegisterInfo.cpp \
|
||||
MipsABIInfo.cpp \
|
||||
MipsAnalyzeImmediate.cpp \
|
||||
MipsAsmPrinter.cpp \
|
||||
MipsCCState.cpp \
|
||||
MipsCodeEmitter.cpp \
|
||||
MipsConstantIslandPass.cpp \
|
||||
MipsDelaySlotFiller.cpp \
|
||||
|
@ -5,7 +5,7 @@
 CRTARCH=${MACHINE_CPUARCH:C/amd64/x86_64/}
 CRTSRC=${.CURDIR}/../../../contrib/compiler-rt
 
-LIBDIR=/usr/lib/clang/3.5.0/lib/freebsd
+LIBDIR=/usr/lib/clang/3.5.1/lib/freebsd
 
 NO_PIC=
 MK_PROFILE=no

@ -489,60 +489,60 @@ OLD_FILES+=usr/bin/clang++
|
||||
OLD_FILES+=usr/bin/clang-cpp
|
||||
OLD_FILES+=usr/bin/clang-tblgen
|
||||
OLD_FILES+=usr/bin/tblgen
|
||||
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_aes.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/__wmmintrin_pclmul.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/altivec.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/ammintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/arm_acle.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/arm_neon.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/avx2intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/avxintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/bmi2intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/bmiintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/cpuid.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/emmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/f16cintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/fma4intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/fmaintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/ia32intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/immintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/lzcntintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/mm3dnow.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/mm_malloc.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/mmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/module.modulemap
|
||||
OLD_FILES+=usr/include/clang/3.5.0/nmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/pmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/popcntintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/prfchwintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/rdseedintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/rtmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/shaintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/smmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/tbmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/tmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/wmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/x86intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/xmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.0/xopintrin.h
|
||||
OLD_DIRS+=usr/include/clang/3.5.0
|
||||
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_aes.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/__wmmintrin_pclmul.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/altivec.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/ammintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/arm_acle.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/arm_neon.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/avx2intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/avxintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/bmi2intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/bmiintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/cpuid.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/emmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/f16cintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/fma4intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/fmaintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/ia32intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/immintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/lzcntintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/mm3dnow.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/mm_malloc.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/mmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/module.modulemap
|
||||
OLD_FILES+=usr/include/clang/3.5.1/nmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/pmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/popcntintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/prfchwintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/rdseedintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/rtmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/shaintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/smmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/tbmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/tmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/wmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/x86intrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/xmmintrin.h
|
||||
OLD_FILES+=usr/include/clang/3.5.1/xopintrin.h
|
||||
OLD_DIRS+=usr/include/clang/3.5.1
|
||||
OLD_DIRS+=usr/include/clang
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-arm.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.profile-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.san-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.0/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
|
||||
OLD_DIRS+=usr/lib/clang/3.5.0/lib/freebsd
|
||||
OLD_DIRS+=usr/lib/clang/3.5.0/lib
|
||||
OLD_DIRS+=usr/lib/clang/3.5.0
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.asan_cxx-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-arm.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.profile-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.san-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan-x86_64.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-i386.a
|
||||
OLD_FILES+=usr/lib/clang/3.5.1/lib/freebsd/libclang_rt.ubsan_cxx-x86_64.a
|
||||
OLD_DIRS+=usr/lib/clang/3.5.1/lib/freebsd
|
||||
OLD_DIRS+=usr/lib/clang/3.5.1/lib
|
||||
OLD_DIRS+=usr/lib/clang/3.5.1
|
||||
OLD_DIRS+=usr/lib/clang
|
||||
OLD_FILES+=usr/share/doc/llvm/clang/LICENSE.TXT
|
||||
OLD_DIRS+=usr/share/doc/llvm/clang