Merge llvm, clang, compiler-rt, libc++, lld and lldb release_40 branch r294803, and update build glue.
Dimitry Andric 2017-02-11 13:58:05 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang400-import/; revision=313643
20 changed files with 173 additions and 253 deletions


@ -160,14 +160,12 @@ namespace std // purposefully not using versioning namespace
{
class _LIBCPP_EXCEPTION_ABI bad_optional_access
: public logic_error
: public exception
{
public:
_LIBCPP_INLINE_VISIBILITY
bad_optional_access() : logic_error("bad optional access") {}
// Get the key function ~bad_optional_access() into the dylib
virtual ~bad_optional_access() _NOEXCEPT;
virtual const char* what() const _NOEXCEPT;
};
} // std

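For illustration (not part of this commit): the base class of bad_optional_access changes from logic_error to exception, matching the final C++17 wording, so user handlers written against std::logic_error no longer catch it, while handlers against std::exception still do. A minimal standalone sketch, assuming a C++17 compiler:

#include <cstdio>
#include <optional>
#include <stdexcept>

int main() {
  std::optional<int> Empty;   // disengaged optional
  try {
    (void)Empty.value();      // throws std::bad_optional_access
  } catch (const std::logic_error &) {
    std::puts("old base class: caught as logic_error");
  } catch (const std::exception &E) {
    std::puts(E.what());      // taken with the new base class
  }
  return 0;
}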

@ -574,7 +574,7 @@ private:
constexpr decltype(auto) operator()(_Alts&&... __alts) const {
__std_visit_exhaustive_visitor_check<
_Visitor,
decltype(_VSTD::forward<_Alts>(__alts).__value)...>();
decltype((_VSTD::forward<_Alts>(__alts).__value))...>();
return __invoke_constexpr(_VSTD::forward<_Visitor>(__visitor),
_VSTD::forward<_Alts>(__alts).__value...);
}

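For illustration (not part of this commit): the extra parentheses matter because decltype of an unparenthesized member access yields the member's declared type, while decltype of the parenthesized expression also carries its value category, which is what the visitor is actually invoked with. A minimal sketch, with Alt standing in for a variant alternative type:

#include <type_traits>
#include <utility>

struct Alt { int __value; };

// Unparenthesized: the declared type of the member.
static_assert(std::is_same<decltype(std::declval<Alt&>().__value), int>::value, "");
// Parenthesized: the expression type, keeping the value category.
static_assert(std::is_same<decltype((std::declval<Alt&>().__value)), int&>::value, "");
static_assert(std::is_same<decltype((std::declval<Alt&&>().__value)), int&&>::value, "");

int main() { return 0; }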

@ -15,6 +15,10 @@ namespace std
bad_optional_access::~bad_optional_access() _NOEXCEPT = default;
const char* bad_optional_access::what() const _NOEXCEPT {
return "bad_optional_access";
}
} // std
_LIBCPP_BEGIN_NAMESPACE_EXPERIMENTAL


@ -102,10 +102,23 @@ class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
return *this;
}
/// Convert from an iterator to its reverse.
/// Explicit conversion between forward/reverse iterators.
///
/// TODO: Roll this into the implicit constructor once we're sure that no one
/// is relying on the std::reverse_iterator off-by-one semantics.
/// Translate between forward and reverse iterators without changing range
/// boundaries. The resulting iterator will dereference (and have a handle)
/// to the previous node, which is somewhat unexpected; but converting the
/// two endpoints in a range will give the same range in reverse.
///
/// This matches std::reverse_iterator conversions.
explicit ilist_iterator(
const ilist_iterator<OptionsT, !IsReverse, IsConst> &RHS)
: ilist_iterator(++RHS.getReverse()) {}
/// Get a reverse iterator to the same node.
///
/// Gives a reverse iterator that will dereference (and have a handle) to the
/// same node. Converting the endpoint iterators in a range will give a
/// different range; for range operations, use the explicit conversions.
ilist_iterator<OptionsT, !IsReverse, IsConst> getReverse() const {
if (NodePtr)
return ilist_iterator<OptionsT, !IsReverse, IsConst>(*NodePtr);

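For illustration (not part of this commit): the same conversion semantics can be seen with std::reverse_iterator on a standard container, which is the model the comment above refers to; the explicit conversion shifts the dereferenced node by one, but converting both endpoints of a range still names the same elements in reverse. A minimal sketch using std::list rather than LLVM's ilist:

#include <cassert>
#include <iterator>
#include <list>

int main() {
  std::list<int> L = {1, 2, 3};
  auto Fwd = std::next(L.begin());            // points at 2
  std::list<int>::reverse_iterator Rev(Fwd);  // explicit conversion
  assert(*Rev == 1);                          // dereferences the previous node
  // Converting both endpoints gives the original range, reversed:
  assert(std::list<int>::reverse_iterator(L.end()) == L.rbegin());
  assert(std::list<int>::reverse_iterator(L.begin()) == L.rend());
  return 0;
}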

@ -153,6 +153,18 @@ class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> {
: MII(I.getInstrIterator()) {}
MachineInstrBundleIterator() : MII(nullptr) {}
/// Explicit conversion between forward/reverse iterators.
///
/// Translate between forward and reverse iterators without changing range
/// boundaries. The resulting iterator will dereference (and have a handle)
/// to the previous node, which is somewhat unexpected; but converting the
/// two endpoints in a range will give the same range in reverse.
///
/// This matches std::reverse_iterator conversions.
explicit MachineInstrBundleIterator(
const MachineInstrBundleIterator<Ty, !IsReverse> &I)
: MachineInstrBundleIterator(++I.getReverse()) {}
/// Get the bundle iterator for the given instruction's bundle.
static MachineInstrBundleIterator getAtBundleBegin(instr_iterator MI) {
return MachineInstrBundleIteratorHelper<IsReverse>::getBundleBegin(MI);
@ -258,6 +270,11 @@ class MachineInstrBundleIterator : MachineInstrBundleIteratorHelper<IsReverse> {
nonconst_iterator getNonConstIterator() const { return MII.getNonConst(); }
/// Get a reverse iterator to the same node.
///
/// Gives a reverse iterator that will dereference (and have a handle) to the
/// same node. Converting the endpoint iterators in a range will give a
/// different range; for range operations, use the explicit conversions.
reverse_iterator getReverse() const { return MII.getReverse(); }
};


@ -311,6 +311,8 @@ template <typename IRUnitT, typename... ExtraArgTs> class AnalysisManager;
template <typename DerivedT> struct PassInfoMixin {
/// Gets the name of the pass we are mixed into.
static StringRef name() {
static_assert(std::is_base_of<PassInfoMixin, DerivedT>::value,
"Must pass the derived type as the template argument!");
StringRef Name = getTypeName<DerivedT>();
if (Name.startswith("llvm::"))
Name = Name.drop_front(strlen("llvm::"));
@ -339,7 +341,11 @@ struct AnalysisInfoMixin : PassInfoMixin<DerivedT> {
/// known platform with this limitation is Windows DLL builds, specifically
/// building each part of LLVM as a DLL. If we ever remove that build
/// configuration, this mixin can provide the static key as well.
static AnalysisKey *ID() { return &DerivedT::Key; }
static AnalysisKey *ID() {
static_assert(std::is_base_of<AnalysisInfoMixin, DerivedT>::value,
"Must pass the derived type as the template argument!");
return &DerivedT::Key;
}
};
/// This templated class represents "all analyses that operate over \<a
@ -1010,7 +1016,7 @@ extern template class InnerAnalysisManagerProxy<FunctionAnalysisManager,
template <typename AnalysisManagerT, typename IRUnitT, typename... ExtraArgTs>
class OuterAnalysisManagerProxy
: public AnalysisInfoMixin<
OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT>> {
OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>> {
public:
/// \brief Result proxy object for \c OuterAnalysisManagerProxy.
class Result {
@ -1072,7 +1078,7 @@ class OuterAnalysisManagerProxy
private:
friend AnalysisInfoMixin<
OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT>>;
OuterAnalysisManagerProxy<AnalysisManagerT, IRUnitT, ExtraArgTs...>>;
static AnalysisKey Key;
const AnalysisManagerT *AM;

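For illustration (not part of this commit): PassInfoMixin and AnalysisInfoMixin are CRTP mixins, and the static_asserts added above in name() and ID() make that contract explicit; instantiating the mixin's members with a type that does not actually derive from the mixin now fails at compile time. A hedged sketch with made-up pass names:

#include "llvm/IR/PassManager.h"

// Intended CRTP usage: the pass names itself as the template argument.
struct HypotheticalPass : llvm::PassInfoMixin<HypotheticalPass> {
  llvm::PreservedAnalyses run(llvm::Function &F,
                              llvm::FunctionAnalysisManager &AM) {
    return llvm::PreservedAnalyses::all();
  }
};

struct NotDerived {};  // does not inherit from PassInfoMixin<NotDerived>

void demo() {
  (void)HypotheticalPass::name();  // fine: the derived type is the argument
  // (void)llvm::PassInfoMixin<NotDerived>::name();
  //   ^ would now trigger "Must pass the derived type as the template argument!"
}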

@ -1108,25 +1108,6 @@ class TargetInstrInfo : public MCInstrInfo {
/// terminator instruction that has not been predicated.
virtual bool isUnpredicatedTerminator(const MachineInstr &MI) const;
/// Returns true if MI is an unconditional tail call.
virtual bool isUnconditionalTailCall(const MachineInstr &MI) const {
return false;
}
/// Returns true if the tail call can be made conditional on BranchCond.
virtual bool
canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
const MachineInstr &TailCall) const {
return false;
}
/// Replace the conditional branch in MBB with a conditional tail call.
virtual void replaceBranchWithTailCall(MachineBasicBlock &MBB,
SmallVectorImpl<MachineOperand> &Cond,
const MachineInstr &TailCall) const {
llvm_unreachable("Target didn't implement replaceBranchWithTailCall!");
}
/// Convert the instruction into a predicated instruction.
/// It returns true if the operation was successful.
virtual bool PredicateInstruction(MachineInstr &MI,


@ -448,6 +448,7 @@ class MetadataLoader::MetadataLoaderImpl {
bool StripTBAA = false;
bool HasSeenOldLoopTags = false;
bool NeedUpgradeToDIGlobalVariableExpression = false;
/// True if metadata is being parsed for a module being ThinLTO imported.
bool IsImporting = false;
@ -473,6 +474,45 @@ class MetadataLoader::MetadataLoaderImpl {
CUSubprograms.clear();
}
/// Upgrade old-style bare DIGlobalVariables to DIGlobalVariableExpressions.
void upgradeCUVariables() {
if (!NeedUpgradeToDIGlobalVariableExpression)
return;
// Upgrade list of variables attached to the CUs.
if (NamedMDNode *CUNodes = TheModule.getNamedMetadata("llvm.dbg.cu"))
for (unsigned I = 0, E = CUNodes->getNumOperands(); I != E; ++I) {
auto *CU = cast<DICompileUnit>(CUNodes->getOperand(I));
if (auto *GVs = dyn_cast_or_null<MDTuple>(CU->getRawGlobalVariables()))
for (unsigned I = 0; I < GVs->getNumOperands(); I++)
if (auto *GV =
dyn_cast_or_null<DIGlobalVariable>(GVs->getOperand(I))) {
auto *DGVE =
DIGlobalVariableExpression::getDistinct(Context, GV, nullptr);
GVs->replaceOperandWith(I, DGVE);
}
}
// Upgrade variables attached to globals.
for (auto &GV : TheModule.globals()) {
SmallVector<MDNode *, 1> MDs, NewMDs;
GV.getMetadata(LLVMContext::MD_dbg, MDs);
GV.eraseMetadata(LLVMContext::MD_dbg);
for (auto *MD : MDs)
if (auto *DGV = dyn_cast_or_null<DIGlobalVariable>(MD)) {
auto *DGVE =
DIGlobalVariableExpression::getDistinct(Context, DGV, nullptr);
GV.addMetadata(LLVMContext::MD_dbg, *DGVE);
} else
GV.addMetadata(LLVMContext::MD_dbg, *MD);
}
}
void upgradeDebugInfo() {
upgradeCUSubprograms();
upgradeCUVariables();
}
public:
MetadataLoaderImpl(BitstreamCursor &Stream, Module &TheModule,
BitcodeReaderValueList &ValueList,
@ -726,7 +766,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseMetadata(bool ModuleLevel) {
// Reading the named metadata created forward references and/or
// placeholders, that we flush here.
resolveForwardRefsAndPlaceholders(Placeholders);
upgradeCUSubprograms();
upgradeDebugInfo();
// Return at the beginning of the block, since it is easy to skip it
// entirely from there.
Stream.ReadBlockEnd(); // Pop the abbrev block context.
@ -750,7 +790,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseMetadata(bool ModuleLevel) {
return error("Malformed block");
case BitstreamEntry::EndBlock:
resolveForwardRefsAndPlaceholders(Placeholders);
upgradeCUSubprograms();
upgradeDebugInfo();
return Error::success();
case BitstreamEntry::Record:
// The interesting case.
@ -1420,11 +1460,17 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
getDITypeRefOrNull(Record[6]), Record[7], Record[8],
getMDOrNull(Record[10]), AlignInBits));
auto *DGVE = DIGlobalVariableExpression::getDistinct(Context, DGV, Expr);
MetadataList.assignValue(DGVE, NextMetadataNo);
NextMetadataNo++;
DIGlobalVariableExpression *DGVE = nullptr;
if (Attach || Expr)
DGVE = DIGlobalVariableExpression::getDistinct(Context, DGV, Expr);
else
NeedUpgradeToDIGlobalVariableExpression = true;
if (Attach)
Attach->addDebugInfo(DGVE);
auto *MDNode = Expr ? cast<Metadata>(DGVE) : cast<Metadata>(DGV);
MetadataList.assignValue(MDNode, NextMetadataNo);
NextMetadataNo++;
} else
return error("Invalid record");


@ -49,7 +49,6 @@ STATISTIC(NumDeadBlocks, "Number of dead blocks removed");
STATISTIC(NumBranchOpts, "Number of branches optimized");
STATISTIC(NumTailMerge , "Number of block tails merged");
STATISTIC(NumHoist , "Number of times common instructions are hoisted");
STATISTIC(NumTailCalls, "Number of tail calls optimized");
static cl::opt<cl::boolOrDefault> FlagEnableTailMerge("enable-tail-merge",
cl::init(cl::BOU_UNSET), cl::Hidden);
@ -1387,42 +1386,6 @@ bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) {
}
}
if (!IsEmptyBlock(MBB) && MBB->pred_size() == 1 &&
MF.getFunction()->optForSize()) {
// Changing "Jcc foo; foo: jmp bar;" into "Jcc bar;" might change the branch
// direction, thereby defeating careful block placement and regressing
// performance. Therefore, only consider this for optsize functions.
MachineInstr &TailCall = *MBB->getFirstNonDebugInstr();
if (TII->isUnconditionalTailCall(TailCall)) {
MachineBasicBlock *Pred = *MBB->pred_begin();
MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
bool PredAnalyzable =
!TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true);
if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB) {
// The predecessor has a conditional branch to this block which consists
// of only a tail call. Try to fold the tail call into the conditional
// branch.
if (TII->canMakeTailCallConditional(PredCond, TailCall)) {
// TODO: It would be nice if analyzeBranch() could provide a pointer
// to the branch insturction so replaceBranchWithTailCall() doesn't
// have to search for it.
TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall);
++NumTailCalls;
Pred->removeSuccessor(MBB);
MadeChange = true;
return MadeChange;
}
}
// If the predecessor is falling through to this block, we could reverse
// the branch condition and fold the tail call into that. However, after
// that we might have to re-arrange the CFG to fall through to the other
// block and there is a high risk of regressing code size rather than
// improving it.
}
}
// Analyze the branch in the current block.
MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
SmallVector<MachineOperand, 4> CurCond;


@ -61,6 +61,7 @@ namespace {
private:
void ClobberRegister(unsigned Reg);
void ReadRegister(unsigned Reg);
void CopyPropagateBlock(MachineBasicBlock &MBB);
bool eraseIfRedundant(MachineInstr &Copy, unsigned Src, unsigned Def);
@ -120,6 +121,18 @@ void MachineCopyPropagation::ClobberRegister(unsigned Reg) {
}
}
void MachineCopyPropagation::ReadRegister(unsigned Reg) {
// If 'Reg' is defined by a copy, the copy is no longer a candidate
// for elimination.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
Reg2MIMap::iterator CI = CopyMap.find(*AI);
if (CI != CopyMap.end()) {
DEBUG(dbgs() << "MCP: Copy is used - not dead: "; CI->second->dump());
MaybeDeadCopies.remove(CI->second);
}
}
}
/// Return true if \p PreviousCopy did copy register \p Src to register \p Def.
/// This fact may have been obscured by sub register usage or may not be true at
/// all even though Src and Def are subregisters of the registers used in
@ -212,12 +225,14 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// If Src is defined by a previous copy, the previous copy cannot be
// eliminated.
for (MCRegAliasIterator AI(Src, TRI, true); AI.isValid(); ++AI) {
Reg2MIMap::iterator CI = CopyMap.find(*AI);
if (CI != CopyMap.end()) {
DEBUG(dbgs() << "MCP: Copy is no longer dead: "; CI->second->dump());
MaybeDeadCopies.remove(CI->second);
}
ReadRegister(Src);
for (const MachineOperand &MO : MI->implicit_operands()) {
if (!MO.isReg() || !MO.readsReg())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
ReadRegister(Reg);
}
DEBUG(dbgs() << "MCP: Copy is a deletion candidate: "; MI->dump());
@ -234,6 +249,14 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
// ...
// %xmm2<def> = copy %xmm9
ClobberRegister(Def);
for (const MachineOperand &MO : MI->implicit_operands()) {
if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
if (!Reg)
continue;
ClobberRegister(Reg);
}
// Remember Def is defined by the copy.
for (MCSubRegIterator SR(Def, TRI, /*IncludeSelf=*/true); SR.isValid();
@ -268,17 +291,8 @@ void MachineCopyPropagation::CopyPropagateBlock(MachineBasicBlock &MBB) {
if (MO.isDef()) {
Defs.push_back(Reg);
continue;
}
// If 'Reg' is defined by a copy, the copy is no longer a candidate
// for elimination.
for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
Reg2MIMap::iterator CI = CopyMap.find(*AI);
if (CI != CopyMap.end()) {
DEBUG(dbgs() << "MCP: Copy is used - not dead: "; CI->second->dump());
MaybeDeadCopies.remove(CI->second);
}
} else {
ReadRegister(Reg);
}
// Treat undef use like defs for copy propagation but not for
// dead copy. We would need to do a liveness check to be sure the copy


@ -1556,9 +1556,10 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) {
bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
unsigned DstReg = CP.getDstReg();
unsigned SrcReg = CP.getSrcReg();
assert(CP.isPhys() && "Must be a physreg copy");
assert(MRI->isReserved(DstReg) && "Not a reserved register");
LiveInterval &RHS = LIS->getInterval(CP.getSrcReg());
LiveInterval &RHS = LIS->getInterval(SrcReg);
DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n');
assert(RHS.containsOneValue() && "Invalid join with reserved register");
@ -1592,17 +1593,36 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
// Delete the identity copy.
MachineInstr *CopyMI;
if (CP.isFlipped()) {
CopyMI = MRI->getVRegDef(RHS.reg);
// Physreg is copied into vreg
// %vregY = COPY %X
// ... //< no other def of %X here
// use %vregY
// =>
// ...
// use %X
CopyMI = MRI->getVRegDef(SrcReg);
} else {
if (!MRI->hasOneNonDBGUse(RHS.reg)) {
// VReg is copied into physreg:
// %vregX = def
// ... //< no other def or use of %Y here
// %Y = COPY %vregX
// =>
// %Y = def
// ...
if (!MRI->hasOneNonDBGUse(SrcReg)) {
DEBUG(dbgs() << "\t\tMultiple vreg uses!\n");
return false;
}
MachineInstr *DestMI = MRI->getVRegDef(RHS.reg);
CopyMI = &*MRI->use_instr_nodbg_begin(RHS.reg);
const SlotIndex CopyRegIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();
const SlotIndex DestRegIdx = LIS->getInstructionIndex(*DestMI).getRegSlot();
if (!LIS->intervalIsInOneMBB(RHS)) {
DEBUG(dbgs() << "\t\tComplex control flow!\n");
return false;
}
MachineInstr &DestMI = *MRI->getVRegDef(SrcReg);
CopyMI = &*MRI->use_instr_nodbg_begin(SrcReg);
SlotIndex CopyRegIdx = LIS->getInstructionIndex(*CopyMI).getRegSlot();
SlotIndex DestRegIdx = LIS->getInstructionIndex(DestMI).getRegSlot();
if (!MRI->isConstantPhysReg(DstReg)) {
// We checked above that there are no interfering defs of the physical
@ -1629,8 +1649,8 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) {
// We're going to remove the copy which defines a physical reserved
// register, so remove its valno, etc.
DEBUG(dbgs() << "\t\tRemoving phys reg def of " << DstReg << " at "
<< CopyRegIdx << "\n");
DEBUG(dbgs() << "\t\tRemoving phys reg def of " << PrintReg(DstReg, TRI)
<< " at " << CopyRegIdx << "\n");
LIS->removePhysRegDefAt(DstReg, CopyRegIdx);
// Create a new dead def at the new def location.


@ -509,17 +509,17 @@ void CodeViewContext::encodeDefRange(MCAsmLayout &Layout,
// are artificially constructing.
size_t RecordSize = FixedSizePortion.size() +
sizeof(LocalVariableAddrRange) + 4 * NumGaps;
// Write out the recrod size.
support::endian::Writer<support::little>(OS).write<uint16_t>(RecordSize);
// Write out the record size.
LEWriter.write<uint16_t>(RecordSize);
// Write out the fixed size prefix.
OS << FixedSizePortion;
// Make space for a fixup that will eventually have a section relative
// relocation pointing at the offset where the variable becomes live.
Fixups.push_back(MCFixup::create(Contents.size(), BE, FK_SecRel_4));
Contents.resize(Contents.size() + 4); // Fixup for code start.
LEWriter.write<uint32_t>(0); // Fixup for code start.
// Make space for a fixup that will record the section index for the code.
Fixups.push_back(MCFixup::create(Contents.size(), BE, FK_SecRel_2));
Contents.resize(Contents.size() + 2); // Fixup for section index.
LEWriter.write<uint16_t>(0); // Fixup for section index.
// Write down the range's extent.
LEWriter.write<uint16_t>(Chunk);
@ -529,7 +529,7 @@ void CodeViewContext::encodeDefRange(MCAsmLayout &Layout,
} while (RangeSize > 0);
// Emit the gaps afterwards.
assert((NumGaps == 0 || Bias < MaxDefRange) &&
assert((NumGaps == 0 || Bias <= MaxDefRange) &&
"large ranges should not have gaps");
unsigned GapStartOffset = GapAndRangeSizes[I].second;
for (++I; I != J; ++I) {
@ -537,7 +537,7 @@ void CodeViewContext::encodeDefRange(MCAsmLayout &Layout,
assert(I < GapAndRangeSizes.size());
std::tie(GapSize, RangeSize) = GapAndRangeSizes[I];
LEWriter.write<uint16_t>(GapStartOffset);
LEWriter.write<uint16_t>(RangeSize);
LEWriter.write<uint16_t>(GapSize);
GapStartOffset += GapSize + RangeSize;
}
}


@ -8934,8 +8934,9 @@ static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
// instructions (stp).
SDLoc DL(&St);
SDValue BasePtr = St.getBasePtr();
const MachinePointerInfo &PtrInfo = St.getPointerInfo();
SDValue NewST1 =
DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, St.getPointerInfo(),
DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
OrigAlignment, St.getMemOperand()->getFlags());
unsigned Offset = EltOffset;
@ -8944,7 +8945,7 @@ static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
DAG.getConstant(Offset, DL, MVT::i64));
NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
St.getPointerInfo(), Alignment,
PtrInfo.getWithOffset(Offset), Alignment,
St.getMemOperand()->getFlags());
Offset += EltOffset;
}


@ -77,11 +77,9 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
default:
return false;
case X86::TCRETURNdi:
case X86::TCRETURNdicc:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
case X86::TCRETURNdi64cc:
case X86::TCRETURNri64:
case X86::TCRETURNmi64: {
bool isMem = Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64;
@ -99,10 +97,6 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
Offset = StackAdj - MaxTCDelta;
assert(Offset >= 0 && "Offset should never be negative");
if (Opcode == X86::TCRETURNdicc || Opcode == X86::TCRETURNdi64cc) {
assert(Offset == 0 && "Conditional tail call cannot adjust the stack.");
}
if (Offset) {
// Check for possible merge with preceding ADD instruction.
Offset += X86FL->mergeSPUpdates(MBB, MBBI, true);
@ -111,21 +105,12 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
// Jump to label or value in register.
bool IsWin64 = STI->isTargetWin64();
if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdicc ||
Opcode == X86::TCRETURNdi64 || Opcode == X86::TCRETURNdi64cc) {
if (Opcode == X86::TCRETURNdi || Opcode == X86::TCRETURNdi64) {
unsigned Op;
switch (Opcode) {
case X86::TCRETURNdi:
Op = X86::TAILJMPd;
break;
case X86::TCRETURNdicc:
Op = X86::TAILJMPd_CC;
break;
case X86::TCRETURNdi64cc:
assert(!IsWin64 && "Conditional tail calls confuse the Win64 unwinder.");
// TODO: We could do it for Win64 "leaf" functions though; PR30337.
Op = X86::TAILJMPd64_CC;
break;
default:
// Note: Win64 uses REX prefixes indirect jumps out of functions, but
// not direct ones.
@ -141,10 +126,6 @@ bool X86ExpandPseudo::ExpandMI(MachineBasicBlock &MBB,
MIB.addExternalSymbol(JumpTarget.getSymbolName(),
JumpTarget.getTargetFlags());
}
if (Op == X86::TAILJMPd_CC || Op == X86::TAILJMPd64_CC) {
MIB.addImm(MBBI->getOperand(2).getImm());
}
} else if (Opcode == X86::TCRETURNmi || Opcode == X86::TCRETURNmi64) {
unsigned Op = (Opcode == X86::TCRETURNmi)
? X86::TAILJMPm


@ -264,21 +264,6 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
"jmp{l}\t{*}$dst", [], IIC_JMP_MEM>;
}
// Conditional tail calls are similar to the above, but they are branches
// rather than barriers, and they use EFLAGS.
let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
let Uses = [ESP, EFLAGS] in {
def TCRETURNdicc : PseudoI<(outs),
(ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>;
// This gets substituted to a conditional jump instruction in MC lowering.
def TAILJMPd_CC : Ii32PCRel<0x80, RawFrm, (outs),
(ins i32imm_pcrel:$dst, i32imm:$cond),
"",
[], IIC_JMP_REL>;
}
//===----------------------------------------------------------------------===//
// Call Instructions...
@ -340,19 +325,3 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
"rex64 jmp{q}\t{*}$dst", [], IIC_JMP_MEM>;
}
}
// Conditional tail calls are similar to the above, but they are branches
// rather than barriers, and they use EFLAGS.
let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
isCodeGenOnly = 1, SchedRW = [WriteJumpLd] in
let Uses = [RSP, EFLAGS] in {
def TCRETURNdi64cc : PseudoI<(outs),
(ins i64i32imm_pcrel:$dst, i32imm:$offset,
i32imm:$cond), []>;
// This gets substituted to a conditional jump instruction in MC lowering.
def TAILJMPd64_CC : Ii32PCRel<0x80, RawFrm, (outs),
(ins i64i32imm_pcrel:$dst, i32imm:$cond),
"",
[], IIC_JMP_REL>;
}


@ -5108,85 +5108,6 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
return !isPredicated(MI);
}
bool X86InstrInfo::isUnconditionalTailCall(const MachineInstr &MI) const {
switch (MI.getOpcode()) {
case X86::TCRETURNdi:
case X86::TCRETURNri:
case X86::TCRETURNmi:
case X86::TCRETURNdi64:
case X86::TCRETURNri64:
case X86::TCRETURNmi64:
return true;
default:
return false;
}
}
bool X86InstrInfo::canMakeTailCallConditional(
SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const {
if (TailCall.getOpcode() != X86::TCRETURNdi &&
TailCall.getOpcode() != X86::TCRETURNdi64) {
// Only direct calls can be done with a conditional branch.
return false;
}
if (Subtarget.isTargetWin64()) {
// Conditional tail calls confuse the Win64 unwinder.
// TODO: Allow them for "leaf" functions; PR30337.
return false;
}
assert(BranchCond.size() == 1);
if (BranchCond[0].getImm() > X86::LAST_VALID_COND) {
// Can't make a conditional tail call with this condition.
return false;
}
const X86MachineFunctionInfo *X86FI =
TailCall.getParent()->getParent()->getInfo<X86MachineFunctionInfo>();
if (X86FI->getTCReturnAddrDelta() != 0 ||
TailCall.getOperand(1).getImm() != 0) {
// A conditional tail call cannot do any stack adjustment.
return false;
}
return true;
}
void X86InstrInfo::replaceBranchWithTailCall(
MachineBasicBlock &MBB, SmallVectorImpl<MachineOperand> &BranchCond,
const MachineInstr &TailCall) const {
assert(canMakeTailCallConditional(BranchCond, TailCall));
MachineBasicBlock::iterator I = MBB.end();
while (I != MBB.begin()) {
--I;
if (I->isDebugValue())
continue;
if (!I->isBranch())
assert(0 && "Can't find the branch to replace!");
X86::CondCode CC = getCondFromBranchOpc(I->getOpcode());
assert(BranchCond.size() == 1);
if (CC != BranchCond[0].getImm())
continue;
break;
}
unsigned Opc = TailCall.getOpcode() == X86::TCRETURNdi ? X86::TCRETURNdicc
: X86::TCRETURNdi64cc;
auto MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opc));
MIB->addOperand(TailCall.getOperand(0)); // Destination.
MIB.addImm(0); // Stack offset (not used).
MIB->addOperand(BranchCond[0]); // Condition.
MIB.copyImplicitOps(TailCall); // Regmask and (imp-used) parameters.
I->eraseFromParent();
}
// Given a MBB and its TBB, find the FBB which was a fallthrough MBB (it may
// not be a fallthrough MBB now due to layout changes). Return nullptr if the
// fallthrough MBB cannot be identified.


@ -316,13 +316,6 @@ class X86InstrInfo final : public X86GenInstrInfo {
// Branch analysis.
bool isUnpredicatedTerminator(const MachineInstr &MI) const override;
bool isUnconditionalTailCall(const MachineInstr &MI) const override;
bool canMakeTailCallConditional(SmallVectorImpl<MachineOperand> &Cond,
const MachineInstr &TailCall) const override;
void replaceBranchWithTailCall(MachineBasicBlock &MBB,
SmallVectorImpl<MachineOperand> &Cond,
const MachineInstr &TailCall) const override;
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
MachineBasicBlock *&FBB,
SmallVectorImpl<MachineOperand> &Cond,


@ -498,16 +498,11 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
}
// TAILJMPd, TAILJMPd64, TailJMPd_cc - Lower to the correct jump instruction.
// TAILJMPd, TAILJMPd64 - Lower to the correct jump instruction.
{ unsigned Opcode;
case X86::TAILJMPr: Opcode = X86::JMP32r; goto SetTailJmpOpcode;
case X86::TAILJMPd:
case X86::TAILJMPd64: Opcode = X86::JMP_1; goto SetTailJmpOpcode;
case X86::TAILJMPd_CC:
case X86::TAILJMPd64_CC:
Opcode = X86::GetCondBranchFromCond(
static_cast<X86::CondCode>(MI->getOperand(1).getImm()));
goto SetTailJmpOpcode;
SetTailJmpOpcode:
MCOperand Saved = OutMI.getOperand(0);
@ -1281,11 +1276,9 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
case X86::TAILJMPr:
case X86::TAILJMPm:
case X86::TAILJMPd:
case X86::TAILJMPd_CC:
case X86::TAILJMPr64:
case X86::TAILJMPm64:
case X86::TAILJMPd64:
case X86::TAILJMPd64_CC:
case X86::TAILJMPr64_REX:
case X86::TAILJMPm64_REX:
// Lower these as normal, but add some comments.


@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "294123"
#define SVN_REVISION "294803"


@ -4,5 +4,5 @@
#define LLD_VERSION_STRING "4.0.0"
#define LLD_VERSION_MAJOR 4
#define LLD_VERSION_MINOR 0
#define LLD_REVISION_STRING "294123"
#define LLD_REVISION_STRING "294803"
#define LLD_REPOSITORY_STRING "FreeBSD"