Merge llvm 3.6.0rc3 from ^/vendor/llvm/dist, merge clang 3.6.0rc3 from
^/vendor/clang/dist, resolve conflicts, and update patches README.

Dimitry Andric 2015-02-14 14:13:00 +00:00
commit 44f7b0dcc5
Notes: svn2git 2020-12-20 02:59:44 +00:00
  svn path=/projects/clang360-import/; revision=278757

65 changed files with 509 additions and 589 deletions

View File

@ -66,7 +66,6 @@ struct LandingPadInfo {
MachineBasicBlock *LandingPadBlock; // Landing pad block.
SmallVector<MCSymbol*, 1> BeginLabels; // Labels prior to invoke.
SmallVector<MCSymbol*, 1> EndLabels; // Labels after invoke.
SmallVector<MCSymbol*, 1> ClauseLabels; // Labels for each clause.
MCSymbol *LandingPadLabel; // Label at beginning of landing pad.
const Function *Personality; // Personality function.
std::vector<int> TypeIds; // List of type ids (filters negative)
@ -331,11 +330,6 @@ class MachineModuleInfo : public ImmutablePass {
///
void addCleanup(MachineBasicBlock *LandingPad);
/// Add a clause for a landing pad. Returns a new label for the clause. This
/// is used by EH schemes that have more than one landing pad. In this case,
/// each clause gets its own basic block.
MCSymbol *addClauseForLandingPad(MachineBasicBlock *LandingPad);
/// getTypeIDFor - Return the type id for the specified typeinfo. This is
/// function wide.
unsigned getTypeIDFor(const GlobalValue *TI);

View File

@ -248,7 +248,7 @@ class NodeMetadata {
void setReductionState(ReductionState RS) { this->RS = RS; }
void handleAddEdge(const MatrixMetadata& MD, bool Transpose) {
DeniedOpts += Transpose ? MD.getWorstCol() : MD.getWorstRow();
DeniedOpts += Transpose ? MD.getWorstRow() : MD.getWorstCol();
const bool* UnsafeOpts =
Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
for (unsigned i = 0; i < NumOpts; ++i)
@ -256,7 +256,7 @@ class NodeMetadata {
}
void handleRemoveEdge(const MatrixMetadata& MD, bool Transpose) {
DeniedOpts -= Transpose ? MD.getWorstCol() : MD.getWorstRow();
DeniedOpts -= Transpose ? MD.getWorstRow() : MD.getWorstCol();
const bool* UnsafeOpts =
Transpose ? MD.getUnsafeCols() : MD.getUnsafeRows();
for (unsigned i = 0; i < NumOpts; ++i)

View File

@ -693,6 +693,7 @@ class MDNode : public Metadata {
static AAMDNodes getMostGenericAA(const AAMDNodes &A, const AAMDNodes &B);
static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B);
static MDNode *getMostGenericRange(MDNode *A, MDNode *B);
static MDNode *getMostGenericAliasScope(MDNode *A, MDNode *B);
};
/// \brief Uniquable metadata node.

View File

@ -52,7 +52,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
const TargetTransformInfo &TTI;
/// The cache of @llvm.assume intrinsics.
AssumptionCache &AC;
AssumptionCacheTracker *ACT;
// The called function.
Function &F;
@ -146,8 +146,8 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
public:
CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
AssumptionCache &AC, Function &Callee, int Threshold)
: DL(DL), TTI(TTI), AC(AC), F(Callee), Threshold(Threshold), Cost(0),
AssumptionCacheTracker *ACT, Function &Callee, int Threshold)
: DL(DL), TTI(TTI), ACT(ACT), F(Callee), Threshold(Threshold), Cost(0),
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
@ -783,7 +783,7 @@ bool CallAnalyzer::visitCallSite(CallSite CS) {
// during devirtualization and so we want to give it a hefty bonus for
// inlining, but cap that bonus in the event that inlining wouldn't pan
// out. Pretend to inline the function, with a custom threshold.
CallAnalyzer CA(DL, TTI, AC, *F, InlineConstants::IndirectCallThreshold);
CallAnalyzer CA(DL, TTI, ACT, *F, InlineConstants::IndirectCallThreshold);
if (CA.analyzeCall(CS)) {
// We were able to inline the indirect call! Subtract the cost from the
// bonus we want to apply, but don't go below zero.
@ -1110,7 +1110,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// the ephemeral values multiple times (and they're completely determined by
// the callee, so this is purely duplicate work).
SmallPtrSet<const Value *, 32> EphValues;
CodeMetrics::collectEphemeralValues(&F, &AC, EphValues);
CodeMetrics::collectEphemeralValues(&F, &ACT->getAssumptionCache(F), EphValues);
// The worklist of live basic blocks in the callee *after* inlining. We avoid
// adding basic blocks of the callee which can be proven to be dead for this
@ -1310,7 +1310,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
<< "...\n");
CallAnalyzer CA(Callee->getDataLayout(), *TTI,
ACT->getAssumptionCache(*Callee), *Callee, Threshold);
ACT, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);
DEBUG(CA.dump());

View File

@ -623,8 +623,8 @@ void Instruction::getAAMetadata(AAMDNodes &N, bool Merge) const {
N.TBAA = getMetadata(LLVMContext::MD_tbaa);
if (Merge)
N.Scope =
MDNode::intersect(N.Scope, getMetadata(LLVMContext::MD_alias_scope));
N.Scope = MDNode::getMostGenericAliasScope(
N.Scope, getMetadata(LLVMContext::MD_alias_scope));
else
N.Scope = getMetadata(LLVMContext::MD_alias_scope);
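Why this matters: MDNode::intersect can only shrink the !alias.scope list, but when two instructions are merged the result may originate from either one, so the merged access should remain a member of every scope either original belonged to. The new getMostGenericAliasScope (declared above and implemented in a later hunk of this commit) preserves that by forming the union of the two scope lists rather than their intersection.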

View File

@ -9,9 +9,11 @@
#include "llvm-c/BitReader.h"
#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <string>
@ -30,11 +32,20 @@ LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
LLVMMemoryBufferRef MemBuf,
LLVMModuleRef *OutModule,
char **OutMessage) {
ErrorOr<Module *> ModuleOrErr =
parseBitcodeFile(unwrap(MemBuf)->getMemBufferRef(), *unwrap(ContextRef));
if (std::error_code EC = ModuleOrErr.getError()) {
if (OutMessage)
*OutMessage = strdup(EC.message().c_str());
MemoryBufferRef Buf = unwrap(MemBuf)->getMemBufferRef();
LLVMContext &Ctx = *unwrap(ContextRef);
std::string Message;
raw_string_ostream Stream(Message);
DiagnosticPrinterRawOStream DP(Stream);
ErrorOr<Module *> ModuleOrErr = parseBitcodeFile(
Buf, Ctx, [&](const DiagnosticInfo &DI) { DI.print(DP); });
if (ModuleOrErr.getError()) {
if (OutMessage) {
Stream.flush();
*OutMessage = strdup(Message.c_str());
}
*OutModule = wrap((Module*)nullptr);
return 1;
}
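A hedged usage sketch of the revised C API (illustrative file name and minimal error handling; the key point is that *OutMessage now carries the diagnostic text captured through the handler and is still allocated with strdup, so the caller releases it with free):

#include "llvm-c/BitReader.h"
#include "llvm-c/Core.h"
#include <stdio.h>
#include <stdlib.h>

int main(void) {
  LLVMMemoryBufferRef Buf;
  char *Err = NULL;
  if (LLVMCreateMemoryBufferWithContentsOfFile("input.bc", &Buf, &Err)) {
    fprintf(stderr, "open failed: %s\n", Err);
    LLVMDisposeMessage(Err);
    return 1;
  }
  LLVMContextRef Ctx = LLVMContextCreate();
  LLVMModuleRef M;
  char *Msg = NULL;
  if (LLVMParseBitcodeInContext(Ctx, Buf, &M, &Msg)) {
    fprintf(stderr, "parse failed: %s\n", Msg ? Msg : "(no message)");
    free(Msg); /* strdup'ed by the implementation above, so free() matches */
  } else {
    /* ... use M ..., then: */
    LLVMDisposeModule(M);
  }
  LLVMDisposeMemoryBuffer(Buf); /* the parse reads a non-owning MemBufferRef */
  LLVMContextDispose(Ctx);
  return 0;
}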

View File

@ -121,8 +121,7 @@ computeActionsTable(const SmallVectorImpl<const LandingPadInfo*> &LandingPads,
for (unsigned J = NumShared, M = TypeIds.size(); J != M; ++J) {
int TypeID = TypeIds[J];
assert(-1 - TypeID < (int)FilterOffsets.size() && "Unknown filter id!");
int ValueForTypeID =
isFilterEHSelector(TypeID) ? FilterOffsets[-1 - TypeID] : TypeID;
int ValueForTypeID = TypeID < 0 ? FilterOffsets[-1 - TypeID] : TypeID;
unsigned SizeTypeID = getSLEB128Size(ValueForTypeID);
int NextAction = SizeAction ? -(SizeAction + SizeTypeID) : 0;
@ -270,14 +269,14 @@ computeCallSiteTable(SmallVectorImpl<CallSiteEntry> &CallSites,
CallSiteEntry Site = {
BeginLabel,
LastLabel,
LandingPad,
LandingPad->LandingPadLabel,
FirstActions[P.PadIndex]
};
// Try to merge with the previous call-site. SJLJ doesn't do this
if (PreviousIsInvoke && !IsSJLJ) {
CallSiteEntry &Prev = CallSites.back();
if (Site.LPad == Prev.LPad && Site.Action == Prev.Action) {
if (Site.PadLabel == Prev.PadLabel && Site.Action == Prev.Action) {
// Extend the range of the previous entry.
Prev.EndLabel = Site.EndLabel;
continue;
@ -577,15 +576,15 @@ void EHStreamer::emitExceptionTable() {
// Offset of the landing pad, counted in 16-byte bundles relative to the
// @LPStart address.
if (!S.LPad) {
if (!S.PadLabel) {
if (VerboseAsm)
Asm->OutStreamer.AddComment(" has no landing pad");
Asm->OutStreamer.EmitIntValue(0, 4/*size*/);
} else {
if (VerboseAsm)
Asm->OutStreamer.AddComment(Twine(" jumps to ") +
S.LPad->LandingPadLabel->getName());
Asm->EmitLabelDifference(S.LPad->LandingPadLabel, EHFuncBeginSym, 4);
S.PadLabel->getName());
Asm->EmitLabelDifference(S.PadLabel, EHFuncBeginSym, 4);
}
// Offset of the first associated action record, relative to the start of
@ -682,7 +681,7 @@ void EHStreamer::emitTypeInfos(unsigned TTypeEncoding) {
unsigned TypeID = *I;
if (VerboseAsm) {
--Entry;
if (isFilterEHSelector(TypeID))
if (TypeID != 0)
Asm->OutStreamer.AddComment("FilterInfo " + Twine(Entry));
}

View File

@ -23,8 +23,6 @@ class MachineModuleInfo;
class MachineInstr;
class MachineFunction;
class AsmPrinter;
class MCSymbol;
class MCSymbolRefExpr;
template <typename T>
class SmallVectorImpl;
@ -62,11 +60,11 @@ class EHStreamer : public AsmPrinterHandler {
/// Structure describing an entry in the call-site table.
struct CallSiteEntry {
// The 'try-range' is BeginLabel .. EndLabel.
MCSymbol *BeginLabel; // Null indicates the start of the function.
MCSymbol *EndLabel; // Null indicates the end of the function.
MCSymbol *BeginLabel; // zero indicates the start of the function.
MCSymbol *EndLabel; // zero indicates the end of the function.
// LPad contains the landing pad start labels.
const LandingPadInfo *LPad; // Null indicates that there is no landing pad.
// The landing pad starts at PadLabel.
MCSymbol *PadLabel; // zero indicates that there is no landing pad.
unsigned Action;
};
@ -114,13 +112,6 @@ class EHStreamer : public AsmPrinterHandler {
virtual void emitTypeInfos(unsigned TTypeEncoding);
// Helpers for identifying what kind of clause an EH typeid or selector
// corresponds to. Negative selectors are for filter clauses, the zero
// selector is for cleanups, and positive selectors are for catch clauses.
static bool isFilterEHSelector(int Selector) { return Selector < 0; }
static bool isCleanupEHSelector(int Selector) { return Selector == 0; }
static bool isCatchEHSelector(int Selector) { return Selector > 0; }
public:
EHStreamer(AsmPrinter *A);
virtual ~EHStreamer();

View File

@ -99,156 +99,9 @@ void Win64Exception::endFunction(const MachineFunction *) {
if (shouldEmitPersonality) {
Asm->OutStreamer.PushSection();
// Emit an UNWIND_INFO struct describing the prologue.
Asm->OutStreamer.EmitWinEHHandlerData();
// Emit either MSVC-compatible tables or the usual Itanium-style LSDA after
// the UNWIND_INFO struct.
if (Asm->MAI->getExceptionHandlingType() == ExceptionHandling::MSVC) {
const Function *Per = MMI->getPersonalities()[MMI->getPersonalityIndex()];
if (Per->getName() == "__C_specific_handler")
emitCSpecificHandlerTable();
else
report_fatal_error(Twine("unexpected personality function: ") +
Per->getName());
} else {
emitExceptionTable();
}
emitExceptionTable();
Asm->OutStreamer.PopSection();
}
Asm->OutStreamer.EmitWinCFIEndProc();
}
const MCSymbolRefExpr *Win64Exception::createImageRel32(const MCSymbol *Value) {
return MCSymbolRefExpr::Create(Value, MCSymbolRefExpr::VK_COFF_IMGREL32,
Asm->OutContext);
}
/// Emit the language-specific data that __C_specific_handler expects. This
/// handler lives in the x64 Microsoft C runtime and allows catching or cleaning
/// up after faults with __try, __except, and __finally. The typeinfo values
/// are not really RTTI data, but pointers to filter functions that return an
/// integer (1, 0, or -1) indicating how to handle the exception. For __finally
/// blocks and other cleanups, the landing pad label is zero, and the filter
/// function is actually a cleanup handler with the same prototype. A catch-all
/// entry is modeled with a null filter function field and a non-zero landing
/// pad label.
///
/// Possible filter function return values:
/// EXCEPTION_EXECUTE_HANDLER (1):
/// Jump to the landing pad label after cleanups.
/// EXCEPTION_CONTINUE_SEARCH (0):
/// Continue searching this table or continue unwinding.
/// EXCEPTION_CONTINUE_EXECUTION (-1):
/// Resume execution at the trapping PC.
///
/// Inferred table structure:
/// struct Table {
/// int NumEntries;
/// struct Entry {
/// imagerel32 LabelStart;
/// imagerel32 LabelEnd;
/// imagerel32 FilterOrFinally; // Zero means catch-all.
/// imagerel32 LabelLPad; // Zero means __finally.
/// } Entries[NumEntries];
/// };
void Win64Exception::emitCSpecificHandlerTable() {
const std::vector<LandingPadInfo> &PadInfos = MMI->getLandingPads();
// Simplifying assumptions for first implementation:
// - Cleanups are not implemented.
// - Filters are not implemented.
// The Itanium LSDA table sorts similar landing pads together to simplify the
// actions table, but we don't need that.
SmallVector<const LandingPadInfo *, 64> LandingPads;
LandingPads.reserve(PadInfos.size());
for (const auto &LP : PadInfos)
LandingPads.push_back(&LP);
// Compute label ranges for call sites as we would for the Itanium LSDA, but
// use an all zero action table because we aren't using these actions.
SmallVector<unsigned, 64> FirstActions;
FirstActions.resize(LandingPads.size());
SmallVector<CallSiteEntry, 64> CallSites;
computeCallSiteTable(CallSites, LandingPads, FirstActions);
MCSymbol *EHFuncBeginSym =
Asm->GetTempSymbol("eh_func_begin", Asm->getFunctionNumber());
MCSymbol *EHFuncEndSym =
Asm->GetTempSymbol("eh_func_end", Asm->getFunctionNumber());
// Emit the number of table entries.
unsigned NumEntries = 0;
for (const CallSiteEntry &CSE : CallSites) {
if (!CSE.LPad)
continue; // Ignore gaps.
for (int Selector : CSE.LPad->TypeIds) {
// Ignore C++ filter clauses in SEH.
// FIXME: Implement cleanup clauses.
if (isCatchEHSelector(Selector))
++NumEntries;
}
}
Asm->OutStreamer.EmitIntValue(NumEntries, 4);
// Emit the four-label records for each call site entry. The table has to be
// sorted in layout order, and the call sites should already be sorted.
for (const CallSiteEntry &CSE : CallSites) {
// Ignore gaps. Unlike the Itanium model, unwinding through a frame without
// an EH table entry will propagate the exception rather than terminating
// the program.
if (!CSE.LPad)
continue;
const LandingPadInfo *LPad = CSE.LPad;
// Compute the label range. We may reuse the function begin and end labels
// rather than forming new ones.
const MCExpr *Begin =
createImageRel32(CSE.BeginLabel ? CSE.BeginLabel : EHFuncBeginSym);
const MCExpr *End;
if (CSE.EndLabel) {
// The interval is half-open, so we have to add one to include the return
// address of the last invoke in the range.
End = MCBinaryExpr::CreateAdd(createImageRel32(CSE.EndLabel),
MCConstantExpr::Create(1, Asm->OutContext),
Asm->OutContext);
} else {
End = createImageRel32(EHFuncEndSym);
}
// These aren't really type info globals, they are actually pointers to
// filter functions ordered by selector. The zero selector is used for
// cleanups, so slot zero corresponds to selector 1.
const std::vector<const GlobalValue *> &SelectorToFilter = MMI->getTypeInfos();
// Do a parallel iteration across typeids and clause labels, skipping filter
// clauses.
assert(LPad->TypeIds.size() == LPad->ClauseLabels.size());
for (size_t I = 0, E = LPad->TypeIds.size(); I < E; ++I) {
// AddLandingPadInfo stores the clauses in reverse, but there is a FIXME
// to change that.
int Selector = LPad->TypeIds[E - I - 1];
MCSymbol *ClauseLabel = LPad->ClauseLabels[I];
// Ignore C++ filter clauses in SEH.
// FIXME: Implement cleanup clauses.
if (!isCatchEHSelector(Selector))
continue;
Asm->OutStreamer.EmitValue(Begin, 4);
Asm->OutStreamer.EmitValue(End, 4);
if (isCatchEHSelector(Selector)) {
assert(unsigned(Selector - 1) < SelectorToFilter.size());
const GlobalValue *TI = SelectorToFilter[Selector - 1];
if (TI) // Emit the filter function pointer.
Asm->OutStreamer.EmitValue(createImageRel32(Asm->getSymbol(TI)), 4);
else // Otherwise, this is a "catch i8* null", or catch all.
Asm->OutStreamer.EmitIntValue(0, 4);
}
Asm->OutStreamer.EmitValue(createImageRel32(ClauseLabel), 4);
}
}
}
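For orientation, a hedged source-level illustration of what the table documented above encodes, using MSVC-style SEH (requires a compiler that supports __try): the "typeinfo" slot holds a pointer to a filter function such as the one below, and the landing pad label marks the __except block.

#include <windows.h>

static int filter(unsigned long code) {
  /* Return values follow the semantics documented above. */
  return code == EXCEPTION_ACCESS_VIOLATION
             ? EXCEPTION_EXECUTE_HANDLER   /* jump to the landing pad */
             : EXCEPTION_CONTINUE_SEARCH;  /* keep searching outward */
}

void demo(volatile int *p) {
  __try {
    *p = 42;                               /* the guarded try-range */
  } __except (filter(GetExceptionCode())) {
    /* landing pad: the LabelLPad field points here */
  }
}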

View File

@ -29,10 +29,6 @@ class Win64Exception : public EHStreamer {
/// Per-function flag to indicate if frame moves info should be emitted.
bool shouldEmitMoves;
void emitCSpecificHandlerTable();
const MCSymbolRefExpr *createImageRel32(const MCSymbol *Value);
public:
//===--------------------------------------------------------------------===//
// Main entry points.

View File

@ -452,14 +452,6 @@ void MachineModuleInfo::addCleanup(MachineBasicBlock *LandingPad) {
LP.TypeIds.push_back(0);
}
MCSymbol *
MachineModuleInfo::addClauseForLandingPad(MachineBasicBlock *LandingPad) {
MCSymbol *ClauseLabel = Context.CreateTempSymbol();
LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
LP.ClauseLabels.push_back(ClauseLabel);
return ClauseLabel;
}
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
void MachineModuleInfo::TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap) {

View File

@ -449,9 +449,9 @@ void TargetPassConfig::addPassesToHandleExceptions() {
case ExceptionHandling::DwarfCFI:
case ExceptionHandling::ARM:
case ExceptionHandling::ItaniumWinEH:
case ExceptionHandling::MSVC: // FIXME: Needs preparation.
addPass(createDwarfEHPass(TM));
break;
case ExceptionHandling::MSVC: // FIXME: Add preparation.
case ExceptionHandling::None:
addPass(createLowerInvokePass());

View File

@ -6544,19 +6544,15 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
// If the input is a constant, let getNode fold it.
if (isa<ConstantSDNode>(N0) || isa<ConstantFPSDNode>(N0)) {
SDValue Res = DAG.getNode(ISD::BITCAST, SDLoc(N), VT, N0);
if (Res.getNode() != N) {
if (!LegalOperations ||
TLI.isOperationLegal(Res.getNode()->getOpcode(), VT))
return Res;
// Folding it resulted in an illegal node, and it's too late to
// do that. Clean up the old node and forego the transformation.
// Ideally this won't happen very often, because instcombine
// and the earlier dagcombine runs (where illegal nodes are
// permitted) should have folded most of them already.
deleteAndRecombine(Res.getNode());
}
// If we can't allow illegal operations, we need to check that this is just
// an fp -> int or int -> fp conversion and that the resulting operation will
// be legal.
if (!LegalOperations ||
(isa<ConstantSDNode>(N0) && VT.isFloatingPoint() && !VT.isVector() &&
TLI.isOperationLegal(ISD::ConstantFP, VT)) ||
(isa<ConstantFPSDNode>(N0) && VT.isInteger() && !VT.isVector() &&
TLI.isOperationLegal(ISD::Constant, VT)))
return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, N0);
}
// (conv (conv x, t1), t2) -> (conv x, t2)

View File

@ -390,7 +390,8 @@ SDValue VectorLegalizer::Promote(SDValue Op) {
if (Op.getOperand(j)
.getValueType()
.getVectorElementType()
.isFloatingPoint())
.isFloatingPoint() &&
NVT.isVector() && NVT.getVectorElementType().isFloatingPoint())
Operands[j] = DAG.getNode(ISD::FP_EXTEND, dl, NVT, Op.getOperand(j));
else
Operands[j] = DAG.getNode(ISD::BITCAST, dl, NVT, Op.getOperand(j));
@ -399,8 +400,9 @@ SDValue VectorLegalizer::Promote(SDValue Op) {
}
Op = DAG.getNode(Op.getOpcode(), dl, NVT, Operands);
if (VT.isFloatingPoint() ||
(VT.isVector() && VT.getVectorElementType().isFloatingPoint()))
if ((VT.isFloatingPoint() && NVT.isFloatingPoint()) ||
(VT.isVector() && VT.getVectorElementType().isFloatingPoint() &&
NVT.isVector() && NVT.getVectorElementType().isFloatingPoint()))
return DAG.getNode(ISD::FP_ROUND, dl, VT, Op, DAG.getIntPtrConstant(0));
else
return DAG.getNode(ISD::BITCAST, dl, VT, Op);
@ -554,9 +556,9 @@ SDValue VectorLegalizer::ExpandLoad(SDValue Op) {
BitOffset += SrcEltBits;
if (BitOffset >= WideBits) {
WideIdx++;
Offset -= WideBits;
if (Offset > 0) {
ShAmt = DAG.getConstant(SrcEltBits - Offset,
BitOffset -= WideBits;
if (BitOffset > 0) {
ShAmt = DAG.getConstant(SrcEltBits - BitOffset,
TLI.getShiftAmountTy(WideVT));
Hi = DAG.getNode(ISD::SHL, dl, WideVT, LoadVals[WideIdx], ShAmt);
Hi = DAG.getNode(ISD::AND, dl, WideVT, Hi, SrcEltBitMask);
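A worked example of the wrap-around above, assuming 24-bit source elements read through 32-bit wide loads (SrcEltBits = 24, WideBits = 32): the second element pushes BitOffset to 48, so WideIdx advances and BitOffset becomes 48 - 32 = 16, the number of this element's bits that live in the new wide value; its high part is then shifted left by SrcEltBits - BitOffset = 8 to sit above the 8 bits taken from the previous wide value. The old code used the unrelated Offset variable for this remainder, which is what the fix corrects.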

View File

@ -2071,14 +2071,10 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
// Get the two live-in registers as SDValues. The physregs have already been
// copied into virtual registers.
SDValue Ops[2];
if (FuncInfo.ExceptionPointerVirtReg) {
Ops[0] = DAG.getZExtOrTrunc(
DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
FuncInfo.ExceptionPointerVirtReg, TLI.getPointerTy()),
getCurSDLoc(), ValueVTs[0]);
} else {
Ops[0] = DAG.getConstant(0, TLI.getPointerTy());
}
Ops[0] = DAG.getZExtOrTrunc(
DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
FuncInfo.ExceptionPointerVirtReg, TLI.getPointerTy()),
getCurSDLoc(), ValueVTs[0]);
Ops[1] = DAG.getZExtOrTrunc(
DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
FuncInfo.ExceptionSelectorVirtReg, TLI.getPointerTy()),
@ -2090,27 +2086,6 @@ void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
setValue(&LP, Res);
}
unsigned
SelectionDAGBuilder::visitLandingPadClauseBB(GlobalValue *ClauseGV,
MachineBasicBlock *LPadBB) {
SDValue Chain = getControlRoot();
// Get the typeid that we will dispatch on later.
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy());
unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
unsigned TypeID = DAG.getMachineFunction().getMMI().getTypeIDFor(ClauseGV);
SDValue Sel = DAG.getConstant(TypeID, TLI.getPointerTy());
Chain = DAG.getCopyToReg(Chain, getCurSDLoc(), VReg, Sel);
// Branch to the main landing pad block.
MachineBasicBlock *ClauseMBB = FuncInfo.MBB;
ClauseMBB->addSuccessor(LPadBB);
DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, Chain,
DAG.getBasicBlock(LPadBB)));
return VReg;
}
/// handleSmallSwitchCaseRange - Emit a series of specific tests (suitable for
/// small case ranges).
bool SelectionDAGBuilder::handleSmallSwitchRange(CaseRec& CR,

View File

@ -713,8 +713,6 @@ class SelectionDAGBuilder {
void visitJumpTable(JumpTable &JT);
void visitJumpTableHeader(JumpTable &JT, JumpTableHeader &JTH,
MachineBasicBlock *SwitchBB);
unsigned visitLandingPadClauseBB(GlobalValue *ClauseGV,
MachineBasicBlock *LPadMBB);
private:
// These all get lowered before this pass.

View File

@ -19,7 +19,6 @@
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GCMetadata.h"
@ -41,7 +40,6 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
@ -909,8 +907,6 @@ void SelectionDAGISel::DoInstructionSelection() {
void SelectionDAGISel::PrepareEHLandingPad() {
MachineBasicBlock *MBB = FuncInfo->MBB;
const TargetRegisterClass *PtrRC = TLI->getRegClassFor(TLI->getPointerTy());
// Add a label to mark the beginning of the landing pad. Deletion of the
// landing pad can thus be detected via the MachineModuleInfo.
MCSymbol *Label = MF->getMMI().addLandingPad(MBB);
@ -922,66 +918,8 @@ void SelectionDAGISel::PrepareEHLandingPad() {
BuildMI(*MBB, FuncInfo->InsertPt, SDB->getCurDebugLoc(), II)
.addSym(Label);
if (TM.getMCAsmInfo()->getExceptionHandlingType() ==
ExceptionHandling::MSVC) {
// Make virtual registers and a series of labels that fill in values for the
// clauses.
auto &RI = MF->getRegInfo();
FuncInfo->ExceptionSelectorVirtReg = RI.createVirtualRegister(PtrRC);
// Get all invoke BBs that will unwind into the clause BBs.
SmallVector<MachineBasicBlock *, 4> InvokeBBs(MBB->pred_begin(),
MBB->pred_end());
// Emit separate machine basic blocks with separate labels for each clause
// before the main landing pad block.
const BasicBlock *LLVMBB = MBB->getBasicBlock();
const LandingPadInst *LPadInst = LLVMBB->getLandingPadInst();
MachineInstrBuilder SelectorPHI = BuildMI(
*MBB, MBB->begin(), SDB->getCurDebugLoc(), TII->get(TargetOpcode::PHI),
FuncInfo->ExceptionSelectorVirtReg);
for (unsigned I = 0, E = LPadInst->getNumClauses(); I != E; ++I) {
MachineBasicBlock *ClauseBB = MF->CreateMachineBasicBlock(LLVMBB);
MF->insert(MBB, ClauseBB);
// Add the edge from the invoke to the clause.
for (MachineBasicBlock *InvokeBB : InvokeBBs)
InvokeBB->addSuccessor(ClauseBB);
// Mark the clause as a landing pad or MI passes will delete it.
ClauseBB->setIsLandingPad();
GlobalValue *ClauseGV = ExtractTypeInfo(LPadInst->getClause(I));
// Start the BB with a label.
MCSymbol *ClauseLabel = MF->getMMI().addClauseForLandingPad(MBB);
BuildMI(*ClauseBB, ClauseBB->begin(), SDB->getCurDebugLoc(), II)
.addSym(ClauseLabel);
// Construct a simple BB that defines a register with the typeid constant.
FuncInfo->MBB = ClauseBB;
FuncInfo->InsertPt = ClauseBB->end();
unsigned VReg = SDB->visitLandingPadClauseBB(ClauseGV, MBB);
CurDAG->setRoot(SDB->getRoot());
SDB->clear();
CodeGenAndEmitDAG();
// Add the typeid virtual register to the phi in the main landing pad.
SelectorPHI.addReg(VReg).addMBB(ClauseBB);
}
// Remove the edge from the invoke to the lpad.
for (MachineBasicBlock *InvokeBB : InvokeBBs)
InvokeBB->removeSuccessor(MBB);
// Restore FuncInfo back to its previous state and select the main landing
// pad block.
FuncInfo->MBB = MBB;
FuncInfo->InsertPt = MBB->end();
return;
}
// Mark exception register as live in.
const TargetRegisterClass *PtrRC = TLI->getRegClassFor(TLI->getPointerTy());
if (unsigned Reg = TLI->getExceptionPointerRegister())
FuncInfo->ExceptionPointerVirtReg = MBB->addLiveIn(Reg, PtrRC);

View File

@ -525,12 +525,15 @@ bool DISubprogram::Verify() const {
while ((IA = DL.getInlinedAt()))
DL = DebugLoc::getFromDILocation(IA);
DL.getScopeAndInlinedAt(Scope, IA);
if (!Scope)
return false;
assert(!IA);
while (!DIDescriptor(Scope).isSubprogram()) {
DILexicalBlockFile D(Scope);
Scope = D.isLexicalBlockFile()
? D.getScope()
: DebugLoc::getFromDILexicalBlock(Scope).getScope();
assert(Scope && "lexical block file has no scope");
}
if (!DISubprogram(Scope).describes(F))
return false;

View File

@ -826,6 +826,28 @@ MDNode *MDNode::intersect(MDNode *A, MDNode *B) {
return getOrSelfReference(A->getContext(), MDs);
}
MDNode *MDNode::getMostGenericAliasScope(MDNode *A, MDNode *B) {
if (!A || !B)
return nullptr;
SmallVector<Metadata *, 4> MDs(B->op_begin(), B->op_end());
for (unsigned i = 0, ie = A->getNumOperands(); i != ie; ++i) {
Metadata *MD = A->getOperand(i);
bool insert = true;
for (unsigned j = 0, je = B->getNumOperands(); j != je; ++j)
if (MD == B->getOperand(j)) {
insert = false;
break;
}
if (insert)
MDs.push_back(MD);
}
// FIXME: This preserves long-standing behaviour, but is it really the right
// behaviour? Or was that an unintended side-effect of node uniquing?
return getOrSelfReference(A->getContext(), MDs);
}
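A minimal standalone analogue of the union logic above, in plain C++ rather than the LLVM Metadata API, for readers skimming the diff:

#include <algorithm>
#include <vector>

// Keep all of B's elements in order, then append the elements of A that are
// not already present. Mirrors getMostGenericAliasScope's operand handling.
template <typename T>
std::vector<T> unionKeepOrder(const std::vector<T> &A, const std::vector<T> &B) {
  std::vector<T> Out(B.begin(), B.end());
  for (const T &V : A)
    if (std::find(Out.begin(), Out.end(), V) == Out.end())
      Out.push_back(V);
  return Out;
}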
MDNode *MDNode::getMostGenericFPMath(MDNode *A, MDNode *B) {
if (!A || !B)
return nullptr;

View File

@ -708,9 +708,10 @@ VectorType::VectorType(Type *ElType, unsigned NumEl)
VectorType *VectorType::get(Type *elementType, unsigned NumElements) {
Type *ElementType = const_cast<Type*>(elementType);
assert(NumElements > 0 && "#Elements of a VectorType must be greater than 0");
assert(isValidElementType(ElementType) &&
"Elements of a VectorType must be a primitive type");
assert(isValidElementType(ElementType) && "Element type of a VectorType must "
"be an integer, floating point, or "
"pointer type.");
LLVMContextImpl *pImpl = ElementType->getContext().pImpl;
VectorType *&Entry = ElementType->getContext().pImpl
->VectorTypes[std::make_pair(ElementType, NumElements)];

View File

@ -47,6 +47,10 @@ void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
}
OS << "\t.section\t" << getSectionName() << ",\"";
if (getCharacteristics() & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA)
OS << 'd';
if (getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA)
OS << 'b';
if (getCharacteristics() & COFF::IMAGE_SCN_MEM_EXECUTE)
OS << 'x';
if (getCharacteristics() & COFF::IMAGE_SCN_MEM_WRITE)
@ -55,10 +59,6 @@ void MCSectionCOFF::PrintSwitchToSection(const MCAsmInfo &MAI,
OS << 'r';
else
OS << 'y';
if (getCharacteristics() & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA)
OS << 'd';
if (getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA)
OS << 'b';
if (getCharacteristics() & COFF::IMAGE_SCN_LNK_REMOVE)
OS << 'n';
if (getCharacteristics() & COFF::IMAGE_SCN_MEM_SHARED)

View File

@ -710,17 +710,22 @@ void WinCOFFObjectWriter::RecordRelocation(const MCAssembler &Asm,
CrossSection = &Symbol.getSection() != &B->getSection();
// Offset of the symbol in the section
int64_t a = Layout.getSymbolOffset(&B_SD);
int64_t OffsetOfB = Layout.getSymbolOffset(&B_SD);
// Offset of the relocation in the section
int64_t b = Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
FixedValue = b - a;
// In the case where we have SymbA and SymB, we just need to store the delta
// between the two symbols. Update FixedValue to account for the delta, and
// skip recording the relocation.
if (!CrossSection)
if (!CrossSection) {
int64_t OffsetOfA = Layout.getSymbolOffset(&A_SD);
FixedValue = (OffsetOfA - OffsetOfB) + Target.getConstant();
return;
}
// Offset of the relocation in the section
int64_t OffsetOfRelocation =
Layout.getFragmentOffset(Fragment) + Fixup.getOffset();
FixedValue = OffsetOfRelocation - OffsetOfB;
} else {
FixedValue = Target.getConstant();
}
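Illustrative numbers for the two paths above: with symbol A at offset 0x40 and B at offset 0x10 of the same section and an addend of 4, FixedValue becomes (0x40 - 0x10) + 4 = 0x34 and no relocation is recorded at all; when the symbols live in different sections, FixedValue is the offset of the relocation site minus OffsetOfB, and the relocation is emitted as before.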

View File

@ -49,6 +49,14 @@
#include "regcclass.h"
#include "regcname.h"
#include "llvm/Config/config.h"
#if HAVE_STDINT_H
#include <stdint.h>
#else
/* Pessimistically bound memory use */
#define SIZE_MAX UINT_MAX
#endif
/*
* parse structure, passed up and down to avoid global variables and
* other clumsinesses
@ -1069,6 +1077,8 @@ allocset(struct parse *p)
p->ncsalloc += CHAR_BIT;
nc = p->ncsalloc;
if (nc > SIZE_MAX / sizeof(cset))
goto nomem;
assert(nc % CHAR_BIT == 0);
nbytes = nc / CHAR_BIT * css;
@ -1412,6 +1422,11 @@ enlarge(struct parse *p, sopno size)
if (p->ssize >= size)
return;
if ((unsigned long)size > SIZE_MAX / sizeof(sop)) {
SETERROR(REG_ESPACE);
return;
}
sp = (sop *)realloc(p->strip, size*sizeof(sop));
if (sp == NULL) {
SETERROR(REG_ESPACE);
@ -1428,6 +1443,12 @@ static void
stripsnug(struct parse *p, struct re_guts *g)
{
g->nstates = p->slen;
if ((unsigned long)p->slen > SIZE_MAX / sizeof(sop)) {
g->strip = p->strip;
SETERROR(REG_ESPACE);
return;
}
g->strip = (sop *)realloc((char *)p->strip, p->slen * sizeof(sop));
if (g->strip == NULL) {
SETERROR(REG_ESPACE);
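The three guards added to this file share one idiom; a minimal standalone sketch of it (a hypothetical helper, not part of regcomp.c):

#include <stdint.h>
#include <stdlib.h>

/* Reject n before computing n * elem: the division-based comparison cannot
   overflow, while the multiplication itself could silently wrap. */
static void *checked_alloc(size_t n, size_t elem) {
  if (elem != 0 && n > SIZE_MAX / elem)
    return NULL;
  return malloc(n * elem);
}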

View File

@ -6287,6 +6287,8 @@ static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS,
AArch64CC::CondCode CC, bool NoNans, EVT VT,
SDLoc dl, SelectionDAG &DAG) {
EVT SrcVT = LHS.getValueType();
assert(VT.getSizeInBits() == SrcVT.getSizeInBits() &&
"function only supposed to emit natural comparisons");
BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode());
APInt CnstBits(VT.getSizeInBits(), 0);
@ -6381,13 +6383,15 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
SDValue LHS = Op.getOperand(0);
SDValue RHS = Op.getOperand(1);
EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
SDLoc dl(Op);
if (LHS.getValueType().getVectorElementType().isInteger()) {
assert(LHS.getValueType() == RHS.getValueType());
AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
return EmitVectorComparison(LHS, RHS, AArch64CC, false, Op.getValueType(),
dl, DAG);
SDValue Cmp =
EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
}
assert(LHS.getValueType().getVectorElementType() == MVT::f32 ||
@ -6401,19 +6405,21 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath;
SDValue Cmp =
EmitVectorComparison(LHS, RHS, CC1, NoNaNs, Op.getValueType(), dl, DAG);
EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG);
if (!Cmp.getNode())
return SDValue();
if (CC2 != AArch64CC::AL) {
SDValue Cmp2 =
EmitVectorComparison(LHS, RHS, CC2, NoNaNs, Op.getValueType(), dl, DAG);
EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG);
if (!Cmp2.getNode())
return SDValue();
Cmp = DAG.getNode(ISD::OR, dl, Cmp.getValueType(), Cmp, Cmp2);
Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2);
}
Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
if (ShouldInvert)
return Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType());

View File

@ -2400,7 +2400,8 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
else if (MI->getParent() != CmpInstr->getParent() || CmpValue != 0) {
// Conservatively refuse to convert an instruction which isn't in the same
// BB as the comparison.
// For CMPri, we need to check Sub, thus we can't return here.
// For CMPri w/ CmpValue != 0, a Sub may still be a candidate.
// Thus we cannot return here.
if (CmpInstr->getOpcode() == ARM::CMPri ||
CmpInstr->getOpcode() == ARM::t2CMPri)
MI = nullptr;
@ -2479,8 +2480,8 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
case ARM::t2EORrr:
case ARM::t2EORri: {
// Scan forward for the use of CPSR
// When checking against MI: if it's a conditional code requires
// checking of V bit, then this is not safe to do.
// When checking against MI: if it's a conditional code that requires
// checking of the V bit or C bit, then this is not safe to do.
// It is safe to remove CmpInstr if CPSR is redefined or killed.
// If we are done with the basic block, we need to check whether CPSR is
// live-out.
@ -2547,19 +2548,30 @@ optimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg, unsigned SrcReg2,
OperandsToUpdate.push_back(
std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
}
} else
} else {
// No Sub, so this is x = <op> y, z; cmp x, 0.
switch (CC) {
default:
case ARMCC::EQ: // Z
case ARMCC::NE: // Z
case ARMCC::MI: // N
case ARMCC::PL: // N
case ARMCC::AL: // none
// CPSR can be used multiple times, we should continue.
break;
case ARMCC::VS:
case ARMCC::VC:
case ARMCC::GE:
case ARMCC::LT:
case ARMCC::GT:
case ARMCC::LE:
case ARMCC::HS: // C
case ARMCC::LO: // C
case ARMCC::VS: // V
case ARMCC::VC: // V
case ARMCC::HI: // C Z
case ARMCC::LS: // C Z
case ARMCC::GE: // N V
case ARMCC::LT: // N V
case ARMCC::GT: // Z N V
case ARMCC::LE: // Z N V
// The instruction uses the V bit or C bit which is not safe.
return false;
}
}
}
}
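As an example of the distinction drawn by this switch: ands r0, r1, r2 followed by cmp r0, #0 updates N and Z exactly as the ands already did, so users of EQ, NE, MI, or PL stay correct if the compare is removed; a GE user, however, reads the V flag, which the logical operation does not set the way a compare would, so the transformation is refused.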

View File

@ -565,7 +565,6 @@ ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM)
setTargetDAGCombine(ISD::FP_TO_SINT);
setTargetDAGCombine(ISD::FP_TO_UINT);
setTargetDAGCombine(ISD::FDIV);
setTargetDAGCombine(ISD::LOAD);
// It is legal to extload from v4i8 to v4i16 or v4i32.
MVT Tys[6] = {MVT::v8i8, MVT::v4i8, MVT::v2i8,
@ -4488,6 +4487,7 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
SDValue CC = Op.getOperand(2);
EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
EVT VT = Op.getValueType();
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
SDLoc dl(Op);
@ -4517,8 +4517,8 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
TmpOp0 = Op0;
TmpOp1 = Op1;
Opc = ISD::OR;
Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
Op1 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp0, TmpOp1);
Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
break;
case ISD::SETUO: Invert = true; // Fallthrough
case ISD::SETO:
@ -4526,8 +4526,8 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
TmpOp0 = Op0;
TmpOp1 = Op1;
Opc = ISD::OR;
Op0 = DAG.getNode(ARMISD::VCGT, dl, VT, TmpOp1, TmpOp0);
Op1 = DAG.getNode(ARMISD::VCGE, dl, VT, TmpOp0, TmpOp1);
Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
break;
}
} else {
@ -4561,8 +4561,8 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
Opc = ARMISD::VTST;
Op0 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(0));
Op1 = DAG.getNode(ISD::BITCAST, dl, VT, AndOp.getOperand(1));
Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
Invert = !Invert;
}
}
@ -4588,22 +4588,24 @@ static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
if (SingleOp.getNode()) {
switch (Opc) {
case ARMISD::VCEQ:
Result = DAG.getNode(ARMISD::VCEQZ, dl, VT, SingleOp); break;
Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCGE:
Result = DAG.getNode(ARMISD::VCGEZ, dl, VT, SingleOp); break;
Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCLEZ:
Result = DAG.getNode(ARMISD::VCLEZ, dl, VT, SingleOp); break;
Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCGT:
Result = DAG.getNode(ARMISD::VCGTZ, dl, VT, SingleOp); break;
Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
case ARMISD::VCLTZ:
Result = DAG.getNode(ARMISD::VCLTZ, dl, VT, SingleOp); break;
Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
default:
Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
}
} else {
Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
}
Result = DAG.getSExtOrTrunc(Result, dl, VT);
if (Invert)
Result = DAG.getNOT(dl, Result, VT);
@ -8877,18 +8879,17 @@ static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
DAG.getUNDEF(VT), NewMask.data());
}
/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
/// NEON load/store intrinsics, and generic vector load/stores, to merge
/// base address updates.
/// For generic load/stores, the memory type is assumed to be a vector.
/// The caller is assumed to have checked legality.
/// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP and
/// NEON load/store intrinsics to merge base address updates.
static SDValue CombineBaseUpdate(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
SelectionDAG &DAG = DCI.DAG;
bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
bool isStore = N->getOpcode() == ISD::STORE;
unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
SDValue Addr = N->getOperand(AddrOpIdx);
// Search for a use of the address operand that is an increment.
@ -8949,10 +8950,6 @@ static SDValue CombineBaseUpdate(SDNode *N,
case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD;
NumVecs = 1; isLaneOp = false; break;
case ISD::STORE: NewOpc = ARMISD::VST1_UPD;
NumVecs = 1; isLoad = false; isLaneOp = false; break;
}
}
@ -8960,11 +8957,8 @@ static SDValue CombineBaseUpdate(SDNode *N,
EVT VecTy;
if (isLoad)
VecTy = N->getValueType(0);
else if (isIntrinsic)
VecTy = N->getOperand(AddrOpIdx+1).getValueType();
else
VecTy = N->getOperand(1).getValueType();
VecTy = N->getOperand(AddrOpIdx+1).getValueType();
unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
if (isLaneOp)
NumBytes /= VecTy.getVectorNumElements();
@ -8981,70 +8975,25 @@ static SDValue CombineBaseUpdate(SDNode *N,
continue;
}
EVT AlignedVecTy = VecTy;
// If this is a less-than-standard-aligned load/store, change the type to
// match the standard alignment.
// The alignment is overlooked when selecting _UPD variants; and it's
// easier to introduce bitcasts here than fix that.
// There are 3 ways to get to this base-update combine:
// - intrinsics: they are assumed to be properly aligned (to the standard
// alignment of the memory type), so we don't need to do anything.
// - ARMISD::VLDx nodes: they are only generated from the aforementioned
// intrinsics, so, likewise, there's nothing to do.
// - generic load/store instructions: the alignment is specified as an
// explicit operand, rather than implicitly as the standard alignment
// of the memory type (like the intrinsics). We need to change the
// memory type to match the explicit alignment. That way, we don't
// generate non-standard-aligned ARMISD::VLDx nodes.
if (LSBaseSDNode *LSN = dyn_cast<LSBaseSDNode>(N)) {
unsigned Alignment = LSN->getAlignment();
if (Alignment == 0)
Alignment = 1;
if (Alignment < VecTy.getScalarSizeInBits() / 8) {
MVT EltTy = MVT::getIntegerVT(Alignment * 8);
assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
assert(!isLaneOp && "Unexpected generic load/store lane.");
unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
}
}
// Create the new updating load/store node.
// First, create an SDVTList for the new updating node's results.
EVT Tys[6];
unsigned NumResultVecs = (isLoad ? NumVecs : 0);
unsigned n;
for (n = 0; n < NumResultVecs; ++n)
Tys[n] = AlignedVecTy;
Tys[n] = VecTy;
Tys[n++] = MVT::i32;
Tys[n] = MVT::Other;
SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
// Then, gather the new node's operands.
SmallVector<SDValue, 8> Ops;
Ops.push_back(N->getOperand(0)); // incoming chain
Ops.push_back(N->getOperand(AddrOpIdx));
Ops.push_back(Inc);
if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
// Try to match the intrinsic's signature
Ops.push_back(StN->getValue());
Ops.push_back(DAG.getConstant(StN->getAlignment(), MVT::i32));
} else {
for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i)
Ops.push_back(N->getOperand(i));
for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
Ops.push_back(N->getOperand(i));
}
// If this is a non-standard-aligned store, the penultimate operand is the
// stored value. Bitcast it to the aligned type.
if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
SDValue &StVal = Ops[Ops.size()-2];
StVal = DAG.getNode(ISD::BITCAST, SDLoc(N), AlignedVecTy, StVal);
}
MemSDNode *MemInt = cast<MemSDNode>(N);
MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
Ops, AlignedVecTy,
Ops, MemInt->getMemoryVT(),
MemInt->getMemOperand());
// Update the uses.
@ -9052,14 +9001,6 @@ static SDValue CombineBaseUpdate(SDNode *N,
for (unsigned i = 0; i < NumResultVecs; ++i) {
NewResults.push_back(SDValue(UpdN.getNode(), i));
}
// If this is an non-standard-aligned load, the first result is the loaded
// value. Bitcast it to the expected result type.
if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
SDValue &LdVal = NewResults[0];
LdVal = DAG.getNode(ISD::BITCAST, SDLoc(N), VecTy, LdVal);
}
NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
DCI.CombineTo(N, NewResults);
DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
@ -9069,14 +9010,6 @@ static SDValue CombineBaseUpdate(SDNode *N,
return SDValue();
}
static SDValue PerformVLDCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
return SDValue();
return CombineBaseUpdate(N, DCI);
}
/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
@ -9190,18 +9123,6 @@ static SDValue PerformVDUPLANECombine(SDNode *N,
return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
static SDValue PerformLOADCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI) {
EVT VT = N->getValueType(0);
// If this is a legal vector load, try to combine it into a VLD1_UPD.
if (ISD::isNormalLoad(N) && VT.isVector() &&
DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
return CombineBaseUpdate(N, DCI);
return SDValue();
}
/// PerformSTORECombine - Target-specific dag combine xforms for
/// ISD::STORE.
static SDValue PerformSTORECombine(SDNode *N,
@ -9340,11 +9261,6 @@ static SDValue PerformSTORECombine(SDNode *N,
St->getAAInfo());
}
// If this is a legal vector store, try to combine it into a VST1_UPD.
if (ISD::isNormalStore(N) && VT.isVector() &&
DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
return CombineBaseUpdate(N, DCI);
return SDValue();
}
@ -9938,11 +9854,10 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
case ISD::SELECT_CC: return PerformSELECT_CCCombine(N, DCI.DAG, Subtarget);
case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
case ISD::LOAD: return PerformLOADCombine(N, DCI);
case ARMISD::VLD2DUP:
case ARMISD::VLD3DUP:
case ARMISD::VLD4DUP:
return PerformVLDCombine(N, DCI);
return CombineBaseUpdate(N, DCI);
case ARMISD::BUILD_VECTOR:
return PerformARMBUILD_VECTORCombine(N, DCI);
case ISD::INTRINSIC_VOID:
@ -9962,7 +9877,7 @@ SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
case Intrinsic::arm_neon_vst2lane:
case Intrinsic::arm_neon_vst3lane:
case Intrinsic::arm_neon_vst4lane:
return PerformVLDCombine(N, DCI);
return CombineBaseUpdate(N, DCI);
default: break;
}
break;

View File

@ -9195,34 +9195,48 @@ static const struct {
const uint64_t Enabled;
const uint64_t Disabled;
} FPUs[] = {
{ARM::VFP, ARM::FeatureVFP2, ARM::FeatureNEON},
{ARM::VFPV2, ARM::FeatureVFP2, ARM::FeatureNEON},
{ARM::VFPV3, ARM::FeatureVFP2 | ARM::FeatureVFP3, ARM::FeatureNEON},
{ARM::VFPV3_D16, ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureD16,
ARM::FeatureNEON},
{ARM::VFPV4, ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4,
ARM::FeatureNEON},
{ARM::VFPV4_D16,
ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 | ARM::FeatureD16,
ARM::FeatureNEON},
{ARM::FPV5_D16, ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureFPARMv8 | ARM::FeatureD16,
ARM::FeatureNEON | ARM::FeatureCrypto},
{ARM::FP_ARMV8, ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureFPARMv8,
ARM::FeatureNEON | ARM::FeatureCrypto},
{ARM::NEON, ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureNEON, 0},
{ARM::NEON_VFPV4,
ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 | ARM::FeatureNEON,
0},
{ARM::NEON_FP_ARMV8,
ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
{/* ID */ ARM::VFP,
/* Enabled */ ARM::FeatureVFP2,
/* Disabled */ ARM::FeatureNEON},
{/* ID */ ARM::VFPV2,
/* Enabled */ ARM::FeatureVFP2,
/* Disabled */ ARM::FeatureNEON},
{/* ID */ ARM::VFPV3,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3,
/* Disabled */ ARM::FeatureNEON | ARM::FeatureD16},
{/* ID */ ARM::VFPV3_D16,
/* Enable */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureD16,
/* Disabled */ ARM::FeatureNEON},
{/* ID */ ARM::VFPV4,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4,
/* Disabled */ ARM::FeatureNEON | ARM::FeatureD16},
{/* ID */ ARM::VFPV4_D16,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureD16,
/* Disabled */ ARM::FeatureNEON},
{/* ID */ ARM::FPV5_D16,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureFPARMv8 | ARM::FeatureD16,
/* Disabled */ ARM::FeatureNEON | ARM::FeatureCrypto},
{/* ID */ ARM::FP_ARMV8,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureFPARMv8,
/* Disabled */ ARM::FeatureNEON | ARM::FeatureCrypto | ARM::FeatureD16},
{/* ID */ ARM::NEON,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureNEON,
/* Disabled */ ARM::FeatureD16},
{/* ID */ ARM::NEON_VFPV4,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureNEON,
/* Disabled */ ARM::FeatureD16},
{/* ID */ ARM::NEON_FP_ARMV8,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureFPARMv8 | ARM::FeatureNEON,
ARM::FeatureCrypto},
{ARM::CRYPTO_NEON_FP_ARMV8,
ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
/* Disabled */ ARM::FeatureCrypto | ARM::FeatureD16},
{/* ID */ ARM::CRYPTO_NEON_FP_ARMV8,
/* Enabled */ ARM::FeatureVFP2 | ARM::FeatureVFP3 | ARM::FeatureVFP4 |
ARM::FeatureFPARMv8 | ARM::FeatureNEON | ARM::FeatureCrypto,
0},
/* Disabled */ ARM::FeatureD16},
{ARM::SOFTVFP, 0, 0},
};

View File

@ -3134,7 +3134,8 @@ def ISYNC : XLForm_2_ext<19, 150, 0, 0, 0, (outs), (ins),
def ICBI : XForm_1a<31, 982, (outs), (ins memrr:$src),
"icbi $src", IIC_LdStICBI, []>;
def EIEIO : XForm_24_eieio<31, 854, (outs), (ins),
// We used to have EIEIO as value but E[0-9A-Z] is a reserved name
def EnforceIEIO : XForm_24_eieio<31, 854, (outs), (ins),
"eieio", IIC_LdStLoad, []>;
def WAIT : XForm_24_sync<31, 62, (outs), (ins i32imm:$L),

View File

@ -100,7 +100,7 @@ bool AMDGPUTTI::hasBranchDivergence() const { return true; }
void AMDGPUTTI::getUnrollingPreferences(const Function *, Loop *L,
UnrollingPreferences &UP) const {
UP.Threshold = 300; // Twice the default.
UP.Count = UINT_MAX;
UP.MaxCount = UINT_MAX;
UP.Partial = true;
// TODO: Do we want runtime unrolling?

View File

@ -14,6 +14,7 @@
#include "AMDGPU.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
@ -66,6 +67,8 @@ class SIAnnotateControlFlow : public FunctionPass {
DominatorTree *DT;
StackVector Stack;
LoopInfo *LI;
bool isTopOfStack(BasicBlock *BB);
Value *popSaved();
@ -99,6 +102,7 @@ class SIAnnotateControlFlow : public FunctionPass {
}
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.addRequired<LoopInfo>();
AU.addRequired<DominatorTreeWrapperPass>();
AU.addPreserved<DominatorTreeWrapperPass>();
FunctionPass::getAnalysisUsage(AU);
@ -277,10 +281,25 @@ void SIAnnotateControlFlow::handleLoop(BranchInst *Term) {
Term->setCondition(CallInst::Create(Loop, Arg, "", Term));
push(Term->getSuccessor(0), Arg);
}
/// \brief Close the last opened control flow
void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
llvm::Loop *L = LI->getLoopFor(BB);
if (L && L->getHeader() == BB) {
// We can't insert an EndCF call into a loop header, because it will
// get executed on every iteration of the loop, when it should be
// executed only once before the loop.
SmallVector <BasicBlock*, 8> Latches;
L->getLoopLatches(Latches);
std::vector<BasicBlock*> Preds;
for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
if (std::find(Latches.begin(), Latches.end(), *PI) == Latches.end())
Preds.push_back(*PI);
}
BB = llvm::SplitBlockPredecessors(BB, Preds, "endcf.split", this);
}
CallInst::Create(EndCf, popSaved(), "", BB->getFirstInsertionPt());
}
@ -288,6 +307,7 @@ void SIAnnotateControlFlow::closeControlFlow(BasicBlock *BB) {
/// recognize if/then/else and loops.
bool SIAnnotateControlFlow::runOnFunction(Function &F) {
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
LI = &getAnalysis<LoopInfo>();
for (df_iterator<BasicBlock *> I = df_begin(&F.getEntryBlock()),
E = df_end(&F.getEntryBlock()); I != E; ++I) {

View File

@ -266,6 +266,7 @@ void SIRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
break;
case AMDGPU::SI_SPILL_V32_RESTORE:
case AMDGPU::SI_SPILL_V64_RESTORE:
case AMDGPU::SI_SPILL_V96_RESTORE:
case AMDGPU::SI_SPILL_V128_RESTORE:
case AMDGPU::SI_SPILL_V256_RESTORE:
case AMDGPU::SI_SPILL_V512_RESTORE: {

View File

@ -132,9 +132,9 @@ def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true",
def FeatureXOP : SubtargetFeature<"xop", "HasXOP", "true",
"Enable XOP instructions",
[FeatureFMA4]>;
def FeatureVectorUAMem : SubtargetFeature<"vector-unaligned-mem",
"HasVectorUAMem", "true",
"Allow unaligned memory operands on vector/SIMD instructions">;
def FeatureSSEUnalignedMem : SubtargetFeature<"sse-unaligned-mem",
"HasSSEUnalignedMem", "true",
"Allow unaligned memory operands with SSE instructions">;
def FeatureAES : SubtargetFeature<"aes", "HasAES", "true",
"Enable AES instructions",
[FeatureSSE2]>;
@ -309,7 +309,6 @@ class SandyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
FeatureCMPXCHG16B,
FeatureFastUAMem,
FeatureSlowUAMem32,
FeatureVectorUAMem,
FeaturePOPCNT,
FeatureAES,
FeaturePCLMUL
@ -322,7 +321,6 @@ class IvyBridgeProc<string Name> : ProcessorModel<Name, SandyBridgeModel, [
FeatureCMPXCHG16B,
FeatureFastUAMem,
FeatureSlowUAMem32,
FeatureVectorUAMem,
FeaturePOPCNT,
FeatureAES,
FeaturePCLMUL,
@ -337,7 +335,6 @@ class HaswellProc<string Name> : ProcessorModel<Name, HaswellModel, [
FeatureAVX2,
FeatureCMPXCHG16B,
FeatureFastUAMem,
FeatureVectorUAMem,
FeaturePOPCNT,
FeatureAES,
FeaturePCLMUL,
@ -360,7 +357,6 @@ class BroadwellProc<string Name> : ProcessorModel<Name, HaswellModel, [
FeatureAVX2,
FeatureCMPXCHG16B,
FeatureFastUAMem,
FeatureVectorUAMem,
FeaturePOPCNT,
FeatureAES,
FeaturePCLMUL,
@ -388,7 +384,7 @@ class KnightsLandingProc<string Name> : ProcessorModel<Name, HaswellModel,
FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
FeatureSlowIncDec, FeatureVectorUAMem]>;
FeatureSlowIncDec]>;
def : KnightsLandingProc<"knl">;
// FIXME: define SKX model
@ -399,7 +395,7 @@ class SkylakeProc<string Name> : ProcessorModel<Name, HaswellModel,
FeatureAES, FeaturePCLMUL, FeatureRDRAND, FeatureF16C,
FeatureFSGSBase, FeatureMOVBE, FeatureLZCNT, FeatureBMI,
FeatureBMI2, FeatureFMA, FeatureRTM, FeatureHLE,
FeatureSlowIncDec, FeatureSGX, FeatureVectorUAMem]>;
FeatureSlowIncDec, FeatureSGX]>;
def : SkylakeProc<"skylake">;
def : SkylakeProc<"skx">; // Legacy alias.

View File

@ -688,11 +688,11 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) {
std::vector<const MCSymbol*> DLLExportedFns, DLLExportedGlobals;
for (const auto &Function : M)
if (Function.hasDLLExportStorageClass())
if (Function.hasDLLExportStorageClass() && !Function.isDeclaration())
DLLExportedFns.push_back(getSymbol(&Function));
for (const auto &Global : M.globals())
if (Global.hasDLLExportStorageClass())
if (Global.hasDLLExportStorageClass() && !Global.isDeclaration())
DLLExportedGlobals.push_back(getSymbol(&Global));
for (const auto &Alias : M.aliases()) {

View File

@ -5473,6 +5473,8 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT,
if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
DecodePSHUFBMask(C, Mask);
if (Mask.empty())
return false;
break;
}

View File

@ -424,7 +424,7 @@ def alignedloadv8i64 : PatFrag<(ops node:$ptr),
// setting a feature bit in the processor (on startup, for example).
// Opteron 10h and later implement such a feature.
def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
return Subtarget->hasVectorUAMem()
return Subtarget->hasSSEUnalignedMem()
|| cast<LoadSDNode>(N)->getAlignment() >= 16;
}]>;

View File

@ -265,7 +265,7 @@ void X86Subtarget::initializeEnvironment() {
IsSHLDSlow = false;
IsUAMemFast = false;
IsUAMem32Slow = false;
HasVectorUAMem = false;
HasSSEUnalignedMem = false;
HasCmpxchg16b = false;
UseLeaForSP = false;
HasSlowDivide32 = false;

View File

@ -162,9 +162,9 @@ class X86Subtarget final : public X86GenSubtargetInfo {
/// True if unaligned 32-byte memory accesses are slow.
bool IsUAMem32Slow;
/// HasVectorUAMem - True if SIMD operations can have unaligned memory
/// operands. This may require setting a feature bit in the processor.
bool HasVectorUAMem;
/// True if SSE operations can have unaligned memory operands.
/// This may require setting a configuration bit in the processor.
bool HasSSEUnalignedMem;
/// HasCmpxchg16b - True if this processor has the CMPXCHG16B instruction;
/// this is true for most x86-64 chips, but not the first AMD chips.
@ -378,7 +378,7 @@ class X86Subtarget final : public X86GenSubtargetInfo {
bool isSHLDSlow() const { return IsSHLDSlow; }
bool isUnalignedMemAccessFast() const { return IsUAMemFast; }
bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
bool hasVectorUAMem() const { return HasVectorUAMem; }
bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
bool hasCmpxchg16b() const { return HasCmpxchg16b; }
bool useLeaForSP() const { return UseLeaForSP; }
bool hasSlowDivide32() const { return HasSlowDivide32; }

View File

@ -330,11 +330,17 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
case LLVMContext::MD_nonnull:
// All of these directly apply.
NewLoad->setMetadata(ID, N);
break;
case LLVMContext::MD_nonnull:
// FIXME: We should translate this into range metadata for integer types
// and vice versa.
if (NewTy->isPointerTy())
NewLoad->setMetadata(ID, N);
break;
case LLVMContext::MD_range:
// FIXME: It would be nice to propagate this in some way, but the type
// conversions make it hard.
@ -548,13 +554,14 @@ static bool combineStoreToValueType(InstCombiner &IC, StoreInst &SI) {
case LLVMContext::MD_noalias:
case LLVMContext::MD_nontemporal:
case LLVMContext::MD_mem_parallel_loop_access:
case LLVMContext::MD_nonnull:
// All of these directly apply.
NewStore->setMetadata(ID, N);
break;
case LLVMContext::MD_invariant_load:
case LLVMContext::MD_nonnull:
case LLVMContext::MD_range:
// These don't apply for stores.
break;
}
}
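Concretely: if a load of an i8* is rewritten as an i64 load, !nonnull must be dropped because it is defined only for pointer-typed loads; as the FIXME says, it could in principle be translated into !range metadata excluding zero (and vice versa for the pointer direction), but that translation is not implemented here.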

View File

@ -67,7 +67,7 @@ static const uint64_t kMIPS32_ShadowOffset32 = 0x0aaa0000;
static const uint64_t kMIPS64_ShadowOffset64 = 1ULL << 36;
static const uint64_t kFreeBSD_ShadowOffset32 = 1ULL << 30;
static const uint64_t kFreeBSD_ShadowOffset64 = 1ULL << 46;
static const uint64_t kWindowsShadowOffset32 = 1ULL << 30;
static const uint64_t kWindowsShadowOffset32 = 3ULL << 28;
static const size_t kMinStackMallocSize = 1 << 6; // 64B
static const size_t kMaxStackMallocSize = 1 << 16; // 64K
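
The Windows 32-bit shadow offset moves from 0x40000000 down to 0x30000000. The
arithmetic can be checked at compile time with plain C++11, nothing
LLVM-specific:

static_assert((1ULL << 30) == 0x40000000ULL, "previous kWindowsShadowOffset32");
static_assert((3ULL << 28) == 0x30000000ULL, "updated kWindowsShadowOffset32");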


@ -71,9 +71,17 @@ class InstrProfiling : public ModulePass {
return isMachO() ? "__DATA,__llvm_prf_data" : "__llvm_prf_data";
}
/// Get the section name for the coverage mapping data.
StringRef getCoverageSection() const {
return isMachO() ? "__DATA,__llvm_covmap" : "__llvm_covmap";
}
/// Replace instrprof_increment with an increment of the appropriate value.
void lowerIncrement(InstrProfIncrementInst *Inc);
/// Set up the section and uses for coverage data and its references.
void lowerCoverageData(GlobalVariable *CoverageData);
/// Get the region counters for an increment, creating them if necessary.
///
/// If the counter array doesn't yet exist, the profile data variables
@ -118,6 +126,10 @@ bool InstrProfiling::runOnModule(Module &M) {
lowerIncrement(Inc);
MadeChange = true;
}
if (GlobalVariable *Coverage = M.getNamedGlobal("__llvm_coverage_mapping")) {
lowerCoverageData(Coverage);
MadeChange = true;
}
if (!MadeChange)
return false;
@ -140,6 +152,35 @@ void InstrProfiling::lowerIncrement(InstrProfIncrementInst *Inc) {
Inc->eraseFromParent();
}
void InstrProfiling::lowerCoverageData(GlobalVariable *CoverageData) {
CoverageData->setSection(getCoverageSection());
CoverageData->setAlignment(8);
Constant *Init = CoverageData->getInitializer();
// We're expecting { i32, i32, i32, i32, [n x { i8*, i32, i32 }], [m x i8] }
// for some n and m. If not, the frontend's given us something broken.
assert(Init->getNumOperands() == 6 && "bad number of fields in coverage map");
assert(isa<ConstantArray>(Init->getAggregateElement(4)) &&
"invalid function list in coverage map");
ConstantArray *Records = cast<ConstantArray>(Init->getAggregateElement(4));
for (unsigned I = 0, E = Records->getNumOperands(); I < E; ++I) {
Constant *Record = Records->getOperand(I);
Value *V = const_cast<Value *>(Record->getOperand(0))->stripPointerCasts();
assert(isa<GlobalVariable>(V) && "Missing reference to function name");
GlobalVariable *Name = cast<GlobalVariable>(V);
// If we have region counters for this name, we've already handled it.
auto It = RegionCounters.find(Name);
if (It != RegionCounters.end())
continue;
// Move the name variable to the right section.
Name->setSection(getNameSection());
Name->setAlignment(1);
}
}
/// Get the name of a profiling variable for a particular function.
static std::string getVarName(InstrProfIncrementInst *Inc, StringRef VarName) {
auto *Arr = cast<ConstantDataArray>(Inc->getName()->getInitializer());


@ -631,7 +631,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
if (SI.isAtomic()) SI.setOrdering(addReleaseOrdering(SI.getOrdering()));
if (MS.TrackOrigins)
if (MS.TrackOrigins && !SI.isAtomic())
storeOrigin(IRB, Addr, Shadow, getOrigin(Val), SI.getAlignment(),
InstrumentWithCalls);
}


@ -480,6 +480,9 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
// Ignore volatile loads.
if (!LI->isSimple()) {
LastStore = nullptr;
// Don't CSE across synchronization boundaries.
if (Inst->mayWriteToMemory())
++CurrentGeneration;
continue;
}
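
The fix closes a subtle hole: a volatile or ordered load is skipped by the
load-caching logic, yet it can still act as a synchronization boundary, so
previously cached values must stop being reusable. A minimal sketch of the
generation scheme with illustrative types (not the pass's real data
structures):

#include <cstdint>

// A cached load is reusable only while no possibly-writing instruction has
// executed since it was recorded.
struct CachedLoad {
  std::uintptr_t Pointer;
  unsigned Generation; // stale once CurrentGeneration moves past it
};

struct MemoryState {
  unsigned CurrentGeneration = 0;
  // Called for any instruction that may write memory, now including the
  // non-simple loads handled above.
  void noteMayWriteToMemory() { ++CurrentGeneration; }
  bool isReusable(const CachedLoad &L) const {
    return L.Generation == CurrentGeneration;
  }
};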


@ -750,6 +750,16 @@ bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
// its dependence information by changing its parameter.
MD->removeInstruction(C);
// Update AA metadata
// FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
// handled here, but combineMetadata doesn't support them yet
unsigned KnownIDs[] = {
LLVMContext::MD_tbaa,
LLVMContext::MD_alias_scope,
LLVMContext::MD_noalias,
};
combineMetadata(C, cpy, KnownIDs);
// Remove the memcpy.
MD->removeInstruction(cpy);
++NumMemCpyInstr;


@ -1328,6 +1328,8 @@ void llvm::combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsign
K->setMetadata(Kind, MDNode::getMostGenericTBAA(JMD, KMD));
break;
case LLVMContext::MD_alias_scope:
K->setMetadata(Kind, MDNode::getMostGenericAliasScope(JMD, KMD));
break;
case LLVMContext::MD_noalias:
K->setMetadata(Kind, MDNode::intersect(JMD, KMD));
break;
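
Unlike !noalias, which is intersected, merging !alias.scope has to widen the
scope set: the surviving instruction now stands for both originals, and a
larger alias.scope list makes it harder, not easier, to prove no-alias against
it later. A plausible model of the new helper, assuming it unions the two
scope lists (stand-in types rather than the MDNode API):

#include <algorithm>
#include <string>
#include <vector>

using ScopeList = std::vector<std::string>;

ScopeList mostGenericAliasScope(const ScopeList &A, const ScopeList &B) {
  ScopeList Out = A;
  for (const std::string &S : B)
    if (std::find(Out.begin(), Out.end(), S) == Out.end())
      Out.push_back(S); // keep every scope either instruction belonged to
  return Out;
}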


@ -154,19 +154,21 @@ static Metadata *mapToSelf(ValueToValueMapTy &VM, const Metadata *MD) {
return mapToMetadata(VM, MD, const_cast<Metadata *>(MD));
}
static Metadata *MapMetadataImpl(const Metadata *MD, ValueToValueMapTy &VM,
RemapFlags Flags,
static Metadata *MapMetadataImpl(const Metadata *MD,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer);
static Metadata *mapMetadataOp(Metadata *Op, ValueToValueMapTy &VM,
RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
static Metadata *mapMetadataOp(Metadata *Op,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
if (!Op)
return nullptr;
if (Metadata *MappedOp =
MapMetadataImpl(Op, VM, Flags, TypeMapper, Materializer))
MapMetadataImpl(Op, Cycles, VM, Flags, TypeMapper, Materializer))
return MappedOp;
// Use identity map if MappedOp is null and we can ignore missing entries.
if (Flags & RF_IgnoreMissingEntries)
@ -180,8 +182,9 @@ static Metadata *mapMetadataOp(Metadata *Op, ValueToValueMapTy &VM,
return nullptr;
}
static Metadata *cloneMDTuple(const MDTuple *Node, ValueToValueMapTy &VM,
RemapFlags Flags,
static Metadata *cloneMDTuple(const MDTuple *Node,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer,
bool IsDistinct) {
@ -192,41 +195,57 @@ static Metadata *cloneMDTuple(const MDTuple *Node, ValueToValueMapTy &VM,
SmallVector<Metadata *, 4> Elts;
Elts.reserve(Node->getNumOperands());
for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I)
Elts.push_back(mapMetadataOp(Node->getOperand(I), VM, Flags, TypeMapper,
Materializer));
Elts.push_back(mapMetadataOp(Node->getOperand(I), Cycles, VM, Flags,
TypeMapper, Materializer));
return MDTuple::get(Node->getContext(), Elts);
}
static Metadata *cloneMDLocation(const MDLocation *Node, ValueToValueMapTy &VM,
RemapFlags Flags,
static Metadata *cloneMDLocation(const MDLocation *Node,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer,
bool IsDistinct) {
return (IsDistinct ? MDLocation::getDistinct : MDLocation::get)(
Node->getContext(), Node->getLine(), Node->getColumn(),
mapMetadataOp(Node->getScope(), VM, Flags, TypeMapper, Materializer),
mapMetadataOp(Node->getInlinedAt(), VM, Flags, TypeMapper, Materializer));
mapMetadataOp(Node->getScope(), Cycles, VM, Flags, TypeMapper,
Materializer),
mapMetadataOp(Node->getInlinedAt(), Cycles, VM, Flags, TypeMapper,
Materializer));
}
static Metadata *cloneMDNode(const UniquableMDNode *Node, ValueToValueMapTy &VM,
RemapFlags Flags, ValueMapTypeRemapper *TypeMapper,
static Metadata *cloneMDNode(const UniquableMDNode *Node,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer, bool IsDistinct) {
switch (Node->getMetadataID()) {
default:
llvm_unreachable("Invalid UniquableMDNode subclass");
#define HANDLE_UNIQUABLE_LEAF(CLASS) \
case Metadata::CLASS##Kind: \
return clone##CLASS(cast<CLASS>(Node), VM, Flags, TypeMapper, \
return clone##CLASS(cast<CLASS>(Node), Cycles, VM, Flags, TypeMapper, \
Materializer, IsDistinct);
#include "llvm/IR/Metadata.def"
}
}
static void
trackCyclesUnderDistinct(const UniquableMDNode *Node,
SmallVectorImpl<UniquableMDNode *> &Cycles) {
// Track any cycles beneath this node.
for (Metadata *Op : Node->operands())
if (auto *N = dyn_cast_or_null<UniquableMDNode>(Op))
if (!N->isResolved())
Cycles.push_back(N);
}
/// \brief Map a distinct MDNode.
///
/// Distinct nodes are not uniqued, so they must always be recreated.
static Metadata *mapDistinctNode(const UniquableMDNode *Node,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
@ -241,9 +260,11 @@ static Metadata *mapDistinctNode(const UniquableMDNode *Node,
// Fix the operands.
for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I)
NewMD->replaceOperandWith(I, mapMetadataOp(Node->getOperand(I), VM, Flags,
TypeMapper, Materializer));
NewMD->replaceOperandWith(I,
mapMetadataOp(Node->getOperand(I), Cycles, VM,
Flags, TypeMapper, Materializer));
trackCyclesUnderDistinct(NewMD, Cycles);
return NewMD;
}
@ -252,9 +273,11 @@ static Metadata *mapDistinctNode(const UniquableMDNode *Node,
std::unique_ptr<MDNodeFwdDecl> Dummy(
MDNode::getTemporary(Node->getContext(), None));
mapToMetadata(VM, Node, Dummy.get());
Metadata *NewMD = cloneMDNode(Node, VM, Flags, TypeMapper, Materializer,
/* IsDistinct */ true);
auto *NewMD = cast<UniquableMDNode>(cloneMDNode(Node, Cycles, VM, Flags,
TypeMapper, Materializer,
/* IsDistinct */ true));
Dummy->replaceAllUsesWith(NewMD);
trackCyclesUnderDistinct(NewMD, Cycles);
return mapToMetadata(VM, Node, NewMD);
}
@ -263,13 +286,14 @@ static Metadata *mapDistinctNode(const UniquableMDNode *Node,
/// Check whether a uniqued node needs to be remapped (due to any operands
/// changing).
static bool shouldRemapUniquedNode(const UniquableMDNode *Node,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
// Check all operands to see if any need to be remapped.
for (unsigned I = 0, E = Node->getNumOperands(); I != E; ++I) {
Metadata *Op = Node->getOperand(I);
if (Op != mapMetadataOp(Op, VM, Flags, TypeMapper, Materializer))
if (Op != mapMetadataOp(Op, Cycles, VM, Flags, TypeMapper, Materializer))
return true;
}
return false;
@ -279,9 +303,10 @@ static bool shouldRemapUniquedNode(const UniquableMDNode *Node,
///
/// Uniqued nodes may not need to be recreated (they may map to themselves).
static Metadata *mapUniquedNode(const UniquableMDNode *Node,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
assert(!Node->isDistinct() && "Expected uniqued node");
// Create a dummy node in case we have a metadata cycle.
@ -289,7 +314,8 @@ static Metadata *mapUniquedNode(const UniquableMDNode *Node,
mapToMetadata(VM, Node, Dummy);
// Check all operands to see if any need to be remapped.
if (!shouldRemapUniquedNode(Node, VM, Flags, TypeMapper, Materializer)) {
if (!shouldRemapUniquedNode(Node, Cycles, VM, Flags, TypeMapper,
Materializer)) {
// Use an identity mapping.
mapToSelf(VM, Node);
MDNode::deleteTemporary(Dummy);
@ -297,15 +323,17 @@ static Metadata *mapUniquedNode(const UniquableMDNode *Node,
}
// At least one operand needs remapping.
Metadata *NewMD = cloneMDNode(Node, VM, Flags, TypeMapper, Materializer,
/* IsDistinct */ false);
Metadata *NewMD =
cloneMDNode(Node, Cycles, VM, Flags, TypeMapper, Materializer,
/* IsDistinct */ false);
Dummy->replaceAllUsesWith(NewMD);
MDNode::deleteTemporary(Dummy);
return mapToMetadata(VM, Node, NewMD);
}
static Metadata *MapMetadataImpl(const Metadata *MD, ValueToValueMapTy &VM,
RemapFlags Flags,
static Metadata *MapMetadataImpl(const Metadata *MD,
SmallVectorImpl<UniquableMDNode *> &Cycles,
ValueToValueMapTy &VM, RemapFlags Flags,
ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
// If the value already exists in the map, use it.
@ -345,18 +373,30 @@ static Metadata *MapMetadataImpl(const Metadata *MD, ValueToValueMapTy &VM,
return mapToSelf(VM, MD);
if (Node->isDistinct())
return mapDistinctNode(Node, VM, Flags, TypeMapper, Materializer);
return mapDistinctNode(Node, Cycles, VM, Flags, TypeMapper, Materializer);
return mapUniquedNode(Node, VM, Flags, TypeMapper, Materializer);
return mapUniquedNode(Node, Cycles, VM, Flags, TypeMapper, Materializer);
}
Metadata *llvm::MapMetadata(const Metadata *MD, ValueToValueMapTy &VM,
RemapFlags Flags, ValueMapTypeRemapper *TypeMapper,
ValueMaterializer *Materializer) {
Metadata *NewMD = MapMetadataImpl(MD, VM, Flags, TypeMapper, Materializer);
if (NewMD && NewMD != MD)
SmallVector<UniquableMDNode *, 8> Cycles;
Metadata *NewMD =
MapMetadataImpl(MD, Cycles, VM, Flags, TypeMapper, Materializer);
// Resolve cycles underneath MD.
if (NewMD && NewMD != MD) {
if (auto *N = dyn_cast<UniquableMDNode>(NewMD))
N->resolveCycles();
for (UniquableMDNode *N : Cycles)
N->resolveCycles();
} else {
// Shouldn't get unresolved cycles if nothing was remapped.
assert(Cycles.empty() && "Expected no unresolved cycles");
}
return NewMD;
}


@ -75,6 +75,18 @@ static const unsigned MinVecRegSize = 128;
static const unsigned RecursionMaxDepth = 12;
/// \brief Predicate for the element types that the SLP vectorizer supports.
///
/// The most important things to filter here are types which are invalid in LLVM
/// vectors. We also filter target specific types which have absolutely no
/// meaningful vectorization path such as x86_fp80 and ppc_f128. This just
/// avoids spending time checking the cost model and realizing that they will
/// be inevitably scalarized.
static bool isValidElementType(Type *Ty) {
return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
!Ty->isPPC_FP128Ty();
}
/// \returns the parent basic block if all of the instructions in \p VL
/// are in the same block or null otherwise.
static BasicBlock *getSameBlock(ArrayRef<Value *> VL) {
@ -208,6 +220,8 @@ static Instruction *propagateMetadata(Instruction *I, ArrayRef<Value *> VL) {
MD = MDNode::getMostGenericTBAA(MD, IMD);
break;
case LLVMContext::MD_alias_scope:
MD = MDNode::getMostGenericAliasScope(MD, IMD);
break;
case LLVMContext::MD_noalias:
MD = MDNode::intersect(MD, IMD);
break;
@ -1214,7 +1228,7 @@ void BoUpSLP::buildTree_rec(ArrayRef<Value *> VL, unsigned Depth) {
Type *SrcTy = VL0->getOperand(0)->getType();
for (unsigned i = 0; i < VL.size(); ++i) {
Type *Ty = cast<Instruction>(VL[i])->getOperand(0)->getType();
if (Ty != SrcTy || Ty->isAggregateType() || Ty->isVectorTy()) {
if (Ty != SrcTy || !isValidElementType(Ty)) {
BS.cancelScheduling(VL);
newTreeEntry(VL, false);
DEBUG(dbgs() << "SLP: Gathering casts with different src types.\n");
@ -3128,7 +3142,7 @@ unsigned SLPVectorizer::collectStores(BasicBlock *BB, BoUpSLP &R) {
// Check that the pointer points to scalars.
Type *Ty = SI->getValueOperand()->getType();
if (Ty->isAggregateType() || Ty->isVectorTy())
if (!isValidElementType(Ty))
continue;
// Find the base pointer.
@ -3169,7 +3183,7 @@ bool SLPVectorizer::tryToVectorizeList(ArrayRef<Value *> VL, BoUpSLP &R,
for (int i = 0, e = VL.size(); i < e; ++i) {
Type *Ty = VL[i]->getType();
if (Ty->isAggregateType() || Ty->isVectorTy())
if (!isValidElementType(Ty))
return false;
Instruction *Inst = dyn_cast<Instruction>(VL[i]);
if (!Inst || Inst->getOpcode() != Opcode0)
@ -3389,7 +3403,7 @@ class HorizontalReduction {
return false;
Type *Ty = B->getType();
if (Ty->isVectorTy())
if (!isValidElementType(Ty))
return false;
ReductionOpcode = B->getOpcode();
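
Since isValidElementType is file-local, a standalone restatement makes the
filter easy to experiment with (assumes an LLVM 3.6 tree on the include path):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
using namespace llvm;

static bool isValidElementType(Type *Ty) {
  return VectorType::isValidElementType(Ty) && !Ty->isX86_FP80Ty() &&
         !Ty->isPPC_FP128Ty();
}

int main() {
  LLVMContext Ctx;
  bool Float = isValidElementType(Type::getFloatTy(Ctx));   // true
  bool FP80 = isValidElementType(Type::getX86_FP80Ty(Ctx)); // false: would
                                                            // only scalarize
  return (Float && !FP80) ? 0 : 1;
}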


@ -1,11 +1,11 @@
This is a set of individual patches, which contain all the customizations to
llvm/clang currently in the FreeBSD base system. These can be applied in
alphabetical order to a pristine llvm/clang 3.6.0 RC2 source tree, for example
alphabetical order to a pristine llvm/clang 3.6.0 RC3 source tree, for example
by doing:
svn co https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_351/rc2 llvm-3.6.0-rc2
svn co https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_351/rc2 llvm-3.6.0-rc2/tools/clang
cd llvm-3.6.0-rc2
svn co https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_360/rc3 llvm-3.6.0-rc3
svn co https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_360/rc3 llvm-3.6.0-rc3/tools/clang
cd llvm-3.6.0-rc3
for p in /usr/src/contrib/llvm/patches/patch-*.diff; do
patch -p0 -f -F0 -E -i $p -s || break
done


@ -749,3 +749,6 @@ def SerializedDiagnostics : DiagGroup<"serialized-diagnostics">;
// A warning group for warnings about code that clang accepts when
// compiling CUDA C/C++ but which is not compatible with the CUDA spec.
def CudaCompat : DiagGroup<"cuda-compat">;
// A warning group for things that will change semantics in the future.
def FutureCompat : DiagGroup<"future-compat">;


@ -802,6 +802,10 @@ def warn_cxx98_compat_lambda : Warning<
def err_lambda_missing_parens : Error<
"lambda requires '()' before %select{'mutable'|return type|"
"attribute specifier}0">;
def warn_init_capture_direct_list_init : Warning<
"direct list initialization of a lambda init-capture will change meaning in "
"a future version of Clang; insert an '=' to avoid a change in behavior">,
InGroup<FutureCompat>;
// Availability attribute
def err_expected_version : Error<


@ -1635,6 +1635,10 @@ def err_auto_var_init_multiple_expressions : Error<
def err_auto_var_init_paren_braces : Error<
"cannot deduce type for variable %0 with type %1 from "
"parenthesized initializer list">;
def warn_auto_var_direct_list_init : Warning<
"direct list initialization of a variable with a deduced type will change "
"meaning in a future version of Clang; insert an '=' to avoid a change in "
"behavior">, InGroup<FutureCompat>;
def err_auto_new_ctor_multiple_expressions : Error<
"new expression for type %0 contains multiple constructor arguments">;
def err_auto_missing_trailing_return : Error<


@ -1693,12 +1693,12 @@ const Type *CXXCtorInitializer::getBaseClass() const {
}
SourceLocation CXXCtorInitializer::getSourceLocation() const {
if (isAnyMemberInitializer())
return getMemberLocation();
if (isInClassMemberInitializer())
return getAnyMember()->getLocation();
if (isAnyMemberInitializer())
return getMemberLocation();
if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>())
return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
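
The reorder matters, presumably because an in-class member initializer also
satisfies isAnyMemberInitializer(), so the more specific test has to run
first. The two initializer kinds being distinguished, in plain C++:

struct S {
  int a = 1;     // in-class initializer: the useful location is 'a' itself
  int b;
  S() : b(2) {}  // ctor-init-list entry: the location is the mention of 'b'
};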


@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_360/rc2/lib/Basic/Version.cpp $");
StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_360/rc3/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));


@ -544,7 +544,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXCtorInitializer *MemberInit,
const CXXConstructorDecl *Constructor,
FunctionArgList &Args) {
ApplyDebugLocation Loc(CGF, MemberInit->getMemberLocation());
ApplyDebugLocation Loc(CGF, MemberInit->getSourceLocation());
assert(MemberInit->isAnyMemberInitializer() &&
"Must have member initializer!");
assert(MemberInit->getInit() && "Must have initializer!");
@ -598,7 +598,6 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
ArrayRef<VarDecl *> ArrayIndexes;
if (MemberInit->getNumArrayIndices())
ArrayIndexes = MemberInit->getArrayIndexes();
ApplyDebugLocation DL(CGF, MemberInit->getMemberLocation());
CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}


@ -3393,11 +3393,12 @@ Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
assert(E && hasScalarEvaluationKind(E->getType()) &&
"Invalid scalar expression to emit");
bool hasDebugInfo = getDebugInfo();
if (isa<CXXDefaultArgExpr>(E))
disableDebugInfo();
Value *V = ScalarExprEmitter(*this, IgnoreResultAssign)
.Visit(const_cast<Expr*>(E));
if (isa<CXXDefaultArgExpr>(E))
if (isa<CXXDefaultArgExpr>(E) && hasDebugInfo)
enableDebugInfo();
return V;
}


@ -935,8 +935,8 @@ static void emitUsed(CodeGenModule &CGM, StringRef Name,
UsedArray.resize(List.size());
for (unsigned i = 0, e = List.size(); i != e; ++i) {
UsedArray[i] =
llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*List[i]),
CGM.Int8PtrTy);
llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
cast<llvm::Constant>(&*List[i]), CGM.Int8PtrTy);
}
if (UsedArray.empty())


@ -324,7 +324,7 @@ class CodeGenModule : public CodeGenTypeCache {
/// referenced. These get code generated when the module is done.
struct DeferredGlobal {
DeferredGlobal(llvm::GlobalValue *GV, GlobalDecl GD) : GV(GV), GD(GD) {}
llvm::AssertingVH<llvm::GlobalValue> GV;
llvm::TrackingVH<llvm::GlobalValue> GV;
GlobalDecl GD;
};
std::vector<DeferredGlobal> DeferredDeclsToEmit;


@ -13,6 +13,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
#include "clang/Config/config.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@ -1538,7 +1539,7 @@ static void AddGoldPlugin(const ToolChain &ToolChain, const ArgList &Args,
// as gold requires -plugin to come before any -plugin-opt that -Wl might
// forward.
CmdArgs.push_back("-plugin");
std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
std::string Plugin = ToolChain.getDriver().Dir + "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold.so";
CmdArgs.push_back(Args.MakeArgString(Plugin));
// Try to pass driver level flags relevant to LTO code generation down to
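
Note how the suffix lands in the path: CLANG_LIBDIR_SUFFIX expands to a string
literal, and adjacent literals are concatenated at compile time. A
self-contained illustration, with a hypothetical suffix value for the demo:

#include <cassert>
#include <string>

#define CLANG_LIBDIR_SUFFIX "32" // hypothetical multilib suffix

int main() {
  std::string Dir = "/usr/bin";
  std::string Plugin = Dir + "/../lib" CLANG_LIBDIR_SUFFIX "/LLVMgold.so";
  assert(Plugin == "/usr/bin/../lib32/LLVMgold.so");
  return 0;
}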


@ -894,11 +894,16 @@ Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro,
// to save the necessary state, and restore it later.
EnterExpressionEvaluationContext EC(Actions,
Sema::PotentiallyEvaluated);
TryConsumeToken(tok::equal);
bool HadEquals = TryConsumeToken(tok::equal);
if (!SkippedInits)
if (!SkippedInits) {
// Warn on constructs that will change meaning when we implement N3922
if (!HadEquals && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_init_capture_direct_list_init)
<< FixItHint::CreateInsertion(Tok.getLocation(), "=");
}
Init = ParseInitializer();
else if (Tok.is(tok::l_brace)) {
} else if (Tok.is(tok::l_brace)) {
BalancedDelimiterTracker Braces(*this, tok::l_brace);
Braces.consumeOpen();
Braces.skipToEnd();
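
For reference, the kind of code the new warning fires on (illustrative):

void f() {
  int x = 0;
  auto ok   = [v = x] {};  // copy-init capture: no warning
  auto warn = [v{x}] {};   // direct-list-init capture: warns under
                           // -Wfuture-compat; the fix-it inserts '='
}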


@ -8702,6 +8702,14 @@ void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
CheckVariableDeclarationType(VDecl);
if (VDecl->isInvalidDecl())
return;
// If all looks well, warn if this is a case that will change meaning when
// we implement N3922.
if (DirectInit && !CXXDirectInit && isa<InitListExpr>(Init)) {
Diag(Init->getLocStart(),
diag::warn_auto_var_direct_list_init)
<< FixItHint::CreateInsertion(Init->getLocStart(), "=");
}
}
// dllimport cannot be used on variable definitions.
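
The companion Sema warning covers the plain-variable case of N3922
(illustrative):

#include <initializer_list>

auto a{1};    // warns: std::initializer_list<int> today, int once N3922 lands
auto b = {1}; // no warning: stays std::initializer_list<int> either way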


@ -2500,8 +2500,18 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
// will always be a (possibly implicit) declaration to shadow any others.
OverloadCandidateSet OCS(RD->getLocation(), OverloadCandidateSet::CSK_Normal);
DeclContext::lookup_result R = RD->lookup(Name);
assert(!R.empty() &&
"lookup for a constructor or assignment operator was empty");
if (R.empty()) {
// We might have no default constructor because we have a lambda's closure
// type, rather than because there's some other declared constructor.
// Every class has a copy/move constructor, copy/move assignment, and
// destructor.
assert(SM == CXXDefaultConstructor &&
"lookup for a constructor or assignment operator was empty");
Result->setMethod(nullptr);
Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
return Result;
}
// Copy the candidates as our processing of them may load new declarations
// from an external source and invalidate lookup_result.
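
The empty-lookup case is real: a lambda's closure type declares no default
constructor at all, while the remaining special members are always at least
declared. In plain C++:

void g() {
  auto l = [] {};
  decltype(l) copy(l);  // fine: closure types are copy-constructible
  // decltype(l) d;     // ill-formed: no default constructor to look up
}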


@ -370,21 +370,21 @@ void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
// FunctionDecl's body is handled last at ASTWriterDecl::Visit,
// after everything else is written.
Record.push_back(D->getStorageClass()); // FIXME: stable encoding
Record.push_back((int)D->SClass); // FIXME: stable encoding
Record.push_back(D->IsInline);
Record.push_back(D->isInlineSpecified());
Record.push_back(D->isVirtualAsWritten());
Record.push_back(D->isPure());
Record.push_back(D->hasInheritedPrototype());
Record.push_back(D->hasWrittenPrototype());
Record.push_back(D->isDeletedAsWritten());
Record.push_back(D->isTrivial());
Record.push_back(D->isDefaulted());
Record.push_back(D->isExplicitlyDefaulted());
Record.push_back(D->hasImplicitReturnZero());
Record.push_back(D->isConstexpr());
Record.push_back(D->IsInlineSpecified);
Record.push_back(D->IsVirtualAsWritten);
Record.push_back(D->IsPure);
Record.push_back(D->HasInheritedPrototype);
Record.push_back(D->HasWrittenPrototype);
Record.push_back(D->IsDeleted);
Record.push_back(D->IsTrivial);
Record.push_back(D->IsDefaulted);
Record.push_back(D->IsExplicitlyDefaulted);
Record.push_back(D->HasImplicitReturnZero);
Record.push_back(D->IsConstexpr);
Record.push_back(D->HasSkippedBody);
Record.push_back(D->isLateTemplateParsed());
Record.push_back(D->IsLateTemplateParsed);
Record.push_back(D->getLinkageInternal());
Writer.AddSourceLocation(D->getLocEnd(), Record);
@ -1802,7 +1802,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Pure
Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedProto
Abv->Add(BitCodeAbbrevOp(1)); // HasWrittenProto
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // DeletedAsWritten
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Deleted
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Trivial
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Defaulted
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ExplicitlyDefaulted


@ -6,6 +6,6 @@
#define CLANG_VERSION_PATCHLEVEL 0
#define CLANG_VENDOR "FreeBSD "
#define CLANG_VENDOR_SUFFIX " 20150131"
#define CLANG_VENDOR_SUFFIX " 20150214"
#define SVN_REVISION "227651"
#define SVN_REVISION "229040"


@ -14,6 +14,9 @@
/* Define if we have libxml2 */
/* #undef CLANG_HAVE_LIBXML */
/* Multilib suffix for libdir. */
/* #undef CLANG_LIBDIR_SUFFIX */
/* Relative directory for resource files */
#define CLANG_RESOURCE_DIR ""


@ -7,7 +7,8 @@ LIB= lldbPluginInstrumentationRuntimeAddressSanitizer
SRCDIR= tools/lldb/source/Plugins/InstrumentationRuntime/AddressSanitizer
SRCS= AddressSanitizerRuntime.cpp
TGHDRS= Attrs \
TGHDRS= AttrList \
Attrs \
DiagnosticCommonKinds \
DeclNodes \
StmtNodes \


@ -93,7 +93,6 @@ LIBDEPS=\
llvmlinker \
llvmmcjit \
llvmruntimedyld \
llvmjit \
llvmexecutionengine \
llvmirreader \
llvmipo \