Merge llvm, clang, compiler-rt, libc++, lld and lldb release_40 branch r295380, and update build glue.
Dimitry Andric 2017-02-17 20:07:35 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang400-import/; revision=313894
23 changed files with 225 additions and 113 deletions

View File

@@ -21,7 +21,7 @@
DEFINE_COMPILERRT_FUNCTION(__subsf3vfp)
#if defined(COMPILER_RT_ARMHF_TARGET)
vsub.f32 s0, s0, s1
#elsee
#else
vmov s14, r0 // move first param from r0 into float register
vmov s15, r1 // move second param from r1 into float register
vsub.f32 s14, s14, s15

View File

@@ -31,6 +31,23 @@ class StringRef;
class LLVMContext;
class TargetMachine;
/// Wrapper around MemoryBufferRef, owning the identifier
class ThinLTOBuffer {
std::string OwnedIdentifier;
StringRef Buffer;
public:
ThinLTOBuffer(StringRef Buffer, StringRef Identifier)
: OwnedIdentifier(Identifier), Buffer(Buffer) {}
MemoryBufferRef getMemBuffer() const {
return MemoryBufferRef(Buffer,
{OwnedIdentifier.c_str(), OwnedIdentifier.size()});
}
StringRef getBuffer() const { return Buffer; }
StringRef getBufferIdentifier() const { return OwnedIdentifier; }
};
/// Helper to gather options relevant to the target machine creation
struct TargetMachineBuilder {
Triple TheTriple;
@@ -280,7 +297,7 @@ class ThinLTOCodeGenerator {
/// Vector holding the input buffers containing the bitcode modules to
/// process.
std::vector<MemoryBufferRef> Modules;
std::vector<ThinLTOBuffer> Modules;
/// Set of symbols that need to be preserved outside of the set of bitcode
/// files.
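
The ThinLTOBuffer wrapper above exists because MemoryBufferRef only references its identifier string; once Modules outlives the caller's string, the identifier would dangle. A minimal self-contained sketch of the same owning-wrapper idea (BufferRef and OwningIdBuffer are illustrative stand-ins, not LLVM API):

    #include <cstdio>
    #include <string>
    #include <string_view>

    // Stand-in for llvm::MemoryBufferRef: it *references* both the data and
    // the identifier, owning neither.
    struct BufferRef {
      std::string_view Data;
      std::string_view Identifier;
    };

    // Mirrors ThinLTOBuffer: copy the identifier into owned storage so the
    // BufferRef handed out later can never point at a dead string.
    class OwningIdBuffer {
      std::string OwnedIdentifier;
      std::string_view Buffer;

    public:
      OwningIdBuffer(std::string_view Buffer, std::string_view Identifier)
          : OwnedIdentifier(Identifier), Buffer(Buffer) {}

      BufferRef getMemBuffer() const { return {Buffer, OwnedIdentifier}; }
    };

    int main() {
      OwningIdBuffer B("bitcode-bytes", std::string("module0.o"));
      // The temporary std::string above is gone, but the identifier survives
      // because OwningIdBuffer copied it.
      BufferRef Ref = B.getMemBuffer();
      std::printf("%.*s\n", (int)Ref.Identifier.size(), Ref.Identifier.data());
    }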

View File

@@ -13072,9 +13072,15 @@ SDValue DAGCombiner::createBuildVecShuffle(SDLoc DL, SDNode *N,
!TLI.isOperationLegal(ISD::VECTOR_SHUFFLE, InVT1))
return SDValue();
if (InVT1 != InVT2)
// Legalizing INSERT_SUBVECTOR is tricky - you basically have to
// lower it back into a BUILD_VECTOR. So if the inserted type is
// illegal, don't even try.
if (InVT1 != InVT2) {
if (!TLI.isTypeLegal(InVT2))
return SDValue();
VecIn2 = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT1,
DAG.getUNDEF(InVT1), VecIn2, ZeroIdx);
}
ShuffleNumElems = NumElems * 2;
} else {
// Both VecIn1 and VecIn2 are wider than the output, and VecIn2 is wider

View File

@@ -502,8 +502,17 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
const TargetRegisterClass *TRC =
TLI->getRegClassFor(Node->getSimpleValueType(0));
unsigned VReg = getVR(Node->getOperand(0), VRBaseMap);
MachineInstr *DefMI = MRI->getVRegDef(VReg);
unsigned Reg;
MachineInstr *DefMI;
RegisterSDNode *R = dyn_cast<RegisterSDNode>(Node->getOperand(0));
if (R && TargetRegisterInfo::isPhysicalRegister(R->getReg())) {
Reg = R->getReg();
DefMI = nullptr;
} else {
Reg = getVR(Node->getOperand(0), VRBaseMap);
DefMI = MRI->getVRegDef(Reg);
}
unsigned SrcReg, DstReg, DefSubIdx;
if (DefMI &&
TII->isCoalescableExtInstr(*DefMI, SrcReg, DstReg, DefSubIdx) &&
@@ -519,20 +528,26 @@ void InstrEmitter::EmitSubregNode(SDNode *Node,
TII->get(TargetOpcode::COPY), VRBase).addReg(SrcReg);
MRI->clearKillFlags(SrcReg);
} else {
// VReg may not support a SubIdx sub-register, and we may need to
// Reg may not support a SubIdx sub-register, and we may need to
// constrain its register class or issue a COPY to a compatible register
// class.
VReg = ConstrainForSubReg(VReg, SubIdx,
Node->getOperand(0).getSimpleValueType(),
Node->getDebugLoc());
if (TargetRegisterInfo::isVirtualRegister(Reg))
Reg = ConstrainForSubReg(Reg, SubIdx,
Node->getOperand(0).getSimpleValueType(),
Node->getDebugLoc());
// Create the destreg if it is missing.
if (VRBase == 0)
VRBase = MRI->createVirtualRegister(TRC);
// Create the extract_subreg machine instruction.
BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
TII->get(TargetOpcode::COPY), VRBase).addReg(VReg, 0, SubIdx);
MachineInstrBuilder CopyMI =
BuildMI(*MBB, InsertPos, Node->getDebugLoc(),
TII->get(TargetOpcode::COPY), VRBase);
if (TargetRegisterInfo::isVirtualRegister(Reg))
CopyMI.addReg(Reg, 0, SubIdx);
else
CopyMI.addReg(TRI->getSubReg(Reg, SubIdx));
}
} else if (Opc == TargetOpcode::INSERT_SUBREG ||
Opc == TargetOpcode::SUBREG_TO_REG) {
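
The InstrEmitter change teaches subregister extraction to accept a physical register operand: a virtual register keeps the subregister index on the COPY operand, while a physical register is narrowed to the concrete subregister up front (TRI->getSubReg). A toy sketch of that branch, with a deliberately simplified register model (negative ids stand in for physical registers; none of these names are LLVM's API):

    #include <cstdio>
    #include <string>

    // Toy register model: negative ids are "physical", positive are "virtual".
    bool isPhysical(int Reg) { return Reg < 0; }
    // Stand-in for TRI->getSubReg on a physical register.
    int getSubReg(int PhysReg, int SubIdx) { return PhysReg * 10 - SubIdx; }

    // Mirrors the fixed EmitSubregNode logic: virtual registers carry the
    // subregister index on the COPY; physical registers are resolved to the
    // concrete subregister before the copy is built.
    std::string buildSubregCopy(int Reg, int SubIdx) {
      if (!isPhysical(Reg))
        return "COPY vreg" + std::to_string(Reg) + ":sub" + std::to_string(SubIdx);
      return "COPY preg" + std::to_string(getSubReg(Reg, SubIdx));
    }

    int main() {
      std::printf("%s\n", buildSubregCopy(5, 1).c_str());  // virtual path
      std::printf("%s\n", buildSubregCopy(-3, 1).c_str()); // physical path
    }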

View File

@@ -5832,6 +5832,15 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
const Value *SwiftErrorVal = nullptr;
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
// We can't tail call inside a function with a swifterror argument. Lowering
// does not support this yet. It would have to move into the swifterror
// register before the call.
auto *Caller = CS.getInstruction()->getParent()->getParent();
if (TLI.supportSwiftError() &&
Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
isTailCall = false;
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
const Value *V = *i;

View File

@@ -2782,14 +2782,15 @@ struct MatchScope {
/// for this.
class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
{
SmallVectorImpl<std::pair<SDValue, SDNode*> > &RecordedNodes;
SmallVectorImpl<MatchScope> &MatchScopes;
SDNode **NodeToMatch;
SmallVectorImpl<std::pair<SDValue, SDNode *>> &RecordedNodes;
SmallVectorImpl<MatchScope> &MatchScopes;
public:
MatchStateUpdater(SelectionDAG &DAG,
SmallVectorImpl<std::pair<SDValue, SDNode*> > &RN,
SmallVectorImpl<MatchScope> &MS) :
SelectionDAG::DAGUpdateListener(DAG),
RecordedNodes(RN), MatchScopes(MS) { }
MatchStateUpdater(SelectionDAG &DAG, SDNode **NodeToMatch,
SmallVectorImpl<std::pair<SDValue, SDNode *>> &RN,
SmallVectorImpl<MatchScope> &MS)
: SelectionDAG::DAGUpdateListener(DAG), NodeToMatch(NodeToMatch),
RecordedNodes(RN), MatchScopes(MS) {}
void NodeDeleted(SDNode *N, SDNode *E) override {
// Some early-returns here to avoid the search if we deleted the node or
@@ -2799,6 +2800,9 @@ class MatchStateUpdater : public SelectionDAG::DAGUpdateListener
// update listener during matching complex patterns.
if (!E || E->isMachineOpcode())
return;
// Check if NodeToMatch was updated.
if (N == *NodeToMatch)
*NodeToMatch = E;
// Performing linear search here does not matter because we almost never
// run this code. You'd have to have a CSE during complex pattern
// matching.
@@ -3091,7 +3095,7 @@ void SelectionDAGISel::SelectCodeCommon(SDNode *NodeToMatch,
// consistent.
std::unique_ptr<MatchStateUpdater> MSU;
if (ComplexPatternFuncMutatesDAG())
MSU.reset(new MatchStateUpdater(*CurDAG, RecordedNodes,
MSU.reset(new MatchStateUpdater(*CurDAG, &NodeToMatch, RecordedNodes,
MatchScopes));
if (!CheckComplexPattern(NodeToMatch, RecordedNodes[RecNo].second,
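
The MatchStateUpdater change adds NodeToMatch itself to the pointers the listener keeps current: if a DAG-mutating complex pattern CSEs the node being matched, the matcher would otherwise keep selecting against a deleted node. A self-contained sketch of the same observer idea (Node, StateUpdater, and nodeReplaced are illustrative stand-ins):

    #include <cstdio>
    #include <utility>
    #include <vector>

    struct Node { int id; };

    // Stand-in for SelectionDAG::DAGUpdateListener: gets told when Old is
    // replaced by New (e.g. by CSE) and patches every stale pointer it
    // guards, including the node being matched.
    class StateUpdater {
      Node **NodeToMatch;
      std::vector<std::pair<Node *, Node *>> &Recorded;

    public:
      StateUpdater(Node **NodeToMatch,
                   std::vector<std::pair<Node *, Node *>> &Recorded)
          : NodeToMatch(NodeToMatch), Recorded(Recorded) {}

      void nodeReplaced(Node *Old, Node *New) {
        if (*NodeToMatch == Old) // the fix: keep the match root up to date
          *NodeToMatch = New;
        for (auto &P : Recorded) // linear search is fine; this path is rare
          if (P.first == Old)
            P.first = New;
      }
    };

    int main() {
      Node A{1}, B{2};
      Node *Root = &A;
      std::vector<std::pair<Node *, Node *>> Rec = {{&A, nullptr}};
      StateUpdater U(&Root, Rec);
      U.nodeReplaced(&A, &B);                  // simulate CSE replacing A with B
      std::printf("root id = %d\n", Root->id); // prints 2
    }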

View File

@@ -150,13 +150,13 @@ static void computePrevailingCopies(
}
static StringMap<MemoryBufferRef>
generateModuleMap(const std::vector<MemoryBufferRef> &Modules) {
generateModuleMap(const std::vector<ThinLTOBuffer> &Modules) {
StringMap<MemoryBufferRef> ModuleMap;
for (auto &ModuleBuffer : Modules) {
assert(ModuleMap.find(ModuleBuffer.getBufferIdentifier()) ==
ModuleMap.end() &&
"Expect unique Buffer Identifier");
ModuleMap[ModuleBuffer.getBufferIdentifier()] = ModuleBuffer;
ModuleMap[ModuleBuffer.getBufferIdentifier()] = ModuleBuffer.getMemBuffer();
}
return ModuleMap;
}
@@ -522,13 +522,13 @@ static void initTMBuilder(TargetMachineBuilder &TMBuilder,
} // end anonymous namespace
void ThinLTOCodeGenerator::addModule(StringRef Identifier, StringRef Data) {
MemoryBufferRef Buffer(Data, Identifier);
ThinLTOBuffer Buffer(Data, Identifier);
if (Modules.empty()) {
// First module added, so initialize the triple and some options
LLVMContext Context;
StringRef TripleStr;
ErrorOr<std::string> TripleOrErr =
expectedToErrorOrAndEmitErrors(Context, getBitcodeTargetTriple(Buffer));
ErrorOr<std::string> TripleOrErr = expectedToErrorOrAndEmitErrors(
Context, getBitcodeTargetTriple(Buffer.getMemBuffer()));
if (TripleOrErr)
TripleStr = *TripleOrErr;
Triple TheTriple(TripleStr);
@@ -538,8 +538,8 @@ void ThinLTOCodeGenerator::addModule(StringRef Identifier, StringRef Data) {
else {
LLVMContext Context;
StringRef TripleStr;
ErrorOr<std::string> TripleOrErr =
expectedToErrorOrAndEmitErrors(Context, getBitcodeTargetTriple(Buffer));
ErrorOr<std::string> TripleOrErr = expectedToErrorOrAndEmitErrors(
Context, getBitcodeTargetTriple(Buffer.getMemBuffer()));
if (TripleOrErr)
TripleStr = *TripleOrErr;
assert(TMBuilder.TheTriple.str() == TripleStr &&
@@ -588,7 +588,8 @@ std::unique_ptr<ModuleSummaryIndex> ThinLTOCodeGenerator::linkCombinedIndex() {
uint64_t NextModuleId = 0;
for (auto &ModuleBuffer : Modules) {
Expected<std::unique_ptr<object::ModuleSummaryIndexObjectFile>> ObjOrErr =
object::ModuleSummaryIndexObjectFile::create(ModuleBuffer);
object::ModuleSummaryIndexObjectFile::create(
ModuleBuffer.getMemBuffer());
if (!ObjOrErr) {
// FIXME diagnose
logAllUnhandledErrors(
@@ -852,8 +853,9 @@ void ThinLTOCodeGenerator::run() {
Context.setDiscardValueNames(LTODiscardValueNames);
// Parse module now
auto TheModule = loadModuleFromBuffer(ModuleBuffer, Context, false,
/*IsImporting*/ false);
auto TheModule =
loadModuleFromBuffer(ModuleBuffer.getMemBuffer(), Context, false,
/*IsImporting*/ false);
// CodeGen
auto OutputBuffer = codegen(*TheModule);
@@ -943,8 +945,8 @@ void ThinLTOCodeGenerator::run() {
std::iota(ModulesOrdering.begin(), ModulesOrdering.end(), 0);
std::sort(ModulesOrdering.begin(), ModulesOrdering.end(),
[&](int LeftIndex, int RightIndex) {
auto LSize = Modules[LeftIndex].getBufferSize();
auto RSize = Modules[RightIndex].getBufferSize();
auto LSize = Modules[LeftIndex].getBuffer().size();
auto RSize = Modules[RightIndex].getBuffer().size();
return LSize > RSize;
});
@@ -996,8 +998,9 @@ void ThinLTOCodeGenerator::run() {
}
// Parse module now
auto TheModule = loadModuleFromBuffer(ModuleBuffer, Context, false,
/*IsImporting*/ false);
auto TheModule =
loadModuleFromBuffer(ModuleBuffer.getMemBuffer(), Context, false,
/*IsImporting*/ false);
// Save temps: original file.
saveTempBitcode(*TheModule, SaveTempsDir, count, ".0.original.bc");
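
The ordering tweak in ThinLTOCodeGenerator::run() keeps the existing scheduling heuristic, dispatching the biggest bitcode buffers to the thread pool first; it simply reads the size through ThinLTOBuffer now. The pattern in isolation:

    #include <algorithm>
    #include <cstdio>
    #include <numeric>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> Modules = {"tiny", "a much larger module",
                                          "mid-size"};

      // Same scheme as the ThinLTO driver above: sort *indices*, largest
      // buffer first, so the longest codegen jobs start earliest.
      std::vector<int> Ordering(Modules.size());
      std::iota(Ordering.begin(), Ordering.end(), 0);
      std::sort(Ordering.begin(), Ordering.end(), [&](int L, int R) {
        return Modules[L].size() > Modules[R].size();
      });

      for (int I : Ordering)
        std::printf("%zu bytes: %s\n", Modules[I].size(), Modules[I].c_str());
    }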

View File

@@ -91,7 +91,7 @@ def RetCC_AArch64_AAPCS : CallingConv<[
CCIfType<[v2f32], CCBitConvertToType<v2i32>>,
CCIfType<[v2f64, v4f32], CCBitConvertToType<v2i64>>,
CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X19], [W19]>>>,
CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,
// Big endian vectors must be passed as if they were 1-element vectors so that
// their lanes are in a consistent order.
@@ -138,8 +138,8 @@ def CC_AArch64_DarwinPCS : CallingConv<[
// Pass SwiftSelf in a callee saved register.
CCIfSwiftSelf<CCIfType<[i64], CCAssignToRegWithShadow<[X20], [W20]>>>,
// A SwiftError is passed in X19.
CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X19], [W19]>>>,
// A SwiftError is passed in X21.
CCIfSwiftError<CCIfType<[i64], CCAssignToRegWithShadow<[X21], [W21]>>>,
CCIfConsecutiveRegs<CCCustom<"CC_AArch64_Custom_Block">>,
@@ -289,7 +289,7 @@ def CSR_AArch64_AAPCS : CalleeSavedRegs<(add LR, FP, X19, X20, X21, X22,
def CSR_AArch64_AAPCS_ThisReturn : CalleeSavedRegs<(add CSR_AArch64_AAPCS, X0)>;
def CSR_AArch64_AAPCS_SwiftError
: CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X19)>;
: CalleeSavedRegs<(sub CSR_AArch64_AAPCS, X21)>;
// The function used by Darwin to obtain the address of a thread-local variable
// guarantees more than a normal AAPCS function. x16 and x17 are used on the

View File

@@ -3155,7 +3155,8 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
}
if (VA.isRegLoc()) {
if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i64) {
if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
Outs[0].VT == MVT::i64) {
assert(VA.getLocVT() == MVT::i64 &&
"unexpected calling convention register assignment");
assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&

View File

@@ -26,8 +26,8 @@ def CC_ARM_APCS : CallingConv<[
// Pass SwiftSelf in a callee saved register.
CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
// A SwiftError is passed in R6.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
// A SwiftError is passed in R8.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R8]>>>,
// Handle all vector types as either f64 or v2f64.
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
@@ -51,8 +51,8 @@ def RetCC_ARM_APCS : CallingConv<[
// Pass SwiftSelf in a callee saved register.
CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
// A SwiftError is returned in R6.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
// A SwiftError is returned in R8.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R8]>>>,
// Handle all vector types as either f64 or v2f64.
CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
@@ -166,8 +166,8 @@ def CC_ARM_AAPCS : CallingConv<[
// Pass SwiftSelf in a callee saved register.
CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
// A SwiftError is passed in R6.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
// A SwiftError is passed in R8.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R8]>>>,
CCIfType<[f64, v2f64], CCCustom<"CC_ARM_AAPCS_Custom_f64">>,
CCIfType<[f32], CCBitConvertToType<i32>>,
@@ -182,8 +182,8 @@ def RetCC_ARM_AAPCS : CallingConv<[
// Pass SwiftSelf in a callee saved register.
CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
// A SwiftError is returned in R6.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
// A SwiftError is returned in R8.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R8]>>>,
CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_AAPCS_Custom_f64">>,
CCIfType<[f32], CCBitConvertToType<i32>>,
@@ -206,8 +206,8 @@ def CC_ARM_AAPCS_VFP : CallingConv<[
// Pass SwiftSelf in a callee saved register.
CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
// A SwiftError is passed in R6.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
// A SwiftError is passed in R8.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R8]>>>,
// HFAs are passed in a contiguous block of registers, or on the stack
CCIfConsecutiveRegs<CCCustom<"CC_ARM_AAPCS_Custom_Aggregate">>,
@@ -227,8 +227,8 @@ def RetCC_ARM_AAPCS_VFP : CallingConv<[
// Pass SwiftSelf in a callee saved register.
CCIfSwiftSelf<CCIfType<[i32], CCAssignToReg<[R10]>>>,
// A SwiftError is returned in R6.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R6]>>>,
// A SwiftError is returned in R8.
CCIfSwiftError<CCIfType<[i32], CCAssignToReg<[R8]>>>,
CCIfType<[v2f64], CCAssignToReg<[Q0, Q1, Q2, Q3]>>,
CCIfType<[f64], CCAssignToReg<[D0, D1, D2, D3, D4, D5, D6, D7]>>,
@@ -267,8 +267,8 @@ def CSR_AAPCS_ThisReturn : CalleeSavedRegs<(add LR, R11, R10, R9, R8, R7, R6,
// Also save R7-R4 first to match the stack frame fixed spill areas.
def CSR_iOS : CalleeSavedRegs<(add LR, R7, R6, R5, R4, (sub CSR_AAPCS, R9))>;
// R6 is used to pass swifterror, remove it from CSR.
def CSR_iOS_SwiftError : CalleeSavedRegs<(sub CSR_iOS, R6)>;
// R8 is used to pass swifterror, remove it from CSR.
def CSR_iOS_SwiftError : CalleeSavedRegs<(sub CSR_iOS, R8)>;
def CSR_iOS_ThisReturn : CalleeSavedRegs<(add LR, R7, R6, R5, R4,
(sub CSR_AAPCS_ThisReturn, R9))>;

View File

@@ -1787,7 +1787,8 @@ ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
StackPtr, MemOpChains, Flags);
}
} else if (VA.isRegLoc()) {
if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
Outs[0].VT == MVT::i32) {
assert(VA.getLocVT() == MVT::i32 &&
"unexpected calling convention register assignment");
assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&

View File

@@ -1013,7 +1013,9 @@ bool AddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
(!ClSkipPromotableAllocas || !isAllocaPromotable(&AI)) &&
// inalloca allocas are not treated as static, and we don't want
// dynamic alloca instrumentation for them as well.
!AI.isUsedWithInAlloca());
!AI.isUsedWithInAlloca() &&
// swifterror allocas are register promoted by ISel
!AI.isSwiftError());
ProcessedAllocas[&AI] = IsInteresting;
return IsInteresting;
@@ -1088,12 +1090,19 @@ Value *AddressSanitizer::isInterestingMemoryAccess(Instruction *I,
}
}
// Do not instrument accesses from different address spaces; we cannot deal
// with them.
if (PtrOperand) {
// Do not instrument accesses from different address spaces; we cannot deal
// with them.
Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
if (PtrTy->getPointerAddressSpace() != 0)
return nullptr;
// Ignore swifterror addresses.
// swifterror memory addresses are mem2reg promoted by instruction
// selection. As such they cannot have regular uses like an instrumentation
// function and it makes no sense to track them as memory.
if (PtrOperand->isSwiftError())
return nullptr;
}
// Treat memory accesses to promotable allocas as non-interesting since they

View File

@@ -488,6 +488,13 @@ bool ThreadSanitizer::instrumentLoadOrStore(Instruction *I,
Value *Addr = IsWrite
? cast<StoreInst>(I)->getPointerOperand()
: cast<LoadInst>(I)->getPointerOperand();
// swifterror memory addresses are mem2reg promoted by instruction selection.
// As such they cannot have regular uses like an instrumentation function and
// it makes no sense to track them as memory.
if (Addr->isSwiftError())
return false;
int Idx = getMemoryAccessFuncIndex(Addr, DL);
if (Idx < 0)
return false;

View File

@@ -189,11 +189,14 @@ const Loop* llvm::addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
assert(OriginalBB == OldLoop->getHeader() &&
"Header should be first in RPO");
NewLoop = new Loop();
Loop *NewLoopParent = NewLoops.lookup(OldLoop->getParentLoop());
assert(NewLoopParent &&
"Expected parent loop before sub-loop in RPO");
NewLoop = new Loop;
NewLoopParent->addChildLoop(NewLoop);
if (NewLoopParent)
NewLoopParent->addChildLoop(NewLoop);
else
LI->addTopLevelLoop(NewLoop);
NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
return OldLoop;
} else {
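
Before this change, addClonedBlockToLoopInfo asserted that the cloned loop's parent had already been mapped; now a missing parent means the clone becomes a top-level loop, which is what happens when the outermost loop itself is being cloned. A toy loop-forest sketch of that fallback (LoopForest and cloneLoop are made-up names, not LLVM's LoopInfo API):

    #include <cstdio>
    #include <memory>
    #include <unordered_map>
    #include <vector>

    struct Loop {
      std::vector<Loop *> Children;
    };

    struct LoopForest {
      std::vector<Loop *> TopLevel;
      std::vector<std::unique_ptr<Loop>> Storage;

      Loop *cloneLoop(Loop *OldParent,
                      std::unordered_map<Loop *, Loop *> &NewLoops) {
        Storage.push_back(std::make_unique<Loop>());
        Loop *NewLoop = Storage.back().get();
        // The fixed logic: a missing parent mapping now means "top-level
        // loop" instead of an assertion failure.
        auto It = NewLoops.find(OldParent);
        if (It != NewLoops.end())
          It->second->Children.push_back(NewLoop);
        else
          TopLevel.push_back(NewLoop);
        return NewLoop;
      }
    };

    int main() {
      LoopForest LF;
      std::unordered_map<Loop *, Loop *> NewLoops;
      Loop *Outer = LF.cloneLoop(nullptr, NewLoops); // no parent: top level
      NewLoops[Outer] = Outer;                       // pretend Outer maps to itself
      LF.cloneLoop(Outer, NewLoops);                 // nested: child of Outer
      std::printf("top-level loops: %zu\n", LF.TopLevel.size()); // prints 1
    }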

View File

@@ -302,17 +302,22 @@ static void CloneLoopBlocks(Loop *L, Value *NewIter,
}
NewLoopsMap NewLoops;
NewLoops[L] = NewLoop;
if (NewLoop)
NewLoops[L] = NewLoop;
else if (ParentLoop)
NewLoops[L] = ParentLoop;
// For each block in the original loop, create a new copy,
// and update the value map with the newly created values.
for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
BasicBlock *NewBB = CloneBasicBlock(*BB, VMap, "." + suffix, F);
NewBlocks.push_back(NewBB);
if (NewLoop) {
// If we're unrolling the outermost loop, there's no remainder loop,
// and this block isn't in a nested loop, then the new block is not
// in any loop. Otherwise, add it to loopinfo.
if (CreateRemainderLoop || LI->getLoopFor(*BB) != L || ParentLoop)
addClonedBlockToLoopInfo(*BB, NewBB, LI, NewLoops);
} else if (ParentLoop)
ParentLoop->addBasicBlockToLoop(NewBB, *LI);
VMap[*BB] = NewBB;
if (Header == *BB) {

View File

@@ -604,10 +604,12 @@ namespace {
/// gets a chance to look at it.
EM_PotentialConstantExpressionUnevaluated,
/// Evaluate as a constant expression. Continue evaluating if either:
/// - We find a MemberExpr with a base that can't be evaluated.
/// - We find a variable initialized with a call to a function that has
/// the alloc_size attribute on it.
/// Evaluate as a constant expression. In certain scenarios, if:
/// - we find a MemberExpr with a base that can't be evaluated, or
/// - we find a variable initialized with a call to a function that has
/// the alloc_size attribute on it
/// then we may consider evaluation to have succeeded.
///
/// In either case, the LValue returned shall have an invalid base; in the
/// former, the base will be the invalid MemberExpr, in the latter, the
/// base will be either the alloc_size CallExpr or a CastExpr wrapping
@@ -890,10 +892,6 @@ namespace {
return KeepGoing;
}
bool allowInvalidBaseExpr() const {
return EvalMode == EM_OffsetFold;
}
class ArrayInitLoopIndex {
EvalInfo &Info;
uint64_t OuterIndex;
@@ -1394,8 +1392,10 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
const LValue &This, const Expr *E,
bool AllowNonLiteralTypes = false);
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK = false);
static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK = false);
static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
EvalInfo &Info);
static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
@@ -4803,6 +4803,7 @@ class LValueExprEvaluatorBase
: public ExprEvaluatorBase<Derived> {
protected:
LValue &Result;
bool InvalidBaseOK;
typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
typedef ExprEvaluatorBase<Derived> ExprEvaluatorBaseTy;
@@ -4811,9 +4812,14 @@ class LValueExprEvaluatorBase
return true;
}
bool evaluatePointer(const Expr *E, LValue &Result) {
return EvaluatePointer(E, Result, this->Info, InvalidBaseOK);
}
public:
LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result) :
ExprEvaluatorBaseTy(Info), Result(Result) {}
LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result, bool InvalidBaseOK)
: ExprEvaluatorBaseTy(Info), Result(Result),
InvalidBaseOK(InvalidBaseOK) {}
bool Success(const APValue &V, const Expr *E) {
Result.setFrom(this->Info.Ctx, V);
@@ -4825,7 +4831,7 @@ class LValueExprEvaluatorBase
QualType BaseTy;
bool EvalOK;
if (E->isArrow()) {
EvalOK = EvaluatePointer(E->getBase(), Result, this->Info);
EvalOK = evaluatePointer(E->getBase(), Result);
BaseTy = E->getBase()->getType()->castAs<PointerType>()->getPointeeType();
} else if (E->getBase()->isRValue()) {
assert(E->getBase()->getType()->isRecordType());
@@ -4836,7 +4842,7 @@ class LValueExprEvaluatorBase
BaseTy = E->getBase()->getType();
}
if (!EvalOK) {
if (!this->Info.allowInvalidBaseExpr())
if (!InvalidBaseOK)
return false;
Result.setInvalid(E);
return true;
@@ -4930,8 +4936,8 @@ namespace {
class LValueExprEvaluator
: public LValueExprEvaluatorBase<LValueExprEvaluator> {
public:
LValueExprEvaluator(EvalInfo &Info, LValue &Result) :
LValueExprEvaluatorBaseTy(Info, Result) {}
LValueExprEvaluator(EvalInfo &Info, LValue &Result, bool InvalidBaseOK) :
LValueExprEvaluatorBaseTy(Info, Result, InvalidBaseOK) {}
bool VisitVarDecl(const Expr *E, const VarDecl *VD);
bool VisitUnaryPreIncDec(const UnaryOperator *UO);
@@ -4984,10 +4990,11 @@ class LValueExprEvaluator
/// * function designators in C, and
/// * "extern void" objects
/// * @selector() expressions in Objective-C
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info) {
static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool InvalidBaseOK) {
assert(E->isGLValue() || E->getType()->isFunctionType() ||
E->getType()->isVoidType() || isa<ObjCSelectorExpr>(E));
return LValueExprEvaluator(Info, Result).Visit(E);
return LValueExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
}
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
@@ -5148,7 +5155,7 @@ bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
if (E->getBase()->getType()->isVectorType())
return Error(E);
if (!EvaluatePointer(E->getBase(), Result, Info))
if (!evaluatePointer(E->getBase(), Result))
return false;
APSInt Index;
@@ -5160,7 +5167,7 @@ bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
}
bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
return EvaluatePointer(E->getSubExpr(), Result, Info);
return evaluatePointer(E->getSubExpr(), Result);
}
bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
@@ -5308,7 +5315,7 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
/// and mark Result's Base as invalid.
static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
LValue &Result) {
if (!Info.allowInvalidBaseExpr() || Base.isNull())
if (Base.isNull())
return false;
// Because we do no form of static analysis, we only support const variables.
@@ -5342,17 +5349,27 @@ namespace {
class PointerExprEvaluator
: public ExprEvaluatorBase<PointerExprEvaluator> {
LValue &Result;
bool InvalidBaseOK;
bool Success(const Expr *E) {
Result.set(E);
return true;
}
bool evaluateLValue(const Expr *E, LValue &Result) {
return EvaluateLValue(E, Result, Info, InvalidBaseOK);
}
bool evaluatePointer(const Expr *E, LValue &Result) {
return EvaluatePointer(E, Result, Info, InvalidBaseOK);
}
bool visitNonBuiltinCallExpr(const CallExpr *E);
public:
PointerExprEvaluator(EvalInfo &info, LValue &Result)
: ExprEvaluatorBaseTy(info), Result(Result) {}
PointerExprEvaluator(EvalInfo &info, LValue &Result, bool InvalidBaseOK)
: ExprEvaluatorBaseTy(info), Result(Result),
InvalidBaseOK(InvalidBaseOK) {}
bool Success(const APValue &V, const Expr *E) {
Result.setFrom(Info.Ctx, V);
@@ -5399,9 +5416,10 @@ class PointerExprEvaluator
};
} // end anonymous namespace
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info,
bool InvalidBaseOK) {
assert(E->isRValue() && E->getType()->hasPointerRepresentation());
return PointerExprEvaluator(Info, Result).Visit(E);
return PointerExprEvaluator(Info, Result, InvalidBaseOK).Visit(E);
}
bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
@@ -5414,7 +5432,7 @@ bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (IExp->getType()->isPointerType())
std::swap(PExp, IExp);
bool EvalPtrOK = EvaluatePointer(PExp, Result, Info);
bool EvalPtrOK = evaluatePointer(PExp, Result);
if (!EvalPtrOK && !Info.noteFailure())
return false;
@@ -5432,7 +5450,7 @@ bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
}
bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
return EvaluateLValue(E->getSubExpr(), Result, Info);
return evaluateLValue(E->getSubExpr(), Result);
}
bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
@@ -5466,7 +5484,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
case CK_DerivedToBase:
case CK_UncheckedDerivedToBase:
if (!EvaluatePointer(E->getSubExpr(), Result, Info))
if (!evaluatePointer(E->getSubExpr(), Result))
return false;
if (!Result.Base && Result.Offset.isZero())
return true;
@@ -5513,7 +5531,7 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
}
case CK_ArrayToPointerDecay:
if (SubExpr->isGLValue()) {
if (!EvaluateLValue(SubExpr, Result, Info))
if (!evaluateLValue(SubExpr, Result))
return false;
} else {
Result.set(SubExpr, Info.CurrentCall->Index);
@@ -5530,18 +5548,19 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
return true;
case CK_FunctionToPointerDecay:
return EvaluateLValue(SubExpr, Result, Info);
return evaluateLValue(SubExpr, Result);
case CK_LValueToRValue: {
LValue LVal;
if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
if (!evaluateLValue(E->getSubExpr(), LVal))
return false;
APValue RVal;
// Note, we use the subexpression's type in order to retain cv-qualifiers.
if (!handleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
LVal, RVal))
return evaluateLValueAsAllocSize(Info, LVal.Base, Result);
return InvalidBaseOK &&
evaluateLValueAsAllocSize(Info, LVal.Base, Result);
return Success(RVal, E);
}
}
@@ -5586,7 +5605,7 @@ bool PointerExprEvaluator::visitNonBuiltinCallExpr(const CallExpr *E) {
if (ExprEvaluatorBaseTy::VisitCallExpr(E))
return true;
if (!(Info.allowInvalidBaseExpr() && getAllocSizeAttr(E)))
if (!(InvalidBaseOK && getAllocSizeAttr(E)))
return false;
Result.setInvalid(E);
@@ -5609,12 +5628,12 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
switch (BuiltinOp) {
case Builtin::BI__builtin_addressof:
return EvaluateLValue(E->getArg(0), Result, Info);
return evaluateLValue(E->getArg(0), Result);
case Builtin::BI__builtin_assume_aligned: {
// We need to be very careful here because: if the pointer does not have the
// asserted alignment, then the behavior is undefined, and undefined
// behavior is non-constant.
if (!EvaluatePointer(E->getArg(0), Result, Info))
if (!evaluatePointer(E->getArg(0), Result))
return false;
LValue OffsetResult(Result);
@@ -6255,7 +6274,7 @@ class TemporaryExprEvaluator
: public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
public:
TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
LValueExprEvaluatorBaseTy(Info, Result) {}
LValueExprEvaluatorBaseTy(Info, Result, false) {}
/// Visit an expression which constructs the value of this temporary.
bool VisitConstructExpr(const Expr *E) {
@@ -7358,7 +7377,8 @@ static bool tryEvaluateBuiltinObjectSize(const Expr *E, unsigned Type,
if (!EvaluateAsRValue(Info, E, RVal))
return false;
LVal.setFrom(Info.Ctx, RVal);
} else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info))
} else if (!EvaluatePointer(ignorePointerCastsAndParens(E), LVal, Info,
/*InvalidBaseOK=*/true))
return false;
}
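
All of this plumbing threads an explicit InvalidBaseOK flag down to the one caller that wants it, tryEvaluateBuiltinObjectSize, instead of keying the behavior off the shared evaluation mode. The payoff is that __builtin_object_size can look through calls to functions carrying the alloc_size attribute. A hedged example of what that enables (my_alloc is a made-up function; per the comment above, only const variables are tracked, and whether the call actually folds to 16 depends on this Clang support):

    #include <cstddef>
    #include <cstdio>

    // alloc_size(1) tells Clang the returned pointer addresses N bytes,
    // where N is the first argument.
    void *my_alloc(std::size_t N) __attribute__((alloc_size(1)));
    void *my_alloc(std::size_t N) { return new char[N]; }

    int main() {
      // A const variable initialized directly from the alloc_size call is
      // the case the evaluator supports ("we only support const variables").
      void *const P = my_alloc(16);
      // With this support, type 0 may fold to 16 instead of -1 ("unknown").
      std::printf("%zu\n", __builtin_object_size(P, 0));
      delete[] static_cast<char *>(P);
    }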

View File

@@ -166,7 +166,7 @@ struct ObjCEntrypoints {
/// void objc_release(id);
llvm::Constant *objc_release;
/// id objc_storeStrong(id*, id);
/// void objc_storeStrong(id*, id);
llvm::Constant *objc_storeStrong;
/// id objc_storeWeak(id*, id);

View File

@@ -2408,7 +2408,7 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// fold-expressions, we'll need to allow multiple ArgExprs here.
if (ArgExprs.size() == 1 && isFoldOperator(Tok.getKind()) &&
NextToken().is(tok::ellipsis))
return ParseFoldExpression(Result, T);
return ParseFoldExpression(ArgExprs[0], T);
ExprType = SimpleExpr;
Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
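
For reference, the production this parser fix touches is the C++17 fold-expression: a parenthesized expression followed by a fold operator and an ellipsis. Handing ParseFoldExpression the already-parsed ArgExprs[0] rather than the stale Result means input of the following shape is parsed from the right operand:

    #include <cstdio>

    // A unary right fold '(pack op ...)': exactly the '(' expression op
    // '...' ')' shape that ParseParenExpression hands to ParseFoldExpression.
    template <typename... Ts> int sum(Ts... Vals) { return (Vals + ...); }

    int main() {
      std::printf("%d\n", sum(1, 2, 3)); // prints 6
    }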

View File

@@ -2831,6 +2831,9 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
assert((SM != CXXDefaultConstructor && SM != CXXDestructor) &&
"parameter-less special members can't have qualified arguments");
// FIXME: Get the caller to pass in a location for the lookup.
SourceLocation LookupLoc = RD->getLocation();
llvm::FoldingSetNodeID ID;
ID.AddPointer(RD);
ID.AddInteger(SM);
@@ -2912,7 +2915,7 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
VK = VK_RValue;
}
OpaqueValueExpr FakeArg(SourceLocation(), ArgType, VK);
OpaqueValueExpr FakeArg(LookupLoc, ArgType, VK);
if (SM != CXXDefaultConstructor) {
NumArgs = 1;
@@ -2926,13 +2929,13 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
if (VolatileThis)
ThisTy.addVolatile();
Expr::Classification Classification =
OpaqueValueExpr(SourceLocation(), ThisTy,
OpaqueValueExpr(LookupLoc, ThisTy,
RValueThis ? VK_RValue : VK_LValue).Classify(Context);
// Now we perform lookup on the name we computed earlier and do overload
// resolution. Lookup is only performed directly into the class since there
// will always be a (possibly implicit) declaration to shadow any others.
OverloadCandidateSet OCS(RD->getLocation(), OverloadCandidateSet::CSK_Normal);
OverloadCandidateSet OCS(LookupLoc, OverloadCandidateSet::CSK_Normal);
DeclContext::lookup_result R = RD->lookup(Name);
if (R.empty()) {
@@ -2987,7 +2990,7 @@ Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
}
OverloadCandidateSet::iterator Best;
switch (OCS.BestViableFunction(*this, SourceLocation(), Best)) {
switch (OCS.BestViableFunction(*this, LookupLoc, Best)) {
case OR_Success:
Result->setMethod(cast<CXXMethodDecl>(Best->Function));
Result->setKind(SpecialMemberOverloadResult::Success);

View File

@@ -2743,15 +2743,17 @@ bool Sema::isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
// ...automatic...
if (!VD->hasLocalStorage()) return false;
// Return false if VD is a __block variable. We don't want to implicitly move
// out of a __block variable during a return because we cannot assume the
// variable will no longer be used.
if (VD->hasAttr<BlocksAttr>()) return false;
if (AllowParamOrMoveConstructible)
return true;
// ...non-volatile...
if (VD->getType().isVolatileQualified()) return false;
// __block variables can't be allocated in a way that permits NRVO.
if (VD->hasAttr<BlocksAttr>()) return false;
// Variables with higher required alignment than their type's ABI
// alignment cannot use NRVO.
if (!VD->getType()->isDependentType() && VD->hasAttr<AlignedAttr>() &&

View File

@@ -1014,6 +1014,11 @@ ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
CheckFoldOperand(*this, LHS);
CheckFoldOperand(*this, RHS);
auto DiscardOperands = [&] {
CorrectDelayedTyposInExpr(LHS);
CorrectDelayedTyposInExpr(RHS);
};
// [expr.prim.fold]p3:
// In a binary fold, op1 and op2 shall be the same fold-operator, and
// either e1 shall contain an unexpanded parameter pack or e2 shall contain
@@ -1021,6 +1026,7 @@ ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
if (LHS && RHS &&
LHS->containsUnexpandedParameterPack() ==
RHS->containsUnexpandedParameterPack()) {
DiscardOperands();
return Diag(EllipsisLoc,
LHS->containsUnexpandedParameterPack()
? diag::err_fold_expression_packs_both_sides
@@ -1034,6 +1040,7 @@ ExprResult Sema::ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
if (!LHS || !RHS) {
Expr *Pack = LHS ? LHS : RHS;
assert(Pack && "fold expression with neither LHS nor RHS");
DiscardOperands();
if (!Pack->containsUnexpandedParameterPack())
return Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< Pack->getSourceRange();

View File

@@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "294803"
#define SVN_REVISION "295380"

View File

@@ -4,5 +4,5 @@
#define LLD_VERSION_STRING "4.0.0"
#define LLD_VERSION_MAJOR 4
#define LLD_VERSION_MINOR 0
#define LLD_REVISION_STRING "294803"
#define LLD_REVISION_STRING "295380"
#define LLD_REPOSITORY_STRING "FreeBSD"