diff --git a/contrib/libc++/include/__bsd_locale_defaults.h b/contrib/libc++/include/__bsd_locale_defaults.h index f315ca2949e3..cbc407d10305 100644 --- a/contrib/libc++/include/__bsd_locale_defaults.h +++ b/contrib/libc++/include/__bsd_locale_defaults.h @@ -15,6 +15,10 @@ #ifndef _LIBCPP_BSD_LOCALE_DEFAULTS_H #define _LIBCPP_BSD_LOCALE_DEFAULTS_H +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +#pragma GCC system_header +#endif + #define __libcpp_mb_cur_max_l(loc) MB_CUR_MAX_L(loc) #define __libcpp_btowc_l(ch, loc) btowc_l(ch, loc) #define __libcpp_wctob_l(wch, loc) wctob_l(wch, loc) diff --git a/contrib/libc++/include/__bsd_locale_fallbacks.h b/contrib/libc++/include/__bsd_locale_fallbacks.h index 9489452905c1..5e9e09483072 100644 --- a/contrib/libc++/include/__bsd_locale_fallbacks.h +++ b/contrib/libc++/include/__bsd_locale_fallbacks.h @@ -18,6 +18,10 @@ #include <stdlib.h> #include <memory> +#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER) +#pragma GCC system_header +#endif + _LIBCPP_BEGIN_NAMESPACE_STD inline _LIBCPP_ALWAYS_INLINE diff --git a/contrib/libc++/include/__locale b/contrib/libc++/include/__locale index 766842294ca3..91ed9e709ee3 100644 --- a/contrib/libc++/include/__locale +++ b/contrib/libc++/include/__locale @@ -34,7 +34,7 @@ # include <support/android/locale_bionic.h> #elif defined(_NEWLIB_VERSION) # include <support/newlib/xlocale.h> -#elif (defined(__GLIBC__) || defined(__APPLE__) || defined(__FreeBSD__) \ +#elif (defined(__APPLE__) || defined(__FreeBSD__) \ || defined(__EMSCRIPTEN__) || defined(__IBMCPP__)) # include <xlocale.h> #elif defined(__Fuchsia__) diff --git a/contrib/libc++/include/mutex b/contrib/libc++/include/mutex index 1557ed8770d7..fbcc0989fa98 100644 --- a/contrib/libc++/include/mutex +++ b/contrib/libc++/include/mutex @@ -116,7 +116,7 @@ public: using mutex_type = Mutex; // If MutexTypes... consists of the single type Mutex explicit scoped_lock(MutexTypes&... m); - scoped_lock(MutexTypes&... m, adopt_lock_t); + scoped_lock(adopt_lock_t, MutexTypes&... m); ~scoped_lock(); scoped_lock(scoped_lock const&) = delete; scoped_lock& operator=(scoped_lock const&) = delete; @@ -500,7 +500,7 @@ public: ~scoped_lock() _LIBCPP_THREAD_SAFETY_ANNOTATION(release_capability()) {__m_.unlock();} _LIBCPP_INLINE_VISIBILITY - explicit scoped_lock(mutex_type& __m, adopt_lock_t) _LIBCPP_THREAD_SAFETY_ANNOTATION(requires_capability(__m)) + explicit scoped_lock(adopt_lock_t, mutex_type& __m) _LIBCPP_THREAD_SAFETY_ANNOTATION(requires_capability(__m)) : __m_(__m) {} scoped_lock(scoped_lock const&) = delete; @@ -522,7 +522,7 @@ public: } _LIBCPP_INLINE_VISIBILITY - scoped_lock(_MArgs&... __margs, adopt_lock_t) + scoped_lock(adopt_lock_t, _MArgs&... __margs) : __t_(__margs...)
{ } diff --git a/contrib/libc++/include/sstream b/contrib/libc++/include/sstream index b9903f961823..fe65fd7db53d 100644 --- a/contrib/libc++/include/sstream +++ b/contrib/libc++/include/sstream @@ -249,7 +249,8 @@ basic_stringbuf<_CharT, _Traits, _Allocator>::basic_stringbuf(ios_base::openmode template <class _CharT, class _Traits, class _Allocator> basic_stringbuf<_CharT, _Traits, _Allocator>::basic_stringbuf(const string_type& __s, ios_base::openmode __wch) - : __hm_(0), + : __str_(__s.get_allocator()), + __hm_(0), __mode_(__wch) { str(__s); diff --git a/contrib/llvm/include/llvm/Analysis/ValueTracking.h b/contrib/llvm/include/llvm/Analysis/ValueTracking.h index f4c57d4289fc..da058b1d3918 100644 --- a/contrib/llvm/include/llvm/Analysis/ValueTracking.h +++ b/contrib/llvm/include/llvm/Analysis/ValueTracking.h @@ -312,6 +312,12 @@ template <typename T> class ArrayRef; const DataLayout &DL, LoopInfo *LI = nullptr, unsigned MaxLookup = 6); + /// This is a wrapper around GetUnderlyingObjects and adds support for basic + /// ptrtoint+arithmetic+inttoptr sequences. + void getUnderlyingObjectsForCodeGen(const Value *V, + SmallVectorImpl<Value *> &Objects, + const DataLayout &DL); + /// Return true if the only users of this pointer are lifetime markers. bool onlyUsedByLifetimeMarkers(const Value *V); diff --git a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h index 19173fa39bdc..010d7032c516 100644 --- a/contrib/llvm/include/llvm/CodeGen/MachineFunction.h +++ b/contrib/llvm/include/llvm/CodeGen/MachineFunction.h @@ -661,6 +661,12 @@ class MachineFunction { MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO, int64_t Offset, uint64_t Size); + /// Allocate a new MachineMemOperand by copying an existing one, + /// replacing only AliasAnalysis information. MachineMemOperands are owned + /// by the MachineFunction and need not be explicitly deallocated. + MachineMemOperand *getMachineMemOperand(const MachineMemOperand *MMO, + const AAMDNodes &AAInfo); + using OperandCapacity = ArrayRecycler<MachineOperand>::Capacity; /// Allocate an array of MachineOperands. This is only intended for use by diff --git a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h index 95401e98b297..b87aff102d47 100644 --- a/contrib/llvm/include/llvm/CodeGen/MachineInstr.h +++ b/contrib/llvm/include/llvm/CodeGen/MachineInstr.h @@ -379,6 +379,9 @@ class MachineInstr return NumMemRefs == 1; } + /// Return the number of memory operands. + unsigned getNumMemOperands() const { return NumMemRefs; } + /// API for querying MachineInstr properties. They are the same as MCInstrDesc /// queries but they are bundle aware. diff --git a/contrib/llvm/lib/Analysis/ValueTracking.cpp b/contrib/llvm/lib/Analysis/ValueTracking.cpp index 9e042da8801d..439b21a81258 100644 --- a/contrib/llvm/lib/Analysis/ValueTracking.cpp +++ b/contrib/llvm/lib/Analysis/ValueTracking.cpp @@ -3277,6 +3277,69 @@ void llvm::GetUnderlyingObjects(Value *V, SmallVectorImpl<Value *> &Objects, } while (!Worklist.empty()); } +/// This is the function that does the work of looking through basic +/// ptrtoint+arithmetic+inttoptr sequences. +static const Value *getUnderlyingObjectFromInt(const Value *V) { + do { + if (const Operator *U = dyn_cast<Operator>(V)) { + // If we find a ptrtoint, we can transfer control back to the + // regular getUnderlyingObjectFromInt.
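An illustrative aside (not from the patch): the helper above recognizes the integer arithmetic that pointer manipulations like the following C++ lower to, so the underlying object (buf) can still be found behind a ptrtoint+add+inttoptr chain:

    #include <cstdint>
    char *skip16(char *buf) {
      // typically lowers to ptrtoint + add + inttoptr; the base object is still buf
      return (char *)((std::uintptr_t)buf + 16);
    }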
+ if (U->getOpcode() == Instruction::PtrToInt) + return U->getOperand(0); + // If we find an add of a constant, a multiplied value, or a phi, it's + // likely that the other operand will lead us to the base + // object. We don't have to worry about the case where the + // object address is somehow being computed by the multiply, + // because our callers only care when the result is an + // identifiable object. + if (U->getOpcode() != Instruction::Add || + (!isa<ConstantInt>(U->getOperand(1)) && + Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && + !isa<PHINode>(U->getOperand(1)))) + return V; + V = U->getOperand(0); + } else { + return V; + } + assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); + } while (true); +} + +/// This is a wrapper around GetUnderlyingObjects and adds support for basic +/// ptrtoint+arithmetic+inttoptr sequences. +void llvm::getUnderlyingObjectsForCodeGen(const Value *V, + SmallVectorImpl<Value *> &Objects, + const DataLayout &DL) { + SmallPtrSet<const Value *, 4> Visited; + SmallVector<const Value *, 4> Working(1, V); + do { + V = Working.pop_back_val(); + + SmallVector<Value *, 4> Objs; + GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL); + + for (Value *V : Objs) { + if (!Visited.insert(V).second) + continue; + if (Operator::getOpcode(V) == Instruction::IntToPtr) { + const Value *O = + getUnderlyingObjectFromInt(cast<Operator>(V)->getOperand(0)); + if (O->getType()->isPointerTy()) { + Working.push_back(O); + continue; + } + } + // If GetUnderlyingObjects fails to find an identifiable object, + // getUnderlyingObjectsForCodeGen also fails for safety. + if (!isIdentifiedObject(V)) { + Objects.clear(); + return; + } + Objects.push_back(const_cast<Value *>(V)); + } + } while (!Working.empty()); +} + /// Return true if the only users of this pointer are lifetime markers. bool llvm::onlyUsedByLifetimeMarkers(const Value *V) { for (const User *U : V->users()) { diff --git a/contrib/llvm/lib/CodeGen/BranchFolding.cpp b/contrib/llvm/lib/CodeGen/BranchFolding.cpp index 530954976292..3c439e66944b 100644 --- a/contrib/llvm/lib/CodeGen/BranchFolding.cpp +++ b/contrib/llvm/lib/CodeGen/BranchFolding.cpp @@ -1475,13 +1475,14 @@ bool BranchFolder::OptimizeBlock(MachineBasicBlock *MBB) { bool PredAnalyzable = !TII->analyzeBranch(*Pred, PredTBB, PredFBB, PredCond, true); - if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB) { + if (PredAnalyzable && !PredCond.empty() && PredTBB == MBB && + PredTBB != PredFBB) { // The predecessor has a conditional branch to this block which consists // of only a tail call. Try to fold the tail call into the conditional // branch. if (TII->canMakeTailCallConditional(PredCond, TailCall)) { // TODO: It would be nice if analyzeBranch() could provide a pointer - // to the branch insturction so replaceBranchWithTailCall() doesn't + // to the branch instruction so replaceBranchWithTailCall() doesn't // have to search for it. TII->replaceBranchWithTailCall(*Pred, PredCond, TailCall); ++NumTailCalls; diff --git a/contrib/llvm/lib/CodeGen/MachineFunction.cpp b/contrib/llvm/lib/CodeGen/MachineFunction.cpp index f88e175a9776..742b095d955e 100644 --- a/contrib/llvm/lib/CodeGen/MachineFunction.cpp +++ b/contrib/llvm/lib/CodeGen/MachineFunction.cpp @@ -330,6 +330,20 @@ MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO, MMO->getOrdering(), MMO->getFailureOrdering()); } +MachineMemOperand * +MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO, + const AAMDNodes &AAInfo) { + MachinePointerInfo MPI = MMO->getValue() ?
+ MachinePointerInfo(MMO->getValue(), MMO->getOffset()) : + MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset()); + + return new (Allocator) + MachineMemOperand(MPI, MMO->getFlags(), MMO->getSize(), + MMO->getBaseAlignment(), AAInfo, + MMO->getRanges(), MMO->getSyncScopeID(), + MMO->getOrdering(), MMO->getFailureOrdering()); +} + MachineInstr::mmo_iterator MachineFunction::allocateMemRefsArray(unsigned long Num) { return Allocator.Allocate<MachineMemOperand *>(Num); diff --git a/contrib/llvm/lib/CodeGen/MachineInstr.cpp b/contrib/llvm/lib/CodeGen/MachineInstr.cpp index afea5575a3ae..535757ed87c1 100644 --- a/contrib/llvm/lib/CodeGen/MachineInstr.cpp +++ b/contrib/llvm/lib/CodeGen/MachineInstr.cpp @@ -578,10 +578,8 @@ bool MachinePointerInfo::isDereferenceable(unsigned Size, LLVMContext &C, if (BasePtr == nullptr) return false; - return isDereferenceableAndAlignedPointer(BasePtr, 1, - APInt(DL.getPointerSize(), - Offset + Size), - DL); + return isDereferenceableAndAlignedPointer( + BasePtr, 1, APInt(DL.getPointerSizeInBits(), Offset + Size), DL); } /// getConstantPool - Return a MachinePointerInfo record that refers to the diff --git a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp index ccd937950a74..99baa07390eb 100644 --- a/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/contrib/llvm/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -121,63 +121,6 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, SchedModel.init(ST.getSchedModel(), &ST, TII); } -/// This is the function that does the work of looking through basic -/// ptrtoint+arithmetic+inttoptr sequences. -static const Value *getUnderlyingObjectFromInt(const Value *V) { - do { - if (const Operator *U = dyn_cast<Operator>(V)) { - // If we find a ptrtoint, we can transfer control back to the - // regular getUnderlyingObjectFromInt. - if (U->getOpcode() == Instruction::PtrToInt) - return U->getOperand(0); - // If we find an add of a constant, a multiplied value, or a phi, it's - // likely that the other operand will lead us to the base - // object. We don't have to worry about the case where the - // object address is somehow being computed by the multiply, - // because our callers only care when the result is an - // identifiable object. - if (U->getOpcode() != Instruction::Add || - (!isa<ConstantInt>(U->getOperand(1)) && - Operator::getOpcode(U->getOperand(1)) != Instruction::Mul && - !isa<PHINode>(U->getOperand(1)))) - return V; - V = U->getOperand(0); - } else { - return V; - } - assert(V->getType()->isIntegerTy() && "Unexpected operand type!"); - } while (true); -} - -/// This is a wrapper around GetUnderlyingObjects and adds support for basic -/// ptrtoint+arithmetic+inttoptr sequences. -static void getUnderlyingObjects(const Value *V, - SmallVectorImpl<Value *> &Objects, - const DataLayout &DL) { - SmallPtrSet<const Value *, 4> Visited; - SmallVector<const Value *, 4> Working(1, V); - do { - V = Working.pop_back_val(); - - SmallVector<Value *, 4> Objs; - GetUnderlyingObjects(const_cast<Value *>(V), Objs, DL); - - for (Value *V : Objs) { - if (!Visited.insert(V).second) - continue; - if (Operator::getOpcode(V) == Instruction::IntToPtr) { - const Value *O = - getUnderlyingObjectFromInt(cast<Operator>(V)->getOperand(0)); - if (O->getType()->isPointerTy()) { - Working.push_back(O); - continue; - } - } - Objects.push_back(const_cast<Value *>(V)); - } - } while (!Working.empty()); -} - /// If this machine instr has memory reference information and it can be tracked /// to a normal reference to a known object, return the Value for that object.
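One behavioral difference from the removed ScheduleDAGInstrs copy is worth spelling out: the ValueTracking version fails closed, clearing the output vector as soon as any underlying object is unidentifiable, so callers can treat an empty result as "assume may-alias". A minimal caller sketch under that contract (the helper and its use are hypothetical; only the LLVM names from the patch are real):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/ValueTracking.h"
    using namespace llvm;
    // Hypothetical helper: Ptr and DL come from the surrounding pass.
    static bool mustAssumeMayAlias(const Value *Ptr, const DataLayout &DL) {
      SmallVector<Value *, 4> Objs;
      getUnderlyingObjectsForCodeGen(Ptr, Objs, DL);
      return Objs.empty(); // empty means the walk gave up for safety
    }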
static void getUnderlyingObjectsForInstr(const MachineInstr *MI, @@ -208,12 +151,10 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI, Objects.push_back(UnderlyingObjectsVector::value_type(PSV, MayAlias)); } else if (const Value *V = MMO->getValue()) { SmallVector<Value *, 4> Objs; - getUnderlyingObjects(V, Objs, DL); + getUnderlyingObjectsForCodeGen(V, Objs, DL); for (Value *V : Objs) { - if (!isIdentifiedObject(V)) - return false; - + assert(isIdentifiedObject(V)); Objects.push_back(UnderlyingObjectsVector::value_type(V, true)); } } else diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 41c3f5f235ea..127312076207 100644 --- a/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/contrib/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -99,6 +99,27 @@ LimitFPPrecision("limit-float-precision", // store [4096 x i8] %data, [4096 x i8]* %buffer static const unsigned MaxParallelChains = 64; +// True if the Value passed requires ABI mangling as it is a parameter to a +// function or a return value from a function which is not an intrinsic. +static bool isABIRegCopy(const Value * V) { + const bool IsRetInst = V && isa<ReturnInst>(V); + const bool IsCallInst = V && isa<CallInst>(V); + const bool IsInLineAsm = + IsCallInst && static_cast<const CallInst *>(V)->isInlineAsm(); + const bool IsIndirectFunctionCall = + IsCallInst && !IsInLineAsm && + !static_cast<const CallInst *>(V)->getCalledFunction(); + // It is possible that the call instruction is an inline asm statement or an + // indirect function call in which case the return value of + // getCalledFunction() would be nullptr. + const bool IsInstrinsicCall = + IsCallInst && !IsInLineAsm && !IsIndirectFunctionCall && + static_cast<const CallInst *>(V)->getCalledFunction()->getIntrinsicID() != + Intrinsic::not_intrinsic; + + return IsRetInst || (IsCallInst && (!IsInLineAsm && !IsInstrinsicCall)); +} + static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, @@ -1026,13 +1047,9 @@ SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) { if (It != FuncInfo.ValueMap.end()) { unsigned InReg = It->second; - bool IsABIRegCopy = - V && ((isa<CallInst>(V) && - !(static_cast<const CallInst *>(V))->isInlineAsm()) || - isa<ReturnInst>(V)); RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(), - DAG.getDataLayout(), InReg, Ty, IsABIRegCopy); + DAG.getDataLayout(), InReg, Ty, isABIRegCopy(V)); SDValue Chain = DAG.getEntryNode(); Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V); @@ -1221,13 +1238,9 @@ SDValue SelectionDAGBuilder::getValueImpl(const Value *V) { // If this is an instruction which fast-isel has deferred, select it now. if (const Instruction *Inst = dyn_cast<Instruction>(V)) { unsigned InReg = FuncInfo.InitializeRegForValue(Inst); - bool IsABIRegCopy = - V && ((isa<CallInst>(V) && - !(static_cast<const CallInst *>(V))->isInlineAsm()) || - isa<ReturnInst>(V)); RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg, - Inst->getType(), IsABIRegCopy); + Inst->getType(), isABIRegCopy(V)); SDValue Chain = DAG.getEntryNode(); return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V); } @@ -8281,13 +8294,9 @@ SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); // If this is an InlineAsm we have to match the registers required, not the // notional registers required by the type.
- bool IsABIRegCopy = - V && ((isa<CallInst>(V) && - !(static_cast<const CallInst *>(V))->isInlineAsm()) || - isa<ReturnInst>(V)); RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, - V->getType(), IsABIRegCopy); + V->getType(), isABIRegCopy(V)); SDValue Chain = DAG.getEntryNode(); ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) == diff --git a/contrib/llvm/lib/CodeGen/StackColoring.cpp b/contrib/llvm/lib/CodeGen/StackColoring.cpp index 6bac39c7ee77..e5fc5402cb41 100644 --- a/contrib/llvm/lib/CodeGen/StackColoring.cpp +++ b/contrib/llvm/lib/CodeGen/StackColoring.cpp @@ -37,6 +37,7 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" #include "llvm/CodeGen/PseudoSourceValue.h" +#include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/SlotIndexes.h" #include "llvm/CodeGen/StackProtector.h" #include "llvm/CodeGen/WinEHFuncInfo.h" @@ -889,6 +890,10 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) { // Keep a list of *allocas* which need to be remapped. DenseMap<const AllocaInst *, const AllocaInst *> Allocas; + + // Keep a list of allocas which have been affected by the remap. + SmallPtrSet<const AllocaInst *, 32> MergedAllocas; + for (const std::pair<int, int> &SI : SlotRemap) { const AllocaInst *From = MFI->getObjectAllocation(SI.first); const AllocaInst *To = MFI->getObjectAllocation(SI.second); @@ -908,6 +913,10 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) { Inst = Cast; } + // We keep both slots to maintain AliasAnalysis metadata later. + MergedAllocas.insert(From); + MergedAllocas.insert(To); + // Allow the stack protector to adjust its value map to account for the // upcoming replacement. SP->adjustForColoring(From, To); @@ -939,13 +948,6 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) { // Update the MachineMemOperand to use the new alloca. for (MachineMemOperand *MMO : I.memoperands()) { - // FIXME: In order to enable the use of TBAA when using AA in CodeGen, - // we'll also need to update the TBAA nodes in MMOs with values - // derived from the merged allocas. When doing this, we'll need to use - // the same variant of GetUnderlyingObjects that is used by the - // instruction scheduler (that can look through ptrtoint/inttoptr - // pairs). - // We've replaced IR-level uses of the remapped allocas, so we only // need to replace direct uses here. const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(MMO->getValue()); @@ -997,6 +999,48 @@ void StackColoring::remapInstructions(DenseMap<int, int> &SlotRemap) { MO.setIndex(ToSlot); FixedInstr++; } + + // We adjust AliasAnalysis information for merged stack slots. + MachineSDNode::mmo_iterator NewMemOps = + MF->allocateMemRefsArray(I.getNumMemOperands()); + unsigned MemOpIdx = 0; + bool ReplaceMemOps = false; + for (MachineMemOperand *MMO : I.memoperands()) { + // If this memory location can be a slot remapped here, + // we remove AA information. + bool MayHaveConflictingAAMD = false; + if (MMO->getAAInfo()) { + if (const Value *MMOV = MMO->getValue()) { + SmallVector<Value *, 4> Objs; + getUnderlyingObjectsForCodeGen(MMOV, Objs, MF->getDataLayout()); + + if (Objs.empty()) + MayHaveConflictingAAMD = true; + else + for (Value *V : Objs) { + // If this memory location comes from a known stack slot + // that is not remapped, we continue checking. + // Otherwise, we need to invalidate AA information.
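To see why the metadata has to go (an illustrative example, not from the patch): once coloring merges the two disjoint-lifetime arrays below into one slot, a and b share storage, and TBAA conclusions drawn from the original IR, such as "an int store cannot alias a float load", would become unsound:

    void use(void *p); // hypothetical consumer
    void g(bool first) {
      if (first) { int   a[256]; use(a); }  // slot A
      else       { float b[256]; use(b); }  // slot B, may be merged with A
    }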
+ const AllocaInst *AI = dyn_cast_or_null<AllocaInst>(V); + if (AI && MergedAllocas.count(AI)) { + MayHaveConflictingAAMD = true; + break; + } + } + } + } + if (MayHaveConflictingAAMD) { + NewMemOps[MemOpIdx++] = MF->getMachineMemOperand(MMO, AAMDNodes()); + ReplaceMemOps = true; + } + else + NewMemOps[MemOpIdx++] = MMO; + } + + // If any memory operand is updated, set memory references of + // this instruction. + if (ReplaceMemOps) + I.setMemRefs(std::make_pair(NewMemOps, I.getNumMemOperands())); } // Update the location of C++ catch objects for the MSVC personality routine. diff --git a/contrib/llvm/lib/IR/ConstantFold.cpp b/contrib/llvm/lib/IR/ConstantFold.cpp index 23ccd8d4cf42..311b0a76ce8a 100644 --- a/contrib/llvm/lib/IR/ConstantFold.cpp +++ b/contrib/llvm/lib/IR/ConstantFold.cpp @@ -2097,15 +2097,19 @@ Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C, // Subsequent evaluation would get confused and produce erroneous results. // // The following prohibits such a GEP from being formed by checking to see - // if the index is in-range with respect to an array or vector. + // if the index is in-range with respect to an array. + // TODO: This code may be extended to handle vectors as well. bool PerformFold = false; if (Idx0->isNullValue()) PerformFold = true; else if (LastI.isSequential()) if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0)) - PerformFold = - !LastI.isBoundedSequential() || - isIndexInRangeOfArrayType(LastI.getSequentialNumElements(), CI); + PerformFold = (!LastI.isBoundedSequential() || + isIndexInRangeOfArrayType( + LastI.getSequentialNumElements(), CI)) && + !CE->getOperand(CE->getNumOperands() - 1) + ->getType() + ->isVectorTy(); if (PerformFold) { SmallVector NewIndices; diff --git a/contrib/llvm/lib/Object/COFFImportFile.cpp b/contrib/llvm/lib/Object/COFFImportFile.cpp index d1f46fdfa292..a515bc8ad16d 100644 --- a/contrib/llvm/lib/Object/COFFImportFile.cpp +++ b/contrib/llvm/lib/Object/COFFImportFile.cpp @@ -542,15 +542,12 @@ NewArchiveMember ObjectFactory::createWeakExternal(StringRef Sym, SymbolTable[2].Name.Offset.Offset = sizeof(uint32_t); //__imp_ String Table - if (Imp) { - SymbolTable[3].Name.Offset.Offset = sizeof(uint32_t) + Sym.size() + 7; - writeStringTable(Buffer, {std::string("__imp_").append(Sym), - std::string("__imp_").append(Weak)}); - } else { - SymbolTable[3].Name.Offset.Offset = sizeof(uint32_t) + Sym.size() + 1; - writeStringTable(Buffer, {Sym, Weak}); - } + StringRef Prefix = Imp ? "__imp_" : ""; + SymbolTable[3].Name.Offset.Offset = + sizeof(uint32_t) + Sym.size() + Prefix.size() + 1; append(Buffer, SymbolTable); + writeStringTable(Buffer, {(Prefix + Sym).str(), + (Prefix + Weak).str()}); // Copied here so we can still use writeStringTable char *Buf = Alloc.Allocate<char>(Buffer.size()); diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp index 160107cd7e2b..d52cd84246a1 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp +++ b/contrib/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp @@ -946,6 +946,18 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB, case AArch64::CMP_SWAP_128: return expandCMP_SWAP_128(MBB, MBBI, NextMBBI); + case AArch64::AESMCrrTied: + case AArch64::AESIMCrrTied: { + MachineInstrBuilder MIB = + BuildMI(MBB, MBBI, MI.getDebugLoc(), + TII->get(Opcode == AArch64::AESMCrrTied ?
AArch64::AESMCrr : + AArch64::AESIMCrr)) + .add(MI.getOperand(0)) + .add(MI.getOperand(1)); + transferImpOps(MI, MIB, MIB); + MI.eraseFromParent(); + return true; + } } return false; } diff --git a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index 4907d082eda0..7c6a99990406 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -506,19 +506,23 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF, return; } - auto CSStackSize = AFI->getCalleeSavedStackSize(); + bool IsWin64 = + Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()); + unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0; + + auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject; // All of the remaining stack allocations are for locals. - AFI->setLocalStackSize(NumBytes - CSStackSize); + AFI->setLocalStackSize(NumBytes - PrologueSaveSize); bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes); if (CombineSPBump) { emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP, -NumBytes, TII, MachineInstr::FrameSetup); NumBytes = 0; - } else if (CSStackSize != 0) { + } else if (PrologueSaveSize != 0) { MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(MBB, MBBI, DL, TII, - -CSStackSize); - NumBytes -= CSStackSize; + -PrologueSaveSize); + NumBytes -= PrologueSaveSize; } assert(NumBytes >= 0 && "Negative stack allocation size!?"); @@ -532,8 +536,9 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF, ++MBBI; } if (HasFP) { - // Only set up FP if we actually need to. Frame pointer is fp = sp - 16. - int FPOffset = CSStackSize - 16; + // Only set up FP if we actually need to. Frame pointer is fp = + // sp - fixedobject - 16. + int FPOffset = AFI->getCalleeSavedStackSize() - 16; if (CombineSPBump) FPOffset += AFI->getLocalStackSize(); @@ -672,8 +677,8 @@ void AArch64FrameLowering::emitPrologue(MachineFunction &MF, if (HasFP) { // Define the current CFA rule to use the provided FP. unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true); - unsigned CFIIndex = MF.addFrameInst( - MCCFIInstruction::createDefCfa(nullptr, Reg, 2 * StackGrowth)); + unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa( + nullptr, Reg, 2 * StackGrowth - FixedObject)); BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION)) .addCFIIndex(CFIIndex) .setMIFlags(MachineInstr::FrameSetup); @@ -759,12 +764,16 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps // it as the 2nd argument of AArch64ISD::TC_RETURN. - auto CSStackSize = AFI->getCalleeSavedStackSize(); + bool IsWin64 = + Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()); + unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0; + + auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject; bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes); - if (!CombineSPBump && CSStackSize != 0) + if (!CombineSPBump && PrologueSaveSize != 0) convertCalleeSaveRestoreToSPPrePostIncDec( - MBB, std::prev(MBB.getFirstTerminator()), DL, TII, CSStackSize); + MBB, std::prev(MBB.getFirstTerminator()), DL, TII, PrologueSaveSize); // Move past the restores of the callee-saved registers. 
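A worked example with hypothetical numbers: a Win64 vararg function that homes five GPRs has VarArgsGPRSize == 40, so FixedObject = alignTo(40, 16) = 48, the prologue and epilogue then move SP by CalleeSavedStackSize + 48 in a single bump, and the frame pointer lands at sp - fixedobject - 16 as the comment above states. Restating the rounding in checkable form:

    // alignTo as used here, restated for the example (unsigned math assumed)
    constexpr unsigned alignTo(unsigned Value, unsigned Align) {
      return (Value + Align - 1) / Align * Align;
    }
    static_assert(alignTo(40, 16) == 48, "five homed GPRs round up to 48");
    static_assert(alignTo(64, 16) == 64, "all eight GPRs need no padding");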
MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator(); @@ -786,7 +795,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, return; } - NumBytes -= CSStackSize; + NumBytes -= PrologueSaveSize; assert(NumBytes >= 0 && "Negative stack allocation size!?"); if (!hasFP(MF)) { @@ -796,7 +805,7 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, if (RedZone && ArgumentPopSize == 0) return; - bool NoCalleeSaveRestore = CSStackSize == 0; + bool NoCalleeSaveRestore = PrologueSaveSize == 0; int StackRestoreBytes = RedZone ? 0 : NumBytes; if (NoCalleeSaveRestore) StackRestoreBytes += ArgumentPopSize; @@ -815,7 +824,8 @@ void AArch64FrameLowering::emitEpilogue(MachineFunction &MF, // be able to save any instructions. if (MFI.hasVarSizedObjects() || AFI->isStackRealigned()) emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP, - -CSStackSize + 16, TII, MachineInstr::FrameDestroy); + -AFI->getCalleeSavedStackSize() + 16, TII, + MachineInstr::FrameDestroy); else if (NumBytes) emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP, NumBytes, TII, MachineInstr::FrameDestroy); @@ -845,7 +855,11 @@ int AArch64FrameLowering::resolveFrameIndexReference(const MachineFunction &MF, const AArch64RegisterInfo *RegInfo = static_cast( MF.getSubtarget().getRegisterInfo()); const AArch64FunctionInfo *AFI = MF.getInfo(); - int FPOffset = MFI.getObjectOffset(FI) + 16; + const AArch64Subtarget &Subtarget = MF.getSubtarget(); + bool IsWin64 = + Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()); + unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0; + int FPOffset = MFI.getObjectOffset(FI) + FixedObject + 16; int Offset = MFI.getObjectOffset(FI) + MFI.getStackSize(); bool isFixed = MFI.isFixedObjectIndex(FI); @@ -956,12 +970,6 @@ static void computeCalleeSaveRegisterPairs( "Odd number of callee-saved regs to spill!"); int Offset = AFI->getCalleeSavedStackSize(); - unsigned GPRSaveSize = AFI->getVarArgsGPRSize(); - const AArch64Subtarget &Subtarget = MF.getSubtarget(); - bool IsWin64 = Subtarget.isCallingConvWin64(MF.getFunction()->getCallingConv()); - if (IsWin64) - Offset -= alignTo(GPRSaveSize, 16); - for (unsigned i = 0; i < Count; ++i) { RegPairInfo RPI; RPI.Reg1 = CSI[i].getReg(); diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 8c30c4410c09..9d879886d39d 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -9586,8 +9586,8 @@ static bool performTBISimplification(SDValue Addr, SelectionDAG &DAG) { APInt DemandedMask = APInt::getLowBitsSet(64, 56); KnownBits Known; - TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), - DCI.isBeforeLegalizeOps()); + TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), + !DCI.isBeforeLegalizeOps()); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) { DCI.CommitTargetLoweringOpt(TLO); diff --git a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 0dcf07f98412..5049a39814f1 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/contrib/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -37,6 +37,9 @@ def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">, AssemblerPredicate<"FeatureFullFP16", "fullfp16">; def HasSPE : Predicate<"Subtarget->hasSPE()">, 
AssemblerPredicate<"FeatureSPE", "spe">; +def HasFuseAES : Predicate<"Subtarget->hasFuseAES()">, + AssemblerPredicate<"FeatureFuseAES", + "fuse-aes">; def HasSVE : Predicate<"Subtarget->hasSVE()">, AssemblerPredicate<"FeatureSVE", "sve">; @@ -5304,6 +5307,31 @@ def AESDrr : AESTiedInst<0b0101, "aesd", int_aarch64_crypto_aesd>; def AESMCrr : AESInst< 0b0110, "aesmc", int_aarch64_crypto_aesmc>; def AESIMCrr : AESInst< 0b0111, "aesimc", int_aarch64_crypto_aesimc>; +// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required +// for AES fusion on some CPUs. +let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in { +def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">, + Sched<[WriteV]>; +def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">, + Sched<[WriteV]>; +} + +// Only use constrained versions of AES(I)MC instructions if they are paired with +// AESE/AESD. +def : Pat<(v16i8 (int_aarch64_crypto_aesmc + (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1), + (v16i8 V128:$src2))))), + (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1), + (v16i8 V128:$src2)))))>, + Requires<[HasFuseAES]>; + +def : Pat<(v16i8 (int_aarch64_crypto_aesimc + (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1), + (v16i8 V128:$src2))))), + (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1), + (v16i8 V128:$src2)))))>, + Requires<[HasFuseAES]>; + def SHA1Crrr : SHATiedInstQSV<0b000, "sha1c", int_aarch64_crypto_sha1c>; def SHA1Prrr : SHATiedInstQSV<0b001, "sha1p", int_aarch64_crypto_sha1p>; def SHA1Mrrr : SHATiedInstQSV<0b010, "sha1m", int_aarch64_crypto_sha1m>; diff --git a/contrib/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp b/contrib/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp index ccc9d2ad1b48..963cfadc54fd 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp +++ b/contrib/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp @@ -118,11 +118,13 @@ static bool shouldScheduleAdjacent(const TargetInstrInfo &TII, // Fuse AES crypto operations. switch(SecondOpcode) { // AES encode. - case AArch64::AESMCrr : + case AArch64::AESMCrr: + case AArch64::AESMCrrTied: return FirstOpcode == AArch64::AESErr || FirstOpcode == AArch64::INSTRUCTION_LIST_END; // AES decode. case AArch64::AESIMCrr: + case AArch64::AESIMCrrTied: return FirstOpcode == AArch64::AESDrr || FirstOpcode == AArch64::INSTRUCTION_LIST_END; } diff --git a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp index ba8eb8656585..7563bffd8f87 100644 --- a/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/contrib/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -3984,6 +3984,13 @@ bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags, if (Offset != MFI.getObjectOffset(FI)) return false; + // If this is not byval, check that the argument stack object is immutable. + // inalloca and argument copy elision can create mutable argument stack + // objects. Byval objects can be mutated, but a byval call intends to pass the + // mutated memory. + if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI)) + return false; + if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) { // If the argument location is wider than the argument type, check that any // extension flags match. 
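Tying the AArch64 pieces above together (an editorial sketch): the tied pseudos force the AESMC/AESIMC destination to match its source, so the value stays in one register and the macro-fusion check sees an adjacent, fusable pair. ACLE code that exercises the new pattern, assuming a crypto-enabled AArch64 target:

    #include <arm_neon.h> // assumes compilation with -march=armv8-a+crypto
    uint8x16_t aes_round(uint8x16_t data, uint8x16_t key) {
      return vaesmcq_u8(vaeseq_u8(data, key)); // selects to aese ; aesmc
    }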
@@ -30605,8 +30612,8 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG, assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size"); APInt DemandedMask(APInt::getSignMask(BitWidth)); KnownBits Known; - TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(), - DCI.isBeforeLegalizeOps()); + TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), + !DCI.isBeforeLegalizeOps()); if (TLI.ShrinkDemandedConstant(Cond, DemandedMask, TLO) || TLI.SimplifyDemandedBits(Cond, DemandedMask, Known, TLO)) { // If we changed the computation somewhere in the DAG, this change will diff --git a/contrib/llvm/lib/Target/X86/X86InstrSSE.td b/contrib/llvm/lib/Target/X86/X86InstrSSE.td index fe87bbd99473..650e4fc8716c 100644 --- a/contrib/llvm/lib/Target/X86/X86InstrSSE.td +++ b/contrib/llvm/lib/Target/X86/X86InstrSSE.td @@ -3697,8 +3697,7 @@ let SchedRW = [WriteNop] in { // Pause. This "instruction" is encoded as "rep; nop", so even though it // was introduced with SSE2, it's backward compatible. def PAUSE : I<0x90, RawFrm, (outs), (ins), - "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>, - OBXS, Requires<[HasSSE2]>; + "pause", [(int_x86_sse2_pause)], IIC_SSE_PAUSE>, OBXS; } let SchedRW = [WriteFence] in { diff --git a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp index 53223ab44316..72bae203ee94 100644 --- a/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/contrib/llvm/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -356,7 +356,7 @@ doPromotion(Function *F, SmallPtrSetImpl &ArgsToPromote, // Just add all the struct element types. Type *AgTy = cast(I->getType())->getElementType(); Value *TheAlloca = new AllocaInst(AgTy, DL.getAllocaAddrSpace(), nullptr, - "", InsertPt); + I->getParamAlignment(), "", InsertPt); StructType *STy = cast(AgTy); Value *Idxs[2] = {ConstantInt::get(Type::getInt32Ty(F->getContext()), 0), nullptr}; diff --git a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp index 057f746e052d..f8d255273b2a 100644 --- a/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp +++ b/contrib/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp @@ -756,7 +756,8 @@ struct FunctionStackPoisoner : public InstVisitor { bool runOnFunction() { if (!ClStack) return false; - if (ClRedzoneByvalArgs) copyArgsPassedByValToAllocas(); + if (ClRedzoneByvalArgs && Mapping.Offset != kDynamicShadowSentinel) + copyArgsPassedByValToAllocas(); // Collect alloca, ret, lifetime instructions etc. for (BasicBlock *BB : depth_first(&F.getEntryBlock())) visit(*BB); diff --git a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp index a738ebb4607e..4822cf7cce0f 100644 --- a/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp +++ b/contrib/llvm/lib/Transforms/Scalar/SCCP.cpp @@ -1790,7 +1790,8 @@ static bool runIPSCCP(Module &M, const DataLayout &DL, // variables that do not have their 'addresses taken'. If they don't have // their addresses taken, we can propagate constants through them. for (GlobalVariable &G : M.globals()) - if (!G.isConstant() && G.hasLocalLinkage() && !AddressIsTaken(&G)) + if (!G.isConstant() && G.hasLocalLinkage() && + G.hasDefinitiveInitializer() && !AddressIsTaken(&G)) Solver.TrackValueOfGlobalVariable(&G); // Solve for constants. 
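The new hasDefinitiveInitializer() guard in IPSCCP above filters out local-linkage globals whose initializer still cannot be trusted, for example an internal global marked externally_initialized, whose value may change before constructors run. The tracking condition, condensed (the helper name is made up):

    #include "llvm/IR/GlobalVariable.h"
    static bool mayTrackGlobal(const llvm::GlobalVariable &G) {
      return !G.isConstant() && G.hasLocalLinkage() &&
             G.hasDefinitiveInitializer(); // false for externally_initialized
    }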
diff --git a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp index d27cb45c7d7f..e5392b53050d 100644 --- a/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp +++ b/contrib/llvm/lib/Transforms/Utils/CloneModule.cpp @@ -132,7 +132,8 @@ std::unique_ptr llvm::CloneModule( SmallVector, 1> MDs; I->getAllMetadata(MDs); for (auto MD : MDs) - GV->addMetadata(MD.first, *MapMetadata(MD.second, VMap)); + GV->addMetadata(MD.first, + *MapMetadata(MD.second, VMap, RF_MoveDistinctMDs)); copyComdat(GV, &*I); } diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h index d6c9654fefa4..77f81838e5eb 100644 --- a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h +++ b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h @@ -317,6 +317,7 @@ class CoroutineBodyStmt final unsigned NumParams; friend class ASTStmtReader; + friend class ASTReader; friend TrailingObjects; Stmt **getStoredStmts() { return getTrailingObjects(); } @@ -347,6 +348,8 @@ class CoroutineBodyStmt final public: static CoroutineBodyStmt *Create(const ASTContext &C, CtorArgs const &Args); + static CoroutineBodyStmt *Create(const ASTContext &C, EmptyShell, + unsigned NumParams); bool hasDependentPromiseType() const { return getPromiseDecl()->getType()->isDependentType(); @@ -444,6 +447,8 @@ class CoreturnStmt : public Stmt { SubStmts[SubStmt::PromiseCall] = PromiseCall; } + CoreturnStmt(EmptyShell) : CoreturnStmt({}, {}, {}) {} + SourceLocation getKeywordLoc() const { return CoreturnLoc; } /// \brief Retrieve the operand of the 'co_return' statement. Will be nullptr diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def index a516bf6bf06c..6d3a478ac360 100644 --- a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def +++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def @@ -338,8 +338,8 @@ TARGET_BUILTIN(__builtin_ia32_lfence, "v", "", "sse2") TARGET_HEADER_BUILTIN(_mm_lfence, "v", "h", "emmintrin.h", ALL_LANGUAGES, "sse2") TARGET_BUILTIN(__builtin_ia32_mfence, "v", "", "sse2") TARGET_HEADER_BUILTIN(_mm_mfence, "v", "h", "emmintrin.h", ALL_LANGUAGES, "sse2") -TARGET_BUILTIN(__builtin_ia32_pause, "v", "", "sse2") -TARGET_HEADER_BUILTIN(_mm_pause, "v", "h", "emmintrin.h", ALL_LANGUAGES, "sse2") +TARGET_BUILTIN(__builtin_ia32_pause, "v", "", "") +TARGET_HEADER_BUILTIN(_mm_pause, "v", "h", "emmintrin.h", ALL_LANGUAGES, "") TARGET_BUILTIN(__builtin_ia32_pmuludq128, "V2LLiV4iV4i", "", "sse2") TARGET_BUILTIN(__builtin_ia32_psraw128, "V8sV8sV8s", "", "sse2") TARGET_BUILTIN(__builtin_ia32_psrad128, "V4iV4iV4i", "", "sse2") diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td index 753c178eec6a..3c0674f598d1 100644 --- a/contrib/llvm/tools/clang/include/clang/Driver/Options.td +++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td @@ -2019,10 +2019,6 @@ def mdspr2 : Flag<["-"], "mdspr2">, Group; def mno_dspr2 : Flag<["-"], "mno-dspr2">, Group; def msingle_float : Flag<["-"], "msingle-float">, Group; def mdouble_float : Flag<["-"], "mdouble-float">, Group; -def mmadd4 : Flag<["-"], "mmadd4">, Group, - HelpText<"Enable the generation of 4-operand madd.s, madd.d and related instructions.">; -def mno_madd4 : Flag<["-"], "mno-madd4">, Group, - HelpText<"Disable the generation of 4-operand madd.s, madd.d and related instructions.">; def mmsa : Flag<["-"], 
"mmsa">, Group, HelpText<"Enable MSA ASE (MIPS only)">; def mno_msa : Flag<["-"], "mno-msa">, Group, diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h index 6b40781a1239..9227b33d2c53 100644 --- a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h +++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h @@ -1545,9 +1545,14 @@ namespace clang { // ARC EXPR_OBJC_BRIDGED_CAST, // ObjCBridgedCastExpr - + STMT_MS_DEPENDENT_EXISTS, // MSDependentExistsStmt - EXPR_LAMBDA // LambdaExpr + EXPR_LAMBDA, // LambdaExpr + STMT_COROUTINE_BODY, + STMT_CORETURN, + EXPR_COAWAIT, + EXPR_COYIELD, + EXPR_DEPENDENT_COAWAIT, }; /// \brief The kinds of designators that can occur in a diff --git a/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp b/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp index b19135384cfd..121724a73152 100644 --- a/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp @@ -378,8 +378,12 @@ void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) { assert(Record && Record->hasDefinition() && "Expected non-null record to be a definition."); - if (isa(Record)) { - return; + const DeclContext *DC = Record; + while (DC) { + if (isa(DC)) { + return; + } + DC = DC->getParent(); } AddDecl(Record); diff --git a/contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp b/contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp index 8466cd61f055..666f5dcc9d97 100644 --- a/contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp +++ b/contrib/llvm/tools/clang/lib/AST/StmtCXX.cpp @@ -96,6 +96,20 @@ CoroutineBodyStmt *CoroutineBodyStmt::Create( return new (Mem) CoroutineBodyStmt(Args); } +CoroutineBodyStmt *CoroutineBodyStmt::Create(const ASTContext &C, EmptyShell, + unsigned NumParams) { + std::size_t Size = totalSizeToAlloc( + CoroutineBodyStmt::FirstParamMove + NumParams); + + void *Mem = C.Allocate(Size, alignof(CoroutineBodyStmt)); + auto *Result = new (Mem) CoroutineBodyStmt(CtorArgs()); + Result->NumParams = NumParams; + auto *ParamBegin = Result->getStoredStmts() + SubStmt::FirstParamMove; + std::uninitialized_fill(ParamBegin, ParamBegin + NumParams, + static_cast(nullptr)); + return Result; +} + CoroutineBodyStmt::CoroutineBodyStmt(CoroutineBodyStmt::CtorArgs const &Args) : Stmt(CoroutineBodyStmtClass), NumParams(Args.ParamMoves.size()) { Stmt **SubStmts = getStoredStmts(); diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp index 5d75aa5a7528..73be2e173fda 100644 --- a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp +++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp @@ -8050,7 +8050,6 @@ class MipsTargetInfo : public TargetInfo { NoDSP, DSP1, DSP2 } DspRev; bool HasMSA; - bool DisableMadd4; protected: bool HasFP64; @@ -8061,7 +8060,7 @@ class MipsTargetInfo : public TargetInfo { : TargetInfo(Triple), IsMips16(false), IsMicromips(false), IsNan2008(false), IsSingleFloat(false), IsNoABICalls(false), CanUseBSDABICalls(false), FloatABI(HardFloat), DspRev(NoDSP), - HasMSA(false), DisableMadd4(false), HasFP64(false) { + HasMSA(false), HasFP64(false) { TheCXXABI.set(TargetCXXABI::GenericMIPS); setABI((getTriple().getArch() == llvm::Triple::mips || @@ -8307,9 +8306,6 @@ class MipsTargetInfo : public TargetInfo { if (HasMSA) Builder.defineMacro("__mips_msa", Twine(1)); - if (DisableMadd4) - Builder.defineMacro("__mips_no_madd4", Twine(1)); - Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0))); 
Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth())); Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth())); @@ -8472,8 +8468,6 @@ class MipsTargetInfo : public TargetInfo { DspRev = std::max(DspRev, DSP2); else if (Feature == "+msa") HasMSA = true; - else if (Feature == "+nomadd4") - DisableMadd4 = true; else if (Feature == "+fp64") HasFP64 = true; else if (Feature == "-fp64") diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Arch/Mips.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Arch/Mips.cpp index b45dcd6db678..1da90d1dc7ba 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/Arch/Mips.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/Arch/Mips.cpp @@ -297,8 +297,6 @@ void mips::getMIPSTargetFeatures(const Driver &D, const llvm::Triple &Triple, AddTargetFeature(Args, Features, options::OPT_mno_odd_spreg, options::OPT_modd_spreg, "nooddspreg"); - AddTargetFeature(Args, Features, options::OPT_mno_madd4, options::OPT_mmadd4, - "nomadd4"); AddTargetFeature(Args, Features, options::OPT_mlong_calls, options::OPT_mno_long_calls, "long-calls"); AddTargetFeature(Args, Features, options::OPT_mmt, options::OPT_mno_mt,"mt"); diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp index c5f266ec8fdc..1d54a1e9cbb0 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/OpenBSD.cpp @@ -133,6 +133,8 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA, } } + if (Args.hasArg(options::OPT_pie)) + CmdArgs.push_back("-pie"); if (Args.hasArg(options::OPT_nopie)) CmdArgs.push_back("-nopie"); diff --git a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h index 5d83a8db484b..576f761b2542 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h @@ -832,7 +832,8 @@ _mm256_xor_si256(__m256i __a, __m256i __b) static __inline__ __m256i __DEFAULT_FN_ATTRS _mm256_stream_load_si256(__m256i const *__V) { - return (__m256i)__builtin_nontemporal_load((const __v4di *)__V); + typedef __v4di __v4di_aligned __attribute__((aligned(32))); + return (__m256i)__builtin_nontemporal_load((const __v4di_aligned *)__V); } static __inline__ __m128 __DEFAULT_FN_ATTRS diff --git a/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h b/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h index b556d04efbb7..4ce694531100 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avx512fintrin.h @@ -4289,7 +4289,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_maskz_cvtps_epu32 ( __mmask16 __U, __m512 __A) { return (__m512i) __builtin_ia32_cvtps2udq512_mask ((__v16sf) __A, - (__v16si) + (__v16si) _mm512_setzero_si512 (), (__mmask16) __U , _MM_FROUND_CUR_DIRECTION); @@ -9035,25 +9035,29 @@ _mm512_kxor (__mmask16 __A, __mmask16 __B) static __inline__ void __DEFAULT_FN_ATTRS _mm512_stream_si512 (__m512i * __P, __m512i __A) { - __builtin_nontemporal_store((__v8di)__A, (__v8di*)__P); + typedef __v8di __v8di_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v8di_aligned)__A, (__v8di_aligned*)__P); } static __inline__ __m512i __DEFAULT_FN_ATTRS _mm512_stream_load_si512 (void *__P) { - return (__m512i) __builtin_nontemporal_load((const __v8di *)__P); + typedef __v8di __v8di_aligned __attribute__((aligned(64))); + return (__m512i) 
__builtin_nontemporal_load((const __v8di_aligned *)__P); } static __inline__ void __DEFAULT_FN_ATTRS _mm512_stream_pd (double *__P, __m512d __A) { - __builtin_nontemporal_store((__v8df)__A, (__v8df*)__P); + typedef __v8df __v8df_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v8df_aligned)__A, (__v8df_aligned*)__P); } static __inline__ void __DEFAULT_FN_ATTRS _mm512_stream_ps (float *__P, __m512 __A) { - __builtin_nontemporal_store((__v16sf)__A, (__v16sf*)__P); + typedef __v16sf __v16sf_aligned __attribute__((aligned(64))); + __builtin_nontemporal_store((__v16sf_aligned)__A, (__v16sf_aligned*)__P); } static __inline__ __m512d __DEFAULT_FN_ATTRS @@ -9217,39 +9221,39 @@ _mm512_maskz_moveldup_ps (__mmask16 __U, __m512 __A) static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_mask_move_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128 __B) { - __m128 res = __A; + __m128 res = __A; res[0] = (__U & 1) ? __B[0] : __W[0]; - return res; + return res; } static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_maskz_move_ss (__mmask8 __U, __m128 __A, __m128 __B) { - __m128 res = __A; - res[0] = (__U & 1) ? __B[0] : 0; - return res; + __m128 res = __A; + res[0] = (__U & 1) ? __B[0] : 0; + return res; } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mask_move_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128d __B) { - __m128d res = __A; + __m128d res = __A; res[0] = (__U & 1) ? __B[0] : __W[0]; - return res; + return res; } static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_maskz_move_sd (__mmask8 __U, __m128d __A, __m128d __B) { - __m128d res = __A; - res[0] = (__U & 1) ? __B[0] : 0; - return res; + __m128d res = __A; + res[0] = (__U & 1) ? __B[0] : 0; + return res; } static __inline__ void __DEFAULT_FN_ATTRS _mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A) { - __builtin_ia32_storess128_mask ((__v16sf *)__W, + __builtin_ia32_storess128_mask ((__v16sf *)__W, (__v16sf) _mm512_castps128_ps512(__A), (__mmask16) __U & (__mmask16)1); } @@ -9257,7 +9261,7 @@ _mm_mask_store_ss (float * __W, __mmask8 __U, __m128 __A) static __inline__ void __DEFAULT_FN_ATTRS _mm_mask_store_sd (double * __W, __mmask8 __U, __m128d __A) { - __builtin_ia32_storesd128_mask ((__v8df *)__W, + __builtin_ia32_storesd128_mask ((__v8df *)__W, (__v8df) _mm512_castpd128_pd512(__A), (__mmask8) __U & 1); } @@ -9606,7 +9610,7 @@ _mm_mask_cvtsd_ss (__m128 __W, __mmask8 __U, __m128 __A, __m128d __B) { return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A), (__v2df)(__B), - (__v4sf)(__W), + (__v4sf)(__W), (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); } @@ -9615,7 +9619,7 @@ _mm_maskz_cvtsd_ss (__mmask8 __U, __m128 __A, __m128d __B) { return __builtin_ia32_cvtsd2ss_round_mask ((__v4sf)(__A), (__v2df)(__B), - (__v4sf)_mm_setzero_ps(), + (__v4sf)_mm_setzero_ps(), (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); } @@ -9680,7 +9684,7 @@ _mm_mask_cvtss_sd (__m128d __W, __mmask8 __U, __m128d __A, __m128 __B) return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A), (__v4sf)(__B), (__v2df)(__W), - (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); } static __inline__ __m128d __DEFAULT_FN_ATTRS @@ -9688,8 +9692,8 @@ _mm_maskz_cvtss_sd (__mmask8 __U, __m128d __A, __m128 __B) { return __builtin_ia32_cvtss2sd_round_mask((__v2df)(__A), (__v4sf)(__B), - (__v2df)_mm_setzero_pd(), - (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); + (__v2df)_mm_setzero_pd(), + (__mmask8)(__U), _MM_FROUND_CUR_DIRECTION); } static __inline__ __m128d __DEFAULT_FN_ATTRS @@ -9935,7 +9939,7 @@ static __inline__ double __DEFAULT_FN_ATTRS 
_mm512_reduce_mul_pd(__m512d __W) { } // Vec512 - Vector with size 512. -// Vec512Neutral - All vector elements set to the identity element. +// Vec512Neutral - All vector elements set to the identity element. // Identity element: {+,0},{*,1},{&,0xFFFFFFFFFFFFFFFF},{|,0} // Operator - Can be one of following: +,*,&,| // Mask - Intrinsic Mask @@ -9965,19 +9969,19 @@ _mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) { static __inline__ long long __DEFAULT_FN_ATTRS _mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) { - _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF), + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF), &, __M, i, i, q); } static __inline__ long long __DEFAULT_FN_ATTRS _mm512_mask_reduce_or_epi64(__mmask8 __M, __m512i __W) { - _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), |, __M, + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_epi64(0), |, __M, i, i, q); } static __inline__ double __DEFAULT_FN_ATTRS _mm512_mask_reduce_add_pd(__mmask8 __M, __m512d __W) { - _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(0), +, __M, + _mm512_mask_reduce_operator_64bit(__W, _mm512_set1_pd(0), +, __M, f, d, pd); } @@ -10039,17 +10043,17 @@ _mm512_reduce_add_epi32(__m512i __W) { _mm512_reduce_operator_32bit(__W, +, i, i); } -static __inline__ int __DEFAULT_FN_ATTRS +static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_mul_epi32(__m512i __W) { _mm512_reduce_operator_32bit(__W, *, i, i); } -static __inline__ int __DEFAULT_FN_ATTRS +static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_and_epi32(__m512i __W) { _mm512_reduce_operator_32bit(__W, &, i, i); } -static __inline__ int __DEFAULT_FN_ATTRS +static __inline__ int __DEFAULT_FN_ATTRS _mm512_reduce_or_epi32(__m512i __W) { _mm512_reduce_operator_32bit(__W, |, i, i); } @@ -10065,7 +10069,7 @@ _mm512_reduce_mul_ps(__m512 __W) { } // Vec512 - Vector with size 512. -// Vec512Neutral - All vector elements set to the identity element. +// Vec512Neutral - All vector elements set to the identity element. 
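The aligned-typedef idiom used by this patch's *intrin.h hunks, in miniature (generic vector stand-ins for __v4di and friends; Clang's nontemporal builtin assumed): casting through a typedef that carries an explicit aligned attribute restores the alignment guarantee the builtin needs to select an aligned streaming store:

    typedef long long v4di __attribute__((vector_size(32)));
    typedef v4di v4di_aligned __attribute__((aligned(32)));
    static inline void stream_store(v4di *p, v4di v) {
      __builtin_nontemporal_store((v4di_aligned)v, (v4di_aligned *)p);
    }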
// Identity element: {+,0},{*,1},{&,0xFFFFFFFF},{|,0} // Operator - Can be one of following: +,*,&,| // Mask - Intrinsic Mask @@ -10095,7 +10099,7 @@ _mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) { static __inline__ int __DEFAULT_FN_ATTRS _mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) { - _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0xFFFFFFFF), &, __M, + _mm512_mask_reduce_operator_32bit(__W, _mm512_set1_epi32(0xFFFFFFFF), &, __M, i, i, d); } @@ -10158,7 +10162,7 @@ _mm512_mask_reduce_mul_ps(__mmask16 __M, __m512 __W) { return Vec512[0]; \ }) -static __inline__ long long __DEFAULT_FN_ATTRS +static __inline__ long long __DEFAULT_FN_ATTRS _mm512_reduce_max_epi64(__m512i __V) { _mm512_reduce_maxMin_64bit(__V, max_epi64, i, i); } @@ -10168,7 +10172,7 @@ _mm512_reduce_max_epu64(__m512i __V) { _mm512_reduce_maxMin_64bit(__V, max_epu64, i, i); } -static __inline__ double __DEFAULT_FN_ATTRS +static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_max_pd(__m512d __V) { _mm512_reduce_maxMin_64bit(__V, max_pd, d, f); } @@ -10183,7 +10187,7 @@ _mm512_reduce_min_epu64(__m512i __V) { _mm512_reduce_maxMin_64bit(__V, min_epu64, i, i); } -static __inline__ double __DEFAULT_FN_ATTRS +static __inline__ double __DEFAULT_FN_ATTRS _mm512_reduce_min_pd(__m512d __V) { _mm512_reduce_maxMin_64bit(__V, min_pd, d, f); } diff --git a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h index 78bb70740bbf..dff5897b6bb6 100644 --- a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h @@ -3590,7 +3590,8 @@ _mm_maskstore_ps(float *__p, __m128i __m, __m128 __a) static __inline void __DEFAULT_FN_ATTRS _mm256_stream_si256(__m256i *__a, __m256i __b) { - __builtin_nontemporal_store((__v4di)__b, (__v4di*)__a); + typedef __v4di __v4di_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v4di_aligned)__b, (__v4di_aligned*)__a); } /// \brief Moves double-precision values from a 256-bit vector of [4 x double] @@ -3609,7 +3610,8 @@ _mm256_stream_si256(__m256i *__a, __m256i __b) static __inline void __DEFAULT_FN_ATTRS _mm256_stream_pd(double *__a, __m256d __b) { - __builtin_nontemporal_store((__v4df)__b, (__v4df*)__a); + typedef __v4df __v4df_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v4df_aligned)__b, (__v4df_aligned*)__a); } /// \brief Moves single-precision floating point values from a 256-bit vector @@ -3629,7 +3631,8 @@ _mm256_stream_pd(double *__a, __m256d __b) static __inline void __DEFAULT_FN_ATTRS _mm256_stream_ps(float *__p, __m256 __a) { - __builtin_nontemporal_store((__v8sf)__a, (__v8sf*)__p); + typedef __v8sf __v8sf_aligned __attribute__((aligned(32))); + __builtin_nontemporal_store((__v8sf_aligned)__a, (__v8sf_aligned*)__p); } /* Create vectors */ diff --git a/contrib/llvm/tools/clang/lib/Headers/float.h b/contrib/llvm/tools/clang/lib/Headers/float.h index 0f453d87cbcb..502143d4e481 100644 --- a/contrib/llvm/tools/clang/lib/Headers/float.h +++ b/contrib/llvm/tools/clang/lib/Headers/float.h @@ -33,6 +33,15 @@ */ #if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \ __STDC_HOSTED__ && __has_include_next() + +/* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level + * of #include_next to keep Metrowerks compilers happy. Avoid this + * extra indirection. + */ +#ifdef __APPLE__ +#define _FLOAT_H_ +#endif + # include_next /* Undefine anything that we'll be redefining below. 
*/ diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp index 6f0db6ce1c6a..a18f71422fde 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp @@ -542,6 +542,9 @@ void Sema::getUndefinedButUsed( // __attribute__((weakref)) is basically a definition. if (ND->hasAttr<WeakRefAttr>()) continue; + if (isa<CXXDeductionGuideDecl>(ND)) + continue; + if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) { if (FD->isDefined()) continue; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp index ead80b61586a..d3d7d8b67c70 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp @@ -8288,7 +8288,7 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, // type. Note that this is already done by non-compound assignments in // CheckAssignmentConstraints. If it's a scalar type, only bitcast for // <1 x T> -> T. The result is also a vector type. - } else if (OtherType->isExtVectorType() || + } else if (OtherType->isExtVectorType() || OtherType->isVectorType() || (OtherType->isScalarType() && VT->getNumElements() == 1)) { ExprResult *RHSExpr = &RHS; *RHSExpr = ImpCastExprToType(RHSExpr->get(), LHSType, CK_BitCast); diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp index 21adcddd3a4a..3f5da029947c 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp @@ -367,28 +367,45 @@ void ASTStmtReader::VisitMSAsmStmt(MSAsmStmt *S) { } void ASTStmtReader::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) { - // FIXME: Implement coroutine serialization. - llvm_unreachable("unimplemented"); + VisitStmt(S); + assert(Record.peekInt() == S->NumParams); + Record.skipInts(1); + auto *StoredStmts = S->getStoredStmts(); + for (unsigned i = 0; + i < CoroutineBodyStmt::SubStmt::FirstParamMove + S->NumParams; ++i) + StoredStmts[i] = Record.readSubStmt(); } void ASTStmtReader::VisitCoreturnStmt(CoreturnStmt *S) { - // FIXME: Implement coroutine serialization. - llvm_unreachable("unimplemented"); + VisitStmt(S); + S->CoreturnLoc = Record.readSourceLocation(); + for (auto &SubStmt: S->SubStmts) + SubStmt = Record.readSubStmt(); + S->IsImplicit = Record.readInt() != 0; } -void ASTStmtReader::VisitCoawaitExpr(CoawaitExpr *S) { - // FIXME: Implement coroutine serialization. - llvm_unreachable("unimplemented"); +void ASTStmtReader::VisitCoawaitExpr(CoawaitExpr *E) { + VisitExpr(E); + E->KeywordLoc = ReadSourceLocation(); + for (auto &SubExpr: E->SubExprs) + SubExpr = Record.readSubStmt(); + E->OpaqueValue = cast_or_null<OpaqueValueExpr>(Record.readSubStmt()); + E->setIsImplicit(Record.readInt() != 0); } -void ASTStmtReader::VisitDependentCoawaitExpr(DependentCoawaitExpr *S) { - // FIXME: Implement coroutine serialization. - llvm_unreachable("unimplemented"); +void ASTStmtReader::VisitCoyieldExpr(CoyieldExpr *E) { + VisitExpr(E); + E->KeywordLoc = ReadSourceLocation(); + for (auto &SubExpr: E->SubExprs) + SubExpr = Record.readSubStmt(); + E->OpaqueValue = cast_or_null<OpaqueValueExpr>(Record.readSubStmt()); } -void ASTStmtReader::VisitCoyieldExpr(CoyieldExpr *S) { - // FIXME: Implement coroutine serialization.
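With the reader visitors implemented here and the matching writer below, a coroutine body now survives AST serialization instead of tripping the old llvm_unreachable stubs. A minimal program that can now round-trip through a PCH (Coroutines TS era, built with -fcoroutines-ts; the task type is a deliberately bare-bones promise):

    #include <experimental/coroutine>
    struct task {
      struct promise_type {
        task get_return_object() { return {}; }
        std::experimental::suspend_never initial_suspend() { return {}; }
        std::experimental::suspend_never final_suspend() { return {}; }
        void return_value(int) {}
        void unhandled_exception() {}
      };
    };
    task f() { co_return 42; }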
-  llvm_unreachable("unimplemented");
+void ASTStmtReader::VisitDependentCoawaitExpr(DependentCoawaitExpr *E) {
+  VisitExpr(E);
+  E->KeywordLoc = ReadSourceLocation();
+  for (auto &SubExpr: E->SubExprs)
+    SubExpr = Record.readSubStmt();
 }
 
 void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
@@ -3947,6 +3964,29 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
       S = LambdaExpr::CreateDeserialized(Context, NumCaptures);
       break;
     }
+
+    case STMT_COROUTINE_BODY: {
+      unsigned NumParams = Record[ASTStmtReader::NumStmtFields];
+      S = CoroutineBodyStmt::Create(Context, Empty, NumParams);
+      break;
+    }
+
+    case STMT_CORETURN:
+      S = new (Context) CoreturnStmt(Empty);
+      break;
+
+    case EXPR_COAWAIT:
+      S = new (Context) CoawaitExpr(Empty);
+      break;
+
+    case EXPR_COYIELD:
+      S = new (Context) CoyieldExpr(Empty);
+      break;
+
+    case EXPR_DEPENDENT_COAWAIT:
+      S = new (Context) DependentCoawaitExpr(Empty);
+      break;
+
     }
 
     // We hit a STMT_STOP, so we're done with this expression.
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
index ae2e0b88c311..6971339663f0 100644
--- a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -286,7 +286,7 @@ void ASTStmtWriter::VisitMSAsmStmt(MSAsmStmt *S) {
   }
 
   // Outputs
-  for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {      
+  for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
     Record.AddStmt(S->getOutputExpr(I));
     Record.AddString(S->getOutputConstraint(I));
   }
@@ -300,29 +300,48 @@ void ASTStmtWriter::VisitMSAsmStmt(MSAsmStmt *S) {
   Code = serialization::STMT_MSASM;
 }
 
-void ASTStmtWriter::VisitCoroutineBodyStmt(CoroutineBodyStmt *S) {
-  // FIXME: Implement coroutine serialization.
-  llvm_unreachable("unimplemented");
+void ASTStmtWriter::VisitCoroutineBodyStmt(CoroutineBodyStmt *CoroStmt) {
+  VisitStmt(CoroStmt);
+  Record.push_back(CoroStmt->getParamMoves().size());
+  for (Stmt *S : CoroStmt->children())
+    Record.AddStmt(S);
+  Code = serialization::STMT_COROUTINE_BODY;
 }
 
 void ASTStmtWriter::VisitCoreturnStmt(CoreturnStmt *S) {
-  // FIXME: Implement coroutine serialization.
-  llvm_unreachable("unimplemented");
+  VisitStmt(S);
+  Record.AddSourceLocation(S->getKeywordLoc());
+  Record.AddStmt(S->getOperand());
+  Record.AddStmt(S->getPromiseCall());
+  Record.push_back(S->isImplicit());
+  Code = serialization::STMT_CORETURN;
 }
 
-void ASTStmtWriter::VisitCoawaitExpr(CoawaitExpr *S) {
-  // FIXME: Implement coroutine serialization.
-  llvm_unreachable("unimplemented");
+void ASTStmtWriter::VisitCoroutineSuspendExpr(CoroutineSuspendExpr *E) {
+  VisitExpr(E);
+  Record.AddSourceLocation(E->getKeywordLoc());
+  for (Stmt *S : E->children())
+    Record.AddStmt(S);
+  Record.AddStmt(E->getOpaqueValue());
 }
 
-void ASTStmtWriter::VisitDependentCoawaitExpr(DependentCoawaitExpr *S) {
-  // FIXME: Implement coroutine serialization.
-  llvm_unreachable("unimplemented");
+void ASTStmtWriter::VisitCoawaitExpr(CoawaitExpr *E) {
+  VisitCoroutineSuspendExpr(E);
+  Record.push_back(E->isImplicit());
+  Code = serialization::EXPR_COAWAIT;
 }
 
-void ASTStmtWriter::VisitCoyieldExpr(CoyieldExpr *S) {
-  // FIXME: Implement coroutine serialization.
-  llvm_unreachable("unimplemented");
+void ASTStmtWriter::VisitCoyieldExpr(CoyieldExpr *E) {
+  VisitCoroutineSuspendExpr(E);
+  Code = serialization::EXPR_COYIELD;
+}
+
+void ASTStmtWriter::VisitDependentCoawaitExpr(DependentCoawaitExpr *E) {
+  VisitExpr(E);
+  Record.AddSourceLocation(E->getKeywordLoc());
+  for (Stmt *S : E->children())
+    Record.AddStmt(S);
+  Code = serialization::EXPR_DEPENDENT_COAWAIT;
 }
 
 void ASTStmtWriter::VisitCapturedStmt(CapturedStmt *S) {
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index 7a2b6c63a361..2a8168733a35 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -8,4 +8,4 @@
 
 #define CLANG_VENDOR "FreeBSD "
 
-#define SVN_REVISION "309439"
+#define SVN_REVISION "310316"
diff --git a/lib/clang/include/lld/Config/Version.inc b/lib/clang/include/lld/Config/Version.inc
index 9b1f8136be4c..ef92c2ddbac0 100644
--- a/lib/clang/include/lld/Config/Version.inc
+++ b/lib/clang/include/lld/Config/Version.inc
@@ -4,5 +4,5 @@
 #define LLD_VERSION_STRING "5.0.0"
 #define LLD_VERSION_MAJOR 5
 #define LLD_VERSION_MINOR 0
-#define LLD_REVISION_STRING "309439"
+#define LLD_REVISION_STRING "310316"
 #define LLD_REPOSITORY_STRING "FreeBSD"
diff --git a/lib/clang/include/llvm/Support/VCSRevision.h b/lib/clang/include/llvm/Support/VCSRevision.h
index db5f51e6a37f..256c374a0696 100644
--- a/lib/clang/include/llvm/Support/VCSRevision.h
+++ b/lib/clang/include/llvm/Support/VCSRevision.h
@@ -1,2 +1,2 @@
 /* $FreeBSD$ */
-#define LLVM_REVISION "svn-r309439"
+#define LLVM_REVISION "svn-r310316"