Merge llvm, clang, lld, lldb, compiler-rt and libc++ release_70 branch

r339999, resolve conflicts, and bump version numbers.

PR:		230240,230355
Dimitry Andric 2018-08-18 12:11:17 +00:00
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/projects/clang700-import/; revision=338014
49 changed files with 943 additions and 461 deletions

View File

@ -328,6 +328,31 @@
# define _LIBCPP_NO_CFI
#endif
#if __ISO_C_VISIBLE >= 2011 || __cplusplus >= 201103L
# if defined(__FreeBSD__)
# define _LIBCPP_HAS_QUICK_EXIT
# define _LIBCPP_HAS_C11_FEATURES
# elif defined(__Fuchsia__)
# define _LIBCPP_HAS_QUICK_EXIT
# define _LIBCPP_HAS_TIMESPEC_GET
# define _LIBCPP_HAS_C11_FEATURES
# elif defined(__linux__)
# if !defined(_LIBCPP_HAS_MUSL_LIBC)
# if _LIBCPP_GLIBC_PREREQ(2, 15) || defined(__BIONIC__)
# define _LIBCPP_HAS_QUICK_EXIT
# endif
# if _LIBCPP_GLIBC_PREREQ(2, 17)
# define _LIBCPP_HAS_C11_FEATURES
# define _LIBCPP_HAS_TIMESPEC_GET
# endif
# else // defined(_LIBCPP_HAS_MUSL_LIBC)
# define _LIBCPP_HAS_QUICK_EXIT
# define _LIBCPP_HAS_TIMESPEC_GET
# define _LIBCPP_HAS_C11_FEATURES
# endif
# endif // __linux__
#endif
#if defined(_LIBCPP_COMPILER_CLANG)
// _LIBCPP_ALTERNATE_STRING_LAYOUT is an old name for
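The hunk above centralises libc++'s platform detection for quick_exit(), timespec_get(), and the other C11 library features; the <cstdlib> and <ctime> hunks further below consume the resulting macros. A minimal sketch of that consumer pattern, with a hypothetical wrapper name (user code would normally test its own feature macros rather than libc++-internal ones):

#include <cstdlib>

// Hypothetical wrapper: prefer quick_exit() where the C library provides it
// (FreeBSD, Fuchsia, Bionic, musl, or glibc >= 2.15 per the detection above),
// otherwise fall back to _Exit(), which terminates without running handlers.
inline void exit_without_cleanup(int status) {
#if !defined(_LIBCPP_CXX03_LANG) && defined(_LIBCPP_HAS_QUICK_EXIT)
  std::quick_exit(status);   // runs at_quick_exit handlers, skips atexit ones
#else
  std::_Exit(status);
#endif
}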
@ -430,28 +455,6 @@ typedef __char32_t char32_t;
#define _LIBCPP_HAS_NO_VARIABLE_TEMPLATES
#endif
#if __ISO_C_VISIBLE >= 2011 || __cplusplus >= 201103L
# if defined(__FreeBSD__)
# define _LIBCPP_HAS_QUICK_EXIT
# define _LIBCPP_HAS_C11_FEATURES
# elif defined(__Fuchsia__)
# define _LIBCPP_HAS_QUICK_EXIT
# define _LIBCPP_HAS_C11_FEATURES
# elif defined(__linux__)
# if !defined(_LIBCPP_HAS_MUSL_LIBC)
# if _LIBCPP_GLIBC_PREREQ(2, 15) || defined(__BIONIC__)
# define _LIBCPP_HAS_QUICK_EXIT
# endif
# if _LIBCPP_GLIBC_PREREQ(2, 17)
# define _LIBCPP_HAS_C11_FEATURES
# endif
# else // defined(_LIBCPP_HAS_MUSL_LIBC)
# define _LIBCPP_HAS_QUICK_EXIT
# define _LIBCPP_HAS_C11_FEATURES
# endif
# endif // __linux__
#endif
#if !(__has_feature(cxx_noexcept))
#define _LIBCPP_HAS_NO_NOEXCEPT
#endif
@ -801,8 +804,20 @@ namespace std {
# define _LIBCPP_INTERNAL_LINKAGE _LIBCPP_ALWAYS_INLINE
#endif
#ifndef _LIBCPP_HIDE_FROM_ABI_PER_TU
# ifndef _LIBCPP_HIDE_FROM_ABI_PER_TU_BY_DEFAULT
# define _LIBCPP_HIDE_FROM_ABI_PER_TU 0
# else
# define _LIBCPP_HIDE_FROM_ABI_PER_TU 1
# endif
#endif
#ifndef _LIBCPP_HIDE_FROM_ABI
# define _LIBCPP_HIDE_FROM_ABI _LIBCPP_HIDDEN _LIBCPP_INTERNAL_LINKAGE
# if _LIBCPP_HIDE_FROM_ABI_PER_TU
# define _LIBCPP_HIDE_FROM_ABI _LIBCPP_HIDDEN _LIBCPP_INTERNAL_LINKAGE
# else
# define _LIBCPP_HIDE_FROM_ABI _LIBCPP_HIDDEN _LIBCPP_ALWAYS_INLINE
# endif
#endif
// Just so we can migrate to _LIBCPP_HIDE_FROM_ABI gradually.
@ -991,6 +1006,11 @@ template <unsigned> struct __static_assert_check {};
# endif
#endif // defined(__APPLE__)
#if !defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION) && \
!defined(_LIBCPP_BUILDING_LIBRARY) && \
(!defined(__cpp_aligned_new) || __cpp_aligned_new < 201606)
# define _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
#endif
#if defined(__APPLE__) || defined(__FreeBSD__)
#define _LIBCPP_HAS_DEFAULTRUNELOCALE
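The block above checks the compiler's __cpp_aligned_new feature-test macro before assuming C++17 aligned allocation is usable outside the library build. A short sketch of what that feature gates (the helper is hypothetical; the posix_memalign fallback is POSIX-only and shown purely for illustration):

#include <cstdlib>
#include <new>

struct alignas(64) CacheLine { char bytes[64]; };

CacheLine *make_line() {
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606
  // The new-expression routes to operator new(std::size_t, std::align_val_t).
  return new CacheLine();
#else
  // Illustration-only fallback: the default operator new guarantees at most
  // alignof(std::max_align_t), so over-aligned storage has to come from
  // somewhere else; an object created this way must later be freed with free().
  void *p = nullptr;
  if (posix_memalign(&p, alignof(CacheLine), sizeof(CacheLine)) != 0)
    throw std::bad_alloc();
  return new (p) CacheLine();
#endif
}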

View File

@ -151,11 +151,11 @@ using ::mbtowc;
using ::wctomb;
using ::mbstowcs;
using ::wcstombs;
#ifdef _LIBCPP_HAS_QUICK_EXIT
#if !defined(_LIBCPP_CXX03_LANG) && defined(_LIBCPP_HAS_QUICK_EXIT)
using ::at_quick_exit;
using ::quick_exit;
#endif
#ifdef _LIBCPP_HAS_C11_FEATURES
#if _LIBCPP_STD_VER > 14 && defined(_LIBCPP_HAS_C11_FEATURES)
using ::aligned_alloc;
#endif

View File

@ -73,7 +73,7 @@ using ::gmtime;
using ::localtime;
#endif
using ::strftime;
#if _LIBCPP_STD_VER > 14 && defined(_LIBCPP_HAS_C11_FEATURES)
#if _LIBCPP_STD_VER > 14 && defined(_LIBCPP_HAS_TIMESPEC_GET)
using ::timespec_get;
#endif

View File

@ -108,13 +108,6 @@ void operator delete[](void* ptr, void*) noexcept;
# define _LIBCPP_HAS_NO_SIZED_DEALLOCATION
#endif
#if !defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION) && \
(!(defined(_LIBCPP_BUILDING_LIBRARY) || _LIBCPP_STD_VER > 14 || \
(defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606)))
# define _LIBCPP_HAS_NO_ALIGNED_ALLOCATION
#endif
#if !__has_builtin(__builtin_operator_new) || \
__has_builtin(__builtin_operator_new) < 201802L || \
defined(_LIBCPP_HAS_NO_ALIGNED_ALLOCATION) || \

View File

@ -801,14 +801,15 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);
// If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
// We cannot exclude byval arguments here; these belong to the caller of
// the current function not to the current function, and a tail callee
// may reference them.
// Calls marked 'tail' cannot read or write allocas from the current frame
// because the current frame might be destroyed by the time they run. However,
// a tail call may use an alloca with byval. Calling with byval copies the
// contents of the alloca into argument registers or stack slots, so there is
// no lifetime issue.
if (isa<AllocaInst>(Object))
if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
if (CI->isTailCall())
if (CI->isTailCall() &&
!CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
return ModRefInfo::NoModRef;
// If the pointer is to a locally allocated object that does not escape,

View File

@ -1338,7 +1338,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
const unsigned Width = Op0->getType()->getScalarSizeInBits();
const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
if (EffWidthY <= ShRAmt->getZExtValue())
if (ShRAmt->uge(EffWidthY))
return X;
}
@ -1878,9 +1878,9 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
m_Value(XShifted)),
m_Value(Y)))) {
const unsigned ShftCnt = ShAmt->getZExtValue();
const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
const unsigned Width = Op0->getType()->getScalarSizeInBits();
const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
if (EffWidthY <= ShftCnt) {
const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,

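Both hunks above replace raw getZExtValue() calls on shift amounts with width-safe APInt operations; for integer types wider than 64 bits, a sufficiently large shift amount would otherwise trip the assertion inside getZExtValue(). A minimal sketch of the difference (helper names are made up):

#include "llvm/ADT/APInt.h"

// Compares at the APInt's full precision instead of forcing the value into
// a uint64_t first, matching the rewritten SimplifyLShrInst check.
bool shiftCoversAllUsefulBits(const llvm::APInt &ShRAmt, unsigned EffWidthY) {
  return ShRAmt.uge(EffWidthY);   // old form: EffWidthY <= ShRAmt.getZExtValue()
}

// Clamps to Width without asserting, matching the SimplifyAndInst change.
unsigned clampedShiftCount(const llvm::APInt &ShAmt, unsigned Width) {
  return ShAmt.getLimitedValue(Width);
}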
View File

@ -258,13 +258,18 @@ static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
// These intrinsics will show up as affecting memory, but they are just
// markers.
// markers, mostly.
//
// FIXME: We probably don't actually want MemorySSA to model these at all
// (including creating MemoryAccesses for them): we just end up inventing
// clobbers where they don't really exist at all. Please see D43269 for
// context.
switch (II->getIntrinsicID()) {
case Intrinsic::lifetime_start:
if (UseCS)
return {false, NoAlias};
AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
return {AR == MustAlias, AR};
return {AR != NoAlias, AR};
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:

View File

@ -1778,15 +1778,16 @@ SDValue DAGTypeLegalizer::PromoteFloatOp_BITCAST(SDNode *N, unsigned OpNo) {
SDValue Op = N->getOperand(0);
EVT OpVT = Op->getValueType(0);
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
assert (IVT == N->getValueType(0) && "Bitcast to type of different size");
SDValue Promoted = GetPromotedFloat(N->getOperand(0));
EVT PromotedVT = Promoted->getValueType(0);
// Convert the promoted float value to the desired IVT.
return DAG.getNode(GetPromotionOpcode(PromotedVT, OpVT), SDLoc(N), IVT,
Promoted);
EVT IVT = EVT::getIntegerVT(*DAG.getContext(), OpVT.getSizeInBits());
SDValue Convert = DAG.getNode(GetPromotionOpcode(PromotedVT, OpVT), SDLoc(N),
IVT, Promoted);
// The final result type might not be a scalar so we need a bitcast. The
// bitcast will be further legalized if needed.
return DAG.getBitcast(N->getValueType(0), Convert);
}
// Promote Operand 1 of FCOPYSIGN. Operand 0 ought to be handled by
@ -1941,8 +1942,12 @@ void DAGTypeLegalizer::PromoteFloatResult(SDNode *N, unsigned ResNo) {
SDValue DAGTypeLegalizer::PromoteFloatRes_BITCAST(SDNode *N) {
EVT VT = N->getValueType(0);
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT,
N->getOperand(0));
// Input type isn't guaranteed to be a scalar int so bitcast if not. The
// bitcast will be legalized further if necessary.
EVT IVT = EVT::getIntegerVT(*DAG.getContext(),
N->getOperand(0).getValueType().getSizeInBits());
SDValue Cast = DAG.getBitcast(IVT, N->getOperand(0));
return DAG.getNode(GetPromotionOpcode(VT, NVT), SDLoc(N), NVT, Cast);
}
SDValue DAGTypeLegalizer::PromoteFloatRes_ConstantFP(SDNode *N) {

View File

@ -269,8 +269,8 @@ SDValue DAGTypeLegalizer::PromoteIntRes_BITCAST(SDNode *N) {
return DAG.getNode(ISD::ANY_EXTEND, dl, NOutVT, GetSoftenedFloat(InOp));
case TargetLowering::TypePromoteFloat: {
// Convert the promoted float by hand.
SDValue PromotedOp = GetPromotedFloat(InOp);
return DAG.getNode(ISD::FP_TO_FP16, dl, NOutVT, PromotedOp);
if (!NOutVT.isVector())
return DAG.getNode(ISD::FP_TO_FP16, dl, NOutVT, GetPromotedFloat(InOp));
break;
}
case TargetLowering::TypeExpandInteger:

View File

@ -2374,7 +2374,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
// Offset the demanded elts by the subvector index.
uint64_t Idx = SubIdx->getZExtValue();
APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
computeKnownBits(Src, Known, DemandedSrc, Depth + 1);
} else {
computeKnownBits(Src, Known, Depth + 1);
@ -3533,7 +3533,7 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
// Offset the demanded elts by the subvector index.
uint64_t Idx = SubIdx->getZExtValue();
APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
}
return ComputeNumSignBits(Src, Depth + 1);

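Both computeKnownBits and ComputeNumSignBits above switch from zext to zextOrSelf when widening the demanded-elements mask: APInt::zext asserts that the new width is strictly larger, while zextOrSelf is a no-op when the mask is already that wide, which covers EXTRACT_SUBVECTOR nodes whose source has the same element count. A small sketch of the behaviour being relied on (helper name made up):

#include "llvm/ADT/APInt.h"

llvm::APInt widenDemandedElts(const llvm::APInt &DemandedElts,
                              unsigned NumSrcElts, unsigned Idx) {
  // Safe even when DemandedElts.getBitWidth() == NumSrcElts; plain zext()
  // would assert in that case.
  return DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
}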
View File

@ -7768,10 +7768,29 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
SDValue Val = RetValRegs.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(),
Chain, &Flag, CS.getInstruction());
// FIXME: Why don't we do this for inline asms with MRVs?
if (CS.getType()->isSingleValueType() && CS.getType()->isSized()) {
EVT ResultType = TLI.getValueType(DAG.getDataLayout(), CS.getType());
llvm::Type *CSResultType = CS.getType();
unsigned numRet;
ArrayRef<Type *> ResultTypes;
SmallVector<SDValue, 1> ResultValues(1);
if (CSResultType->isSingleValueType()) {
numRet = 1;
ResultValues[0] = Val;
ResultTypes = makeArrayRef(CSResultType);
} else {
numRet = CSResultType->getNumContainedTypes();
assert(Val->getNumOperands() == numRet &&
"Mismatch in number of output operands in asm result");
ResultTypes = CSResultType->subtypes();
ArrayRef<SDUse> ValueUses = Val->ops();
ResultValues.resize(numRet);
std::transform(ValueUses.begin(), ValueUses.end(), ResultValues.begin(),
[](const SDUse &u) -> SDValue { return u.get(); });
}
SmallVector<EVT, 1> ResultVTs(numRet);
for (unsigned i = 0; i < numRet; i++) {
EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), ResultTypes[i]);
SDValue Val = ResultValues[i];
assert(ResultTypes[i]->isSized() && "Unexpected unsized type");
// If the type of the inline asm call site return value is different but
// has the same size as the type of the asm output, bitcast it. One example
// of this is for vectors with different width / number of elements.
@ -7782,22 +7801,24 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// This can also happen for a return value that disagrees with the
// register class it is put in, eg. a double in a general-purpose
// register on a 32-bit machine.
if (ResultType != Val.getValueType() &&
ResultType.getSizeInBits() == Val.getValueSizeInBits()) {
Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(),
ResultType, Val);
} else if (ResultType != Val.getValueType() &&
ResultType.isInteger() && Val.getValueType().isInteger()) {
// If a result value was tied to an input value, the computed result may
// have a wider width than the expected result. Extract the relevant
// portion.
Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultType, Val);
if (ResultVT != Val.getValueType() &&
ResultVT.getSizeInBits() == Val.getValueSizeInBits())
Val = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, Val);
else if (ResultVT != Val.getValueType() && ResultVT.isInteger() &&
Val.getValueType().isInteger()) {
// If a result value was tied to an input value, the computed result
// may have a wider width than the expected result. Extract the
// relevant portion.
Val = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, Val);
}
assert(ResultType == Val.getValueType() && "Asm result value mismatch!");
assert(ResultVT == Val.getValueType() && "Asm result value mismatch!");
ResultVTs[i] = ResultVT;
ResultValues[i] = Val;
}
Val = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
DAG.getVTList(ResultVTs), ResultValues);
setValue(CS.getInstruction(), Val);
// Don't need to use this as a chain in this case.
if (!IA->hasSideEffects() && !hasMemory && IndirectStoresToEmit.empty())

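The rewritten block above generalises inline-asm result handling from a single value to an aggregate, so asm statements with several outputs (which clang lowers to a call returning a literal struct) get each member bitcast or truncated individually before being merged back together. A hypothetical user-level snippet that exercises this path on x86 (the function name and constraints are illustrative only):

unsigned add_reporting_carry(unsigned a, unsigned b, unsigned char *carry_out) {
  unsigned sum = a;
  unsigned char carry;
  // Two outputs: the IR-level call for this asm returns { i32, i8 }.
  __asm__("addl %2, %0\n\t"
          "setc %1"
          : "+r"(sum), "=q"(carry)
          : "r"(b)
          : "cc");
  *carry_out = carry;
  return sum;
}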
View File

@ -12007,10 +12007,15 @@ static SDValue combineBVOfVecSExt(SDNode *N, SelectionDAG &DAG) {
auto isSExtOfVecExtract = [&](SDValue Op) -> bool {
if (!Op)
return false;
if (Op.getOpcode() != ISD::SIGN_EXTEND)
if (Op.getOpcode() != ISD::SIGN_EXTEND &&
Op.getOpcode() != ISD::SIGN_EXTEND_INREG)
return false;
// A SIGN_EXTEND_INREG might be fed by an ANY_EXTEND to produce a value
// of the right width.
SDValue Extract = Op.getOperand(0);
if (Extract.getOpcode() == ISD::ANY_EXTEND)
Extract = Extract.getOperand(0);
if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
return false;
@ -12098,8 +12103,10 @@ SDValue PPCTargetLowering::DAGCombineBuildVector(SDNode *N,
return Reduced;
// If we're building a vector out of extended elements from another vector
// we have P9 vector integer extend instructions.
if (Subtarget.hasP9Altivec()) {
// we have P9 vector integer extend instructions. The code assumes legal
// input types (i.e. it can't handle things like v4i16) so do not run before
// legalization.
if (Subtarget.hasP9Altivec() && !DCI.isBeforeLegalize()) {
Reduced = combineBVOfVecSExt(N, DAG);
if (Reduced)
return Reduced;

View File

@ -97,6 +97,7 @@ class X86FlagsCopyLoweringPass : public MachineFunctionPass {
private:
MachineRegisterInfo *MRI;
const X86Subtarget *Subtarget;
const X86InstrInfo *TII;
const TargetRegisterInfo *TRI;
const TargetRegisterClass *PromoteRC;
@ -346,10 +347,10 @@ bool X86FlagsCopyLoweringPass::runOnMachineFunction(MachineFunction &MF) {
LLVM_DEBUG(dbgs() << "********** " << getPassName() << " : " << MF.getName()
<< " **********\n");
auto &Subtarget = MF.getSubtarget<X86Subtarget>();
Subtarget = &MF.getSubtarget<X86Subtarget>();
MRI = &MF.getRegInfo();
TII = Subtarget.getInstrInfo();
TRI = Subtarget.getRegisterInfo();
TII = Subtarget->getInstrInfo();
TRI = Subtarget->getRegisterInfo();
MDT = &getAnalysis<MachineDominatorTree>();
PromoteRC = &X86::GR8RegClass;
@ -960,10 +961,14 @@ void X86FlagsCopyLoweringPass::rewriteSetCarryExtended(
.addReg(Reg)
.addImm(SubRegIdx[OrigRegSize]);
} else if (OrigRegSize > TargetRegSize) {
BuildMI(MBB, SetPos, SetLoc, TII->get(TargetOpcode::EXTRACT_SUBREG),
if (TargetRegSize == 1 && !Subtarget->is64Bit()) {
// Need to constrain the register class.
MRI->constrainRegClass(Reg, &X86::GR32_ABCDRegClass);
}
BuildMI(MBB, SetPos, SetLoc, TII->get(TargetOpcode::COPY),
NewReg)
.addReg(Reg)
.addImm(SubRegIdx[TargetRegSize]);
.addReg(Reg, 0, SubRegIdx[TargetRegSize]);
} else {
BuildMI(MBB, SetPos, SetLoc, TII->get(TargetOpcode::COPY), NewReg)
.addReg(Reg);

View File

@ -3109,14 +3109,8 @@ Value *BoUpSLP::vectorizeTree(TreeEntry *E) {
}
if (NeedToShuffleReuses) {
// TODO: Merge this shuffle with the ReorderShuffleMask.
if (!E->ReorderIndices.empty())
if (E->ReorderIndices.empty())
Builder.SetInsertPoint(VL0);
else if (auto *I = dyn_cast<Instruction>(V))
Builder.SetInsertPoint(I->getParent(),
std::next(I->getIterator()));
else
Builder.SetInsertPoint(&F->getEntryBlock(),
F->getEntryBlock().getFirstInsertionPt());
V = Builder.CreateShuffleVector(V, UndefValue::get(VecTy),
E->ReuseShuffleIndices, "shuffle");
}

View File

@ -2718,7 +2718,7 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// predicate.
void forEachMultiversionedFunctionVersion(
const FunctionDecl *FD,
llvm::function_ref<void(const FunctionDecl *)> Pred) const;
llvm::function_ref<void(FunctionDecl *)> Pred) const;
const CXXConstructorDecl *
getCopyConstructorForExceptionObject(CXXRecordDecl *RD);

View File

@ -922,8 +922,11 @@ class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit {
/// \endcode
/// In this example directive '#pragma omp for' has 'ordered' clause with
/// parameter 2.
class OMPOrderedClause : public OMPClause {
class OMPOrderedClause final
: public OMPClause,
private llvm::TrailingObjects<OMPOrderedClause, Expr *> {
friend class OMPClauseReader;
friend TrailingObjects;
/// Location of '('.
SourceLocation LParenLoc;
@ -931,6 +934,26 @@ class OMPOrderedClause : public OMPClause {
/// Number of for-loops.
Stmt *NumForLoops = nullptr;
/// Real number of loops.
unsigned NumberOfLoops = 0;
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num), NumberOfLoops(NumLoops) {}
/// Build an empty clause.
explicit OMPOrderedClause(unsigned NumLoops)
: OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()),
NumberOfLoops(NumLoops) {}
/// Set the number of associated for-loops.
void setNumForLoops(Expr *Num) { NumForLoops = Num; }
@ -938,17 +961,17 @@ class OMPOrderedClause : public OMPClause {
/// Build 'ordered' clause.
///
/// \param Num Expression, possibly associated with this clause.
/// \param NumLoops Number of loops, associated with this clause.
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
OMPOrderedClause(Expr *Num, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc)
: OMPClause(OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc),
NumForLoops(Num) {}
static OMPOrderedClause *Create(const ASTContext &C, Expr *Num,
unsigned NumLoops, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Build an empty clause.
explicit OMPOrderedClause()
: OMPClause(OMPC_ordered, SourceLocation(), SourceLocation()) {}
static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops);
/// Sets the location of '('.
void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }
@ -959,6 +982,17 @@ class OMPOrderedClause : public OMPClause {
/// Return the number of associated for-loops.
Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); }
/// Set number of iterations for the specified loop.
void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations);
/// Get number of iterations for all the loops.
ArrayRef<Expr *> getLoopNumIterations() const;
/// Set loop counter for the specified loop.
void setLoopCounter(unsigned NumLoop, Expr *Counter);
/// Get the loop counter for the specified loop.
Expr *getLoopCunter(unsigned NumLoop);
const Expr *getLoopCunter(unsigned NumLoop) const;
child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); }
static bool classof(const OMPClause *T) {
@ -3087,24 +3121,32 @@ class OMPDependClause final
/// Colon location.
SourceLocation ColonLoc;
/// Number of loops, associated with the depend clause.
unsigned NumLoops = 0;
/// Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.
/// \param LParenLoc Location of '('.
/// \param EndLoc Ending location of the clause.
/// \param N Number of the variables in the clause.
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, unsigned N)
SourceLocation EndLoc, unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, StartLoc, LParenLoc,
EndLoc, N) {}
EndLoc, N), NumLoops(NumLoops) {}
/// Build an empty clause.
///
/// \param N Number of variables.
explicit OMPDependClause(unsigned N)
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
explicit OMPDependClause(unsigned N, unsigned NumLoops)
: OMPVarListClause<OMPDependClause>(OMPC_depend, SourceLocation(),
SourceLocation(), SourceLocation(),
N) {}
N),
NumLoops(NumLoops) {}
/// Set dependency kind.
void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; }
@ -3126,16 +3168,23 @@ class OMPDependClause final
/// \param DepLoc Location of the dependency type.
/// \param ColonLoc Colon location.
/// \param VL List of references to the variables.
static OMPDependClause *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL);
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc,
OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VL, unsigned NumLoops);
/// Creates an empty clause with \a N variables.
///
/// \param C AST context.
/// \param N The number of variables.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N);
/// \param NumLoops Number of loops that is associated with this depend
/// clause.
static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops);
/// Get dependency type.
OpenMPDependClauseKind getDependencyKind() const { return DepKind; }
@ -3146,15 +3195,16 @@ class OMPDependClause final
/// Get colon location.
SourceLocation getColonLoc() const { return ColonLoc; }
/// Set the loop counter value for the depend clauses with 'sink|source' kind
/// of dependency. Required for codegen.
void setCounterValue(Expr *V);
/// Get number of loops associated with the clause.
unsigned getNumLoops() const { return NumLoops; }
/// Get the loop counter value.
Expr *getCounterValue();
/// Set the loop data for the depend clauses with 'sink|source' kind of
/// dependency.
void setLoopData(unsigned NumLoop, Expr *Cnt);
/// Get the loop counter value.
const Expr *getCounterValue() const;
/// Get the loop data.
Expr *getLoopData(unsigned NumLoop);
const Expr *getLoopData(unsigned NumLoop) const;
child_range children() {
return child_range(reinterpret_cast<Stmt **>(varlist_begin()),

View File

@ -2938,9 +2938,10 @@ def OMPDeclareSimdDecl : Attr {
}];
}
def OMPDeclareTargetDecl : Attr {
def OMPDeclareTargetDecl : InheritableAttr {
let Spellings = [Pragma<"omp", "declare target">];
let SemaHandler = 0;
let Subjects = SubjectList<[Function, SharedVar]>;
let Documentation = [OMPDeclareTargetDocs];
let Args = [
EnumArgument<"MapType", "MapTypeTy",
@ -2953,6 +2954,15 @@ def OMPDeclareTargetDecl : Attr {
if (getMapType() != MT_To)
OS << ' ' << ConvertMapTypeTyToStr(getMapType());
}
static llvm::Optional<MapTypeTy>
isDeclareTargetDeclaration(const ValueDecl *VD) {
if (!VD->hasAttrs())
return llvm::None;
if (const auto *Attr = VD->getAttr<OMPDeclareTargetDeclAttr>())
return Attr->getMapType();
return llvm::None;
}
}];
}

View File

@ -364,7 +364,6 @@ def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
def NullPointerArithmetic : DiagGroup<"null-pointer-arithmetic">;
def : DiagGroup<"effc++", [NonVirtualDtor]>;
def OveralignedType : DiagGroup<"over-aligned">;
def AlignedAllocationUnavailable : DiagGroup<"aligned-allocation-unavailable">;
def OldStyleCast : DiagGroup<"old-style-cast">;
def : DiagGroup<"old-style-definition">;
def OutOfLineDeclaration : DiagGroup<"out-of-line-declaration">;

View File

@ -6465,12 +6465,12 @@ def warn_overaligned_type : Warning<
"type %0 requires %1 bytes of alignment and the default allocator only "
"guarantees %2 bytes">,
InGroup<OveralignedType>, DefaultIgnore;
def warn_aligned_allocation_unavailable :Warning<
def err_aligned_allocation_unavailable : Error<
"aligned %select{allocation|deallocation}0 function of type '%1' is only "
"available on %2 %3 or newer">, InGroup<AlignedAllocationUnavailable>, DefaultError;
"available on %2 %3 or newer">;
def note_silence_unligned_allocation_unavailable : Note<
"if you supply your own aligned allocation functions, use "
"-Wno-aligned-allocation-unavailable to silence this diagnostic">;
"-faligned-allocation to silence this diagnostic">;
def err_conditional_void_nonvoid : Error<
"%select{left|right}1 operand to ? is void, but %select{right|left}1 operand "

View File

@ -1488,7 +1488,7 @@ def fobjc_weak : Flag<["-"], "fobjc-weak">, Group<f_Group>, Flags<[CC1Option]>,
HelpText<"Enable ARC-style weak references in Objective-C">;
// Objective-C ABI options.
def fobjc_runtime_EQ : Joined<["-"], "fobjc-runtime=">, Group<f_Group>, Flags<[CC1Option]>,
def fobjc_runtime_EQ : Joined<["-"], "fobjc-runtime=">, Group<f_Group>, Flags<[CC1Option, CoreOption]>,
HelpText<"Specify the target Objective-C runtime kind and version">;
def fobjc_abi_version_EQ : Joined<["-"], "fobjc-abi-version=">, Group<f_Group>;
def fobjc_nonfragile_abi_version_EQ : Joined<["-"], "fobjc-nonfragile-abi-version=">, Group<f_Group>;

View File

@ -9798,20 +9798,16 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
return true;
// If the decl is marked as `declare target`, it should be emitted.
for (const auto *Decl : D->redecls()) {
if (!Decl->hasAttrs())
continue;
if (const auto *Attr = Decl->getAttr<OMPDeclareTargetDeclAttr>())
if (Attr->getMapType() != OMPDeclareTargetDeclAttr::MT_Link)
return true;
}
if (const llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return *Res != OMPDeclareTargetDeclAttr::MT_Link;
return false;
}
void ASTContext::forEachMultiversionedFunctionVersion(
const FunctionDecl *FD,
llvm::function_ref<void(const FunctionDecl *)> Pred) const {
llvm::function_ref<void(FunctionDecl *)> Pred) const {
assert(FD->isMultiVersion() && "Only valid for multiversioned functions");
llvm::SmallDenseSet<const FunctionDecl*, 4> SeenDecls;
FD = FD->getCanonicalDecl();

View File

@ -1091,6 +1091,10 @@ void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
printTemplateParameters(FD->getTemplateParameterList(I));
}
VisitRedeclarableTemplateDecl(D);
// The declare target attribute is a special one: the natural spelling of the
// pragma assumes an "ending" construct, so print it here.
if (D->getTemplatedDecl()->hasAttr<OMPDeclareTargetDeclAttr>())
Out << "#pragma omp end declare target\n";
// Never print "instantiations" for deduction guides (they don't really
// have them).

View File

@ -445,7 +445,7 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
else
else if (!isa<ObjCInterfaceDecl>(D))
llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
@ -1884,13 +1884,13 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
llvm_unreachable("placeholder types shouldn't get to name mangling");
case BuiltinType::ObjCId:
mangleArtificalTagType(TTK_Struct, "objc_object");
mangleArtificalTagType(TTK_Struct, ".objc_object");
break;
case BuiltinType::ObjCClass:
mangleArtificalTagType(TTK_Struct, "objc_class");
mangleArtificalTagType(TTK_Struct, ".objc_class");
break;
case BuiltinType::ObjCSel:
mangleArtificalTagType(TTK_Struct, "objc_selector");
mangleArtificalTagType(TTK_Struct, ".objc_selector");
break;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
@ -2570,9 +2570,10 @@ void MicrosoftCXXNameMangler::mangleType(const DependentAddressSpaceType *T,
void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T, Qualifiers,
SourceRange) {
// ObjC interfaces have structs underlying them.
// ObjC interfaces are mangled as if they were structs with a name that is
// not a valid C/C++ identifier
mangleTagTypeKind(TTK_Struct);
mangleName(T->getDecl());
mangle(T->getDecl(), ".objc_cls_");
}
void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers,
@ -2590,11 +2591,11 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T, Qualifiers,
Out << "?$";
if (T->isObjCId())
mangleSourceName("objc_object");
mangleSourceName(".objc_object");
else if (T->isObjCClass())
mangleSourceName("objc_class");
mangleSourceName(".objc_class");
else
mangleSourceName(T->getInterface()->getName());
mangleSourceName((".objc_cls_" + T->getInterface()->getName()).str());
for (const auto &Q : T->quals())
mangleObjCProtocol(Q);

View File

@ -181,6 +181,57 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
return nullptr;
}
OMPOrderedClause *OMPOrderedClause::Create(const ASTContext &C, Expr *Num,
unsigned NumLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc) {
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * NumLoops));
auto *Clause =
new (Mem) OMPOrderedClause(Num, NumLoops, StartLoc, LParenLoc, EndLoc);
for (unsigned I = 0; I < NumLoops; ++I) {
Clause->setLoopNumIterations(I, nullptr);
Clause->setLoopCounter(I, nullptr);
}
return Clause;
}
OMPOrderedClause *OMPOrderedClause::CreateEmpty(const ASTContext &C,
unsigned NumLoops) {
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * NumLoops));
auto *Clause = new (Mem) OMPOrderedClause(NumLoops);
for (unsigned I = 0; I < NumLoops; ++I) {
Clause->setLoopNumIterations(I, nullptr);
Clause->setLoopCounter(I, nullptr);
}
return Clause;
}
void OMPOrderedClause::setLoopNumIterations(unsigned NumLoop,
Expr *NumIterations) {
assert(NumLoop < NumberOfLoops && "out of loops number.");
getTrailingObjects<Expr *>()[NumLoop] = NumIterations;
}
ArrayRef<Expr *> OMPOrderedClause::getLoopNumIterations() const {
return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumberOfLoops);
}
void OMPOrderedClause::setLoopCounter(unsigned NumLoop, Expr *Counter) {
assert(NumLoop < NumberOfLoops && "out of loops number.");
getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop] = Counter;
}
Expr *OMPOrderedClause::getLoopCunter(unsigned NumLoop) {
assert(NumLoop < NumberOfLoops && "out of loops number.");
return getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop];
}
const Expr *OMPOrderedClause::getLoopCunter(unsigned NumLoop) const {
assert(NumLoop < NumberOfLoops && "out of loops number.");
return getTrailingObjects<Expr *>()[NumberOfLoops + NumLoop];
}
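The Create/CreateEmpty pair above shows the llvm::TrailingObjects allocation pattern now used by OMPOrderedClause: a single allocation holds the clause object followed by 2 * NumLoops trailing Expr* slots, the first NumLoops for per-loop iteration counts and the next NumLoops for loop counters. A stripped-down sketch of that layout (all names below are made up; this is not the clang class itself):

#include "llvm/Support/TrailingObjects.h"
#include <new>

class Expr;  // stand-in for clang::Expr

class OrderedLayoutSketch final
    : private llvm::TrailingObjects<OrderedLayoutSketch, Expr *> {
  friend TrailingObjects;
  unsigned NumberOfLoops;
  explicit OrderedLayoutSketch(unsigned NumLoops) : NumberOfLoops(NumLoops) {}

public:
  static OrderedLayoutSketch *create(unsigned NumLoops) {
    // One allocation: the object plus 2 * NumLoops trailing Expr* slots.
    void *Mem = ::operator new(totalSizeToAlloc<Expr *>(2 * NumLoops));
    return new (Mem) OrderedLayoutSketch(NumLoops);
  }
  // First half of the trailing array: per-loop iteration counts.
  Expr *&numIterations(unsigned I) { return getTrailingObjects<Expr *>()[I]; }
  // Second half: per-loop counters.
  Expr *&counter(unsigned I) {
    return getTrailingObjects<Expr *>()[NumberOfLoops + I];
  }
};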
void OMPPrivateClause::setPrivateCopies(ArrayRef<Expr *> VL) {
assert(VL.size() == varlist_size() &&
"Number of private copies is not the same as the preallocated buffer");
@ -653,44 +704,58 @@ OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
return new (Mem) OMPFlushClause(N);
}
OMPDependClause *OMPDependClause::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc, OpenMPDependClauseKind DepKind,
SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + 1));
OMPDependClause *Clause =
new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
OMPDependClause *
OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VL,
unsigned NumLoops) {
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + NumLoops));
OMPDependClause *Clause = new (Mem)
OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
Clause->setVarRefs(VL);
Clause->setDependencyKind(DepKind);
Clause->setDependencyLoc(DepLoc);
Clause->setColonLoc(ColonLoc);
Clause->setCounterValue(nullptr);
for (unsigned I = 0 ; I < NumLoops; ++I)
Clause->setLoopData(I, nullptr);
return Clause;
}
OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + 1));
return new (Mem) OMPDependClause(N);
OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N,
unsigned NumLoops) {
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N + NumLoops));
return new (Mem) OMPDependClause(N, NumLoops);
}
void OMPDependClause::setCounterValue(Expr *V) {
assert(getDependencyKind() == OMPC_DEPEND_sink ||
getDependencyKind() == OMPC_DEPEND_source || V == nullptr);
*getVarRefs().end() = V;
void OMPDependClause::setLoopData(unsigned NumLoop, Expr *Cnt) {
assert((getDependencyKind() == OMPC_DEPEND_sink ||
getDependencyKind() == OMPC_DEPEND_source) &&
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
auto It = std::next(getVarRefs().end(), NumLoop);
*It = Cnt;
}
const Expr *OMPDependClause::getCounterValue() const {
auto *V = *getVarRefs().end();
assert(getDependencyKind() == OMPC_DEPEND_sink ||
getDependencyKind() == OMPC_DEPEND_source || V == nullptr);
return V;
Expr *OMPDependClause::getLoopData(unsigned NumLoop) {
assert((getDependencyKind() == OMPC_DEPEND_sink ||
getDependencyKind() == OMPC_DEPEND_source) &&
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
auto It = std::next(getVarRefs().end(), NumLoop);
return *It;
}
Expr *OMPDependClause::getCounterValue() {
auto *V = *getVarRefs().end();
assert(getDependencyKind() == OMPC_DEPEND_sink ||
getDependencyKind() == OMPC_DEPEND_source || V == nullptr);
return V;
const Expr *OMPDependClause::getLoopData(unsigned NumLoop) const {
assert((getDependencyKind() == OMPC_DEPEND_sink ||
getDependencyKind() == OMPC_DEPEND_source) &&
NumLoop < NumLoops &&
"Expected sink or source depend + loop index must be less number of "
"loops.");
auto It = std::next(getVarRefs().end(), NumLoop);
return *It;
}
unsigned OMPClauseMappableExprCommon::getComponentsTotalNumber(

View File

@ -1829,7 +1829,7 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
SmallString<128> Name;
{
llvm::raw_svector_ostream OS(Name);
const FunctionDecl *ParentSEHFn = ParentCGF.CurSEHParent;
const NamedDecl *ParentSEHFn = ParentCGF.CurSEHParent;
assert(ParentSEHFn && "No CurSEHParent!");
MangleContext &Mangler = CGM.getCXXABI().getMangleContext();
if (IsFilter)
@ -1972,6 +1972,11 @@ llvm::Value *CodeGenFunction::EmitSEHAbnormalTermination() {
return Builder.CreateZExt(&*AI, Int32Ty);
}
void CodeGenFunction::pushSEHCleanup(CleanupKind Kind,
llvm::Function *FinallyFunc) {
EHStack.pushCleanup<PerformSEHFinally>(Kind, FinallyFunc);
}
void CodeGenFunction::EnterSEHTryStmt(const SEHTryStmt &S) {
CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
if (const SEHFinallyStmt *Finally = S.getFinallyHandler()) {

View File

@ -18,6 +18,7 @@
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCXXABI.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@ -178,6 +179,9 @@ class CGObjCGNU : public CGObjCRuntime {
/// runtime provides some LLVM passes that can use this to do things like
/// automatic IMP caching and speculative inlining.
unsigned msgSendMDKind;
/// Does the current target use SEH-based exceptions? False implies
/// Itanium-style DWARF unwinding.
bool usesSEHExceptions;
/// Helper to check if we are targeting a specific runtime version or later.
bool isRuntime(ObjCRuntime::Kind kind, unsigned major, unsigned minor=0) {
@ -217,6 +221,7 @@ class CGObjCGNU : public CGObjCRuntime {
llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
auto *GV = new llvm::GlobalVariable(TheModule, value->getType(), true,
llvm::GlobalValue::LinkOnceODRLinkage, value, name);
GV->setComdat(TheModule.getOrInsertComdat(name));
if (Private)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
ConstStr = GV;
@ -510,8 +515,8 @@ class CGObjCGNU : public CGObjCRuntime {
/// Returns a selector with the specified type encoding. An empty string is
/// used to return an untyped selector (with the types field set to NULL).
virtual llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding);
virtual llvm::Value *GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding);
/// Returns the name of ivar offset variables. In the GNUstep v1 ABI, this
/// contains the class and ivar names, in the v2 ABI this contains the type
@ -810,8 +815,12 @@ class CGObjCGNUstep : public CGObjCGNU {
// Slot_t objc_slot_lookup_super(struct objc_super*, SEL);
SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
PtrToObjCSuperTy, SelectorTy);
// If we're in ObjC++ mode, then we want to make
if (CGM.getLangOpts().CPlusPlus) {
// If we're in ObjC++ mode, then we want to make
if (usesSEHExceptions) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void objc_exception_rethrow(void)
ExceptionReThrowFn.init(&CGM, "objc_exception_rethrow", VoidTy);
} else if (CGM.getLangOpts().CPlusPlus) {
llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
// void *__cxa_begin_catch(void *e)
EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy);
@ -888,22 +897,25 @@ class CGObjCGNUstep : public CGObjCGNU {
/// This is the ABI that provides a clean break with the legacy GCC ABI and
/// cleans up a number of things that were added to work around 1980s linkers.
class CGObjCGNUstep2 : public CGObjCGNUstep {
/// The section for selectors.
static constexpr const char *const SelSection = "__objc_selectors";
/// The section for classes.
static constexpr const char *const ClsSection = "__objc_classes";
/// The section for references to classes.
static constexpr const char *const ClsRefSection = "__objc_class_refs";
/// The section for categories.
static constexpr const char *const CatSection = "__objc_cats";
/// The section for protocols.
static constexpr const char *const ProtocolSection = "__objc_protocols";
/// The section for protocol references.
static constexpr const char *const ProtocolRefSection = "__objc_protocol_refs";
/// The section for class aliases
static constexpr const char *const ClassAliasSection = "__objc_class_aliases";
/// The section for constexpr constant strings
static constexpr const char *const ConstantStringSection = "__objc_constant_string";
enum SectionKind
{
SelectorSection = 0,
ClassSection,
ClassReferenceSection,
CategorySection,
ProtocolSection,
ProtocolReferenceSection,
ClassAliasSection,
ConstantStringSection
};
static const char *const SectionsBaseNames[8];
template<SectionKind K>
std::string sectionName() {
std::string name(SectionsBaseNames[K]);
if (CGM.getTriple().isOSBinFormatCOFF())
name += "$m";
return name;
}
/// The GCC ABI superclass message lookup function. Takes a pointer to a
/// structure describing the receiver and the class, and a selector as
/// arguments. Returns the IMP for the corresponding method.
@ -1069,7 +1081,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
isNamed ? StringRef(StringName) : ".objc_string",
Align, false, isNamed ? llvm::GlobalValue::LinkOnceODRLinkage
: llvm::GlobalValue::PrivateLinkage);
ObjCStrGV->setSection(ConstantStringSection);
ObjCStrGV->setSection(sectionName<ConstantStringSection>());
if (isNamed) {
ObjCStrGV->setComdat(TheModule.getOrInsertComdat(StringName));
ObjCStrGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
@ -1247,9 +1259,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
assert(!TheModule.getGlobalVariable(RefName));
// Emit a reference symbol.
auto GV = new llvm::GlobalVariable(TheModule, ProtocolPtrTy,
false, llvm::GlobalValue::ExternalLinkage,
false, llvm::GlobalValue::LinkOnceODRLinkage,
llvm::ConstantExpr::getBitCast(Protocol, ProtocolPtrTy), RefName);
GV->setSection(ProtocolRefSection);
GV->setComdat(TheModule.getOrInsertComdat(RefName));
GV->setSection(sectionName<ProtocolReferenceSection>());
GV->setAlignment(CGM.getPointerAlign().getQuantity());
Ref = GV;
}
@ -1282,9 +1295,22 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
EmittedProtocol = true;
auto SymName = SymbolForProtocol(ProtocolName);
auto *OldGV = TheModule.getGlobalVariable(SymName);
// Use the protocol definition, if there is one.
if (const ObjCProtocolDecl *Def = PD->getDefinition())
PD = Def;
else {
// If there is no definition, then create an external linkage symbol and
// hope that someone else fills it in for us (and fail to link if they
// don't).
assert(!OldGV);
Protocol = new llvm::GlobalVariable(TheModule, ProtocolTy,
/*isConstant*/false,
llvm::GlobalValue::ExternalLinkage, nullptr, SymName);
return Protocol;
}
SmallVector<llvm::Constant*, 16> Protocols;
for (const auto *PI : PD->protocols())
@ -1301,8 +1327,6 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
EmitProtocolMethodList(PD->class_methods(), ClassMethodList,
OptionalClassMethodList);
auto SymName = SymbolForProtocol(ProtocolName);
auto *OldGV = TheModule.getGlobalVariable(SymName);
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
ConstantInitBuilder builder(CGM);
@ -1326,7 +1350,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto *GV = ProtocolBuilder.finishAndCreateGlobal(SymName,
CGM.getPointerAlign(), false, llvm::GlobalValue::ExternalLinkage);
GV->setSection(ProtocolSection);
GV->setSection(sectionName<ProtocolSection>());
GV->setComdat(TheModule.getOrInsertComdat(SymName));
if (OldGV) {
OldGV->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GV,
@ -1342,8 +1366,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
return Val;
return llvm::ConstantExpr::getBitCast(Val, Ty);
}
llvm::Value *GetSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding) override {
llvm::Value *GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding) override {
return GetConstantSelector(Sel, TypeEncoding);
}
llvm::Constant *GetTypeString(llvm::StringRef TypeEncoding) {
@ -1359,6 +1383,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
TypeEncoding);
auto *GV = new llvm::GlobalVariable(TheModule, Init->getType(),
true, llvm::GlobalValue::LinkOnceODRLinkage, Init, TypesVarName);
GV->setComdat(TheModule.getOrInsertComdat(TypesVarName));
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
TypesGlobal = GV;
}
@ -1387,12 +1412,41 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
CGM.getPointerAlign(), false, llvm::GlobalValue::LinkOnceODRLinkage);
GV->setComdat(TheModule.getOrInsertComdat(SelVarName));
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
GV->setSection(SelSection);
GV->setSection(sectionName<SelectorSection>());
auto *SelVal = EnforceType(GV, SelectorTy);
return SelVal;
}
llvm::StructType *emptyStruct = nullptr;
/// Return pointers to the start and end of a section. On ELF platforms, we
/// use the __start_ and __stop_ symbols that GNU-compatible linkers will set
/// to the start and end of sections, as long as those section names are
/// valid identifiers and the symbols are referenced but not defined. On
/// Windows, we use the fact that MSVC-compatible linkers will lexically sort
/// by subsections and place everything that we want to reference in a middle
/// subsection and then insert zero-sized symbols in subsections a and z.
std::pair<llvm::Constant*,llvm::Constant*>
GetSectionBounds(StringRef Section) {
if (CGM.getTriple().isOSBinFormatCOFF()) {
if (emptyStruct == nullptr) {
emptyStruct = llvm::StructType::create(VMContext, ".objc_section_sentinel");
emptyStruct->setBody({}, /*isPacked*/true);
}
auto ZeroInit = llvm::Constant::getNullValue(emptyStruct);
auto Sym = [&](StringRef Prefix, StringRef SecSuffix) {
auto *Sym = new llvm::GlobalVariable(TheModule, emptyStruct,
/*isConstant*/false,
llvm::GlobalValue::LinkOnceODRLinkage, ZeroInit, Prefix +
Section);
Sym->setVisibility(llvm::GlobalValue::HiddenVisibility);
Sym->setSection((Section + SecSuffix).str());
Sym->setComdat(TheModule.getOrInsertComdat((Prefix +
Section).str()));
Sym->setAlignment(1);
return Sym;
};
return { Sym("__start_", "$a"), Sym("__stop", "$z") };
}
auto *Start = new llvm::GlobalVariable(TheModule, PtrTy,
/*isConstant*/false,
llvm::GlobalValue::ExternalLinkage, nullptr, StringRef("__start_") +
@ -1405,6 +1459,9 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
Stop->setVisibility(llvm::GlobalValue::HiddenVisibility);
return { Start, Stop };
}
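GetSectionBounds above leans on two linker behaviours: GNU-compatible ELF linkers synthesise __start_<name>/__stop_<name> symbols for any referenced section whose name is a valid identifier, and MSVC-compatible linkers sort $-suffixed subsections lexically, so the $a and $z sentinels bracket the real $m contents. A free-standing illustration of the ELF half (section and symbol names are made up, and it assumes the compiler lays the entries out contiguously):

#include <cstdio>

// 'used' keeps the entries alive even though nothing references them directly.
__attribute__((section("my_registry"), used))
static const char *entry1 = "first";
__attribute__((section("my_registry"), used))
static const char *entry2 = "second";

// Never defined anywhere: the linker materialises these because the section
// name is a valid identifier and the symbols are referenced but not defined.
extern const char *__start_my_registry[];
extern const char *__stop_my_registry[];

int main() {
  for (const char **p = __start_my_registry; p != __stop_my_registry; ++p)
    std::printf("%s\n", *p);
}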
CatchTypeInfo getCatchAllTypeInfo() override {
return CGM.getCXXABI().getCatchAllTypeInfo();
}
llvm::Function *ModuleInitFunction() override {
llvm::Function *LoadFunction = llvm::Function::Create(
llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
@ -1420,19 +1477,11 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
ConstantInitBuilder builder(CGM);
auto InitStructBuilder = builder.beginStruct();
InitStructBuilder.addInt(Int64Ty, 0);
auto addSection = [&](const char *section) {
auto bounds = GetSectionBounds(section);
for (auto *s : SectionsBaseNames) {
auto bounds = GetSectionBounds(s);
InitStructBuilder.add(bounds.first);
InitStructBuilder.add(bounds.second);
};
addSection(SelSection);
addSection(ClsSection);
addSection(ClsRefSection);
addSection(CatSection);
addSection(ProtocolSection);
addSection(ProtocolRefSection);
addSection(ClassAliasSection);
addSection(ConstantStringSection);
auto *InitStruct = InitStructBuilder.finishAndCreateGlobal(".objc_init",
CGM.getPointerAlign(), false, llvm::GlobalValue::LinkOnceODRLinkage);
InitStruct->setVisibility(llvm::GlobalValue::HiddenVisibility);
@ -1451,18 +1500,23 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Check that this hasn't been renamed. This shouldn't happen, because
// this function should be called precisely once.
assert(InitVar->getName() == ".objc_ctor");
InitVar->setSection(".ctors");
// In Windows, initialisers are sorted by the suffix. XCL is for library
// initialisers, which run before user initialisers. We are running
// Objective-C loads at the end of library load. This means +load methods
// will run before any other static constructors, but that static
// constructors can see a fully initialised Objective-C state.
if (CGM.getTriple().isOSBinFormatCOFF())
InitVar->setSection(".CRT$XCLz");
else
InitVar->setSection(".ctors");
InitVar->setVisibility(llvm::GlobalValue::HiddenVisibility);
InitVar->setComdat(TheModule.getOrInsertComdat(".objc_ctor"));
CGM.addCompilerUsedGlobal(InitVar);
CGM.addUsedGlobal(InitVar);
for (auto *C : Categories) {
auto *Cat = cast<llvm::GlobalVariable>(C->stripPointerCasts());
Cat->setSection(CatSection);
Cat->setSection(sectionName<CategorySection>());
CGM.addUsedGlobal(Cat);
}
// Add a null value for each special section so that we can always
// guarantee that the _start and _stop symbols will exist and be
// meaningful.
auto createNullGlobal = [&](StringRef Name, ArrayRef<llvm::Constant*> Init,
StringRef Section) {
auto nullBuilder = builder.beginStruct();
@ -1476,38 +1530,48 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
CGM.addUsedGlobal(GV);
return GV;
};
createNullGlobal(".objc_null_selector", {NULLPtr, NULLPtr}, SelSection);
if (Categories.empty())
createNullGlobal(".objc_null_category", {NULLPtr, NULLPtr,
NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr}, CatSection);
if (!EmittedClass) {
createNullGlobal(".objc_null_cls_init_ref", NULLPtr, ClsSection);
createNullGlobal(".objc_null_class_ref", { NULLPtr, NULLPtr },
ClsRefSection);
}
if (!EmittedProtocol)
createNullGlobal(".objc_null_protocol", {NULLPtr, NULLPtr, NULLPtr,
NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr,
NULLPtr}, ProtocolSection);
if (!EmittedProtocolRef)
createNullGlobal(".objc_null_protocol_ref", {NULLPtr}, ProtocolRefSection);
if (!ClassAliases.empty())
for (auto clsAlias : ClassAliases)
createNullGlobal(std::string(".objc_class_alias") +
clsAlias.second, { MakeConstantString(clsAlias.second),
GetClassVar(clsAlias.first) }, ClassAliasSection);
else
createNullGlobal(".objc_null_class_alias", { NULLPtr, NULLPtr },
ClassAliasSection);
if (ConstantStrings.empty()) {
auto i32Zero = llvm::ConstantInt::get(Int32Ty, 0);
createNullGlobal(".objc_null_constant_string", { NULLPtr, i32Zero,
i32Zero, i32Zero, i32Zero, NULLPtr }, ConstantStringSection);
for (auto clsAlias : ClassAliases)
createNullGlobal(std::string(".objc_class_alias") +
clsAlias.second, { MakeConstantString(clsAlias.second),
GetClassVar(clsAlias.first) }, sectionName<ClassAliasSection>());
// On ELF platforms, add a null value for each special section so that we
// can always guarantee that the _start and _stop symbols will exist and be
// meaningful. This is not required on COFF platforms, where our start and
// stop symbols will create the section.
if (!CGM.getTriple().isOSBinFormatCOFF()) {
createNullGlobal(".objc_null_selector", {NULLPtr, NULLPtr},
sectionName<SelectorSection>());
if (Categories.empty())
createNullGlobal(".objc_null_category", {NULLPtr, NULLPtr,
NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr},
sectionName<CategorySection>());
if (!EmittedClass) {
createNullGlobal(".objc_null_cls_init_ref", NULLPtr,
sectionName<ClassReferenceSection>());
createNullGlobal(".objc_null_class_ref", { NULLPtr, NULLPtr },
sectionName<ClassReferenceSection>());
}
if (!EmittedProtocol)
createNullGlobal(".objc_null_protocol", {NULLPtr, NULLPtr, NULLPtr,
NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr, NULLPtr,
NULLPtr}, sectionName<ProtocolSection>());
if (!EmittedProtocolRef)
createNullGlobal(".objc_null_protocol_ref", {NULLPtr},
sectionName<ProtocolReferenceSection>());
if (ClassAliases.empty())
createNullGlobal(".objc_null_class_alias", { NULLPtr, NULLPtr },
sectionName<ClassAliasSection>());
if (ConstantStrings.empty()) {
auto i32Zero = llvm::ConstantInt::get(Int32Ty, 0);
createNullGlobal(".objc_null_constant_string", { NULLPtr, i32Zero,
i32Zero, i32Zero, i32Zero, NULLPtr },
sectionName<ConstantStringSection>());
}
}
ConstantStrings.clear();
Categories.clear();
Classes.clear();
return nullptr;//CGObjCGNU::ModuleInitFunction();
return nullptr;
}
/// In the v2 ABI, ivar offset variables use the type encoding in their name
/// to trigger linker failures if the types don't match.
@ -1774,7 +1838,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
}
auto *classRefSymbol = GetClassVar(className);
classRefSymbol->setSection(ClsRefSection);
classRefSymbol->setSection(sectionName<ClassReferenceSection>());
classRefSymbol->setInitializer(llvm::ConstantExpr::getBitCast(classStruct, IdTy));
@ -1805,7 +1869,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto classInitRef = new llvm::GlobalVariable(TheModule,
classStruct->getType(), false, llvm::GlobalValue::ExternalLinkage,
classStruct, "._OBJC_INIT_CLASS_" + className);
classInitRef->setSection(ClsSection);
classInitRef->setSection(sectionName<ClassSection>());
CGM.addUsedGlobal(classInitRef);
EmittedClass = true;
@ -1829,6 +1893,18 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
};
const char *const CGObjCGNUstep2::SectionsBaseNames[8] =
{
"__objc_selectors",
"__objc_classes",
"__objc_class_refs",
"__objc_cats",
"__objc_protocols",
"__objc_protocol_refs",
"__objc_class_aliases",
"__objc_constant_string"
};
/// Support for the ObjFW runtime.
class CGObjCObjFW: public CGObjCGNU {
protected:
@ -1931,6 +2007,8 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
ProtocolVersion(protocolClassVersion), ClassABIVersion(classABI) {
msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
usesSEHExceptions =
cgm.getContext().getTargetInfo().getTriple().isWindowsMSVCEnvironment();
CodeGenTypes &Types = CGM.getTypes();
IntTy = cast<llvm::IntegerType>(
@ -2121,8 +2199,8 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
return Value;
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding) {
llvm::Value *CGObjCGNU::GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding) {
SmallVectorImpl<TypedSelector> &Types = SelectorTable[Sel];
llvm::GlobalAlias *SelValue = nullptr;
@ -2155,13 +2233,13 @@ Address CGObjCGNU::GetAddrOfSelector(CodeGenFunction &CGF, Selector Sel) {
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel) {
return GetSelector(CGF, Sel, std::string());
return GetTypedSelector(CGF, Sel, std::string());
}
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
std::string SelTypes = CGM.getContext().getObjCEncodingForMethodDecl(Method);
return GetSelector(CGF, Method->getSelector(), SelTypes);
return GetTypedSelector(CGF, Method->getSelector(), SelTypes);
}
llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
@ -2186,6 +2264,9 @@ llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
}
llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
if (usesSEHExceptions)
return CGM.getCXXABI().getAddrOfRTTIDescriptor(T);
if (!CGM.getLangOpts().CPlusPlus)
return CGObjCGNU::GetEHType(T);
@ -3726,6 +3807,7 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
const ObjCAtThrowStmt &S,
bool ClearInsertionPoint) {
llvm::Value *ExceptionAsObject;
bool isRethrow = false;
if (const Expr *ThrowExpr = S.getThrowExpr()) {
llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
@ -3734,11 +3816,24 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
"Unexpected rethrow outside @catch block.");
ExceptionAsObject = CGF.ObjCEHValueStack.back();
isRethrow = true;
}
if (isRethrow && usesSEHExceptions) {
// For SEH, ExceptionAsObject may be undef, because the catch handler is
// not passed it for catchalls and so it is not visible to the catch
// funclet. The real thrown object will still be live on the stack at this
// point and will be rethrown. If we are explicitly rethrowing the object
// that was passed into the `@catch` block, then this code path is not
// reached and we will instead call `objc_exception_throw` with an explicit
// argument.
CGF.EmitRuntimeCallOrInvoke(ExceptionReThrowFn).setDoesNotReturn();
}
else {
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
llvm::CallSite Throw =
CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
Throw.setDoesNotReturn();
}
ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
llvm::CallSite Throw =
CGF.EmitRuntimeCallOrInvoke(ExceptionThrowFn, ExceptionAsObject);
Throw.setDoesNotReturn();
CGF.Builder.CreateUnreachable();
if (ClearInsertionPoint)
CGF.Builder.ClearInsertionPoint();

View File

@ -15,6 +15,7 @@
#include "CGObjCRuntime.h"
#include "CGCleanup.h"
#include "CGCXXABI.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@ -22,6 +23,7 @@
#include "clang/AST/StmtObjC.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/Support/SaveAndRestore.h"
using namespace clang;
using namespace CodeGen;
@ -120,6 +122,8 @@ namespace {
const Stmt *Body;
llvm::BasicBlock *Block;
llvm::Constant *TypeInfo;
/// Flags used to differentiate cleanups and catchalls in Windows SEH
unsigned Flags;
};
struct CallObjCEndCatch final : EHScopeStack::Cleanup {
@ -148,13 +152,17 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
if (S.getNumCatchStmts())
Cont = CGF.getJumpDestInCurrentScope("eh.cont");
bool useFunclets = EHPersonality::get(CGF).usesFuncletPads();
CodeGenFunction::FinallyInfo FinallyInfo;
if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
FinallyInfo.enter(CGF, Finally->getFinallyBody(),
beginCatchFn, endCatchFn, exceptionRethrowFn);
if (!useFunclets)
if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
FinallyInfo.enter(CGF, Finally->getFinallyBody(),
beginCatchFn, endCatchFn, exceptionRethrowFn);
SmallVector<CatchHandler, 8> Handlers;
// Enter the catch, if there is one.
if (S.getNumCatchStmts()) {
for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
@ -166,10 +174,13 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
Handler.Variable = CatchDecl;
Handler.Body = CatchStmt->getCatchBody();
Handler.Block = CGF.createBasicBlock("catch");
Handler.Flags = 0;
// @catch(...) always matches.
if (!CatchDecl) {
Handler.TypeInfo = nullptr; // catch-all
auto catchAll = getCatchAllTypeInfo();
Handler.TypeInfo = catchAll.RTTI;
Handler.Flags = catchAll.Flags;
// Don't consider any other catches.
break;
}
@ -179,9 +190,31 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
Catch->setHandler(I, { Handlers[I].TypeInfo, Handlers[I].Flags }, Handlers[I].Block);
}
if (useFunclets)
if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
CodeGenFunction HelperCGF(CGM, /*suppressNewContext=*/true);
if (!CGF.CurSEHParent)
CGF.CurSEHParent = cast<NamedDecl>(CGF.CurFuncDecl);
// Outline the finally block.
const Stmt *FinallyBlock = Finally->getFinallyBody();
HelperCGF.startOutlinedSEHHelper(CGF, /*isFilter*/false, FinallyBlock);
// Emit the original filter expression, convert to i32, and return.
HelperCGF.EmitStmt(FinallyBlock);
HelperCGF.FinishFunction(FinallyBlock->getLocEnd());
llvm::Function *FinallyFunc = HelperCGF.CurFn;
// Push a cleanup for __finally blocks.
CGF.pushSEHCleanup(NormalAndEHCleanup, FinallyFunc);
}
// Emit the try body.
CGF.EmitStmt(S.getTryBody());
@ -197,6 +230,13 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CatchHandler &Handler = Handlers[I];
CGF.EmitBlock(Handler.Block);
llvm::CatchPadInst *CPI = nullptr;
SaveAndRestore<llvm::Instruction *> RestoreCurrentFuncletPad(CGF.CurrentFuncletPad);
if (useFunclets)
if ((CPI = dyn_cast_or_null<llvm::CatchPadInst>(Handler.Block->getFirstNonPHI()))) {
CGF.CurrentFuncletPad = CPI;
CPI->setOperand(2, CGF.getExceptionSlot().getPointer());
}
llvm::Value *RawExn = CGF.getExceptionFromSlot();
// Enter the catch.
@ -223,6 +263,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
CGF.EmitAutoVarDecl(*CatchParam);
EmitInitOfCatchParam(CGF, CastExn, CatchParam);
}
if (CPI)
CGF.EHStack.pushCleanup<CatchRetScope>(NormalCleanup, CPI);
CGF.ObjCEHValueStack.push_back(Exn);
CGF.EmitStmt(Handler.Body);
@ -232,13 +274,13 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
cleanups.ForceCleanup();
CGF.EmitBranchThroughCleanup(Cont);
}
}
// Go back to the try-statement fallthrough.
CGF.Builder.restoreIP(SavedIP);
// Pop out of the finally.
if (S.getFinallyStmt())
if (!useFunclets && S.getFinallyStmt())
FinallyInfo.exit(CGF);
if (Cont.isValid())
@ -277,7 +319,7 @@ namespace {
: SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
CGF.EmitNounwindRuntimeCall(SyncExitFn, SyncArg);
}
};
}

View File

@ -17,6 +17,7 @@
#define LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGCleanup.h"
#include "CGValue.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/IdentifierTable.h" // Selector
@ -141,6 +142,8 @@ class CGObjCRuntime {
/// error to Sema.
virtual llvm::Constant *GetEHType(QualType T) = 0;
virtual CatchTypeInfo getCatchAllTypeInfo() { return { nullptr, 0 }; }
/// Generate a constant string object.
virtual ConstantAddress GenerateConstantString(const StringLiteral *) = 0;

View File

@ -897,25 +897,6 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
isDeclareTargetDeclaration(const ValueDecl *VD) {
for (const Decl *D : VD->redecls()) {
if (!D->hasAttrs())
continue;
if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
return Attr->getMapType();
}
if (const auto *V = dyn_cast<VarDecl>(VD)) {
if (const VarDecl *TD = V->getTemplateInstantiationPattern())
return isDeclareTargetDeclaration(TD);
} else if (const auto *FD = dyn_cast<FunctionDecl>(VD)) {
if (const auto *TD = FD->getTemplateInstantiationPattern())
return isDeclareTargetDeclaration(TD);
}
return llvm::None;
}
LValue ReductionCodeGen::emitSharedLValue(CodeGenFunction &CGF, const Expr *E) {
return CGF.EmitOMPSharedLValue(E);
}
@ -2417,7 +2398,7 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetLink(const VarDecl *VD) {
if (CGM.getLangOpts().OpenMPSimd)
return Address::invalid();
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
isDeclareTargetDeclaration(VD);
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (Res && *Res == OMPDeclareTargetDeclAttr::MT_Link) {
SmallString<64> PtrName;
{
@ -2639,7 +2620,7 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit) {
Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
isDeclareTargetDeclaration(VD);
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res || *Res == OMPDeclareTargetDeclAttr::MT_Link)
return false;
VD = VD->getDefinition(CGM.getContext());
@ -6945,7 +6926,7 @@ class MappableExprsHandler {
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
isDeclareTargetDeclaration(VD))
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
if (*Res == OMPDeclareTargetDeclAttr::MT_Link) {
IsLink = true;
BP = CGF.CGM.getOpenMPRuntime().getAddrOfDeclareTargetLink(VD);
@ -7436,7 +7417,7 @@ class MappableExprsHandler {
if (!VD)
continue;
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
isDeclareTargetDeclaration(VD);
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link)
continue;
StructRangeInfoTy PartialStruct;
@ -8066,7 +8047,7 @@ bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
scanForTargetRegionsFunctions(FD->getBody(), CGM.getMangledName(GD));
// Do not to emit function if it is not marked as declare target.
return !isDeclareTargetDeclaration(FD) &&
return !OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(FD) &&
AlreadyEmittedTargetFunctions.count(FD->getCanonicalDecl()) == 0;
}
@ -8093,14 +8074,15 @@ bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
// Do not to emit variable if it is not marked as declare target.
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
isDeclareTargetDeclaration(cast<VarDecl>(GD.getDecl()));
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(
cast<VarDecl>(GD.getDecl()));
return !Res || *Res == OMPDeclareTargetDeclAttr::MT_Link;
}
void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr) {
if (llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
isDeclareTargetDeclaration(VD)) {
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD)) {
OffloadEntriesInfoManagerTy::OMPTargetGlobalVarEntryKind Flags;
StringRef VarName;
CharUnits VarSize;
@ -8173,7 +8155,7 @@ bool CGOpenMPRuntime::markAsGlobalTarget(GlobalDecl GD) {
const FunctionDecl *FD = D->getCanonicalDecl();
// Do not to emit function if it is marked as declare target as it was already
// emitted.
if (isDeclareTargetDeclaration(D)) {
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(D)) {
if (D->hasBody() && AlreadyEmittedTargetFunctions.count(FD) == 0) {
if (auto *F = dyn_cast_or_null<llvm::Function>(
CGM.GetGlobalValue(CGM.getMangledName(GD))))
@ -8782,7 +8764,8 @@ class DoacrossCleanupTy final : public EHScopeStack::Cleanup {
} // namespace
void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
const OMPLoopDirective &D) {
const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) {
if (!CGF.HaveInsertPoint())
return;
@ -8805,32 +8788,45 @@ void CGOpenMPRuntime::emitDoacrossInit(CodeGenFunction &CGF,
} else {
RD = cast<RecordDecl>(KmpDimTy->getAsTagDecl());
}
llvm::APInt Size(/*numBits=*/32, NumIterations.size());
QualType ArrayTy =
C.getConstantArrayType(KmpDimTy, Size, ArrayType::Normal, 0);
Address DimsAddr = CGF.CreateMemTemp(KmpDimTy, "dims");
CGF.EmitNullInitialization(DimsAddr, KmpDimTy);
Address DimsAddr = CGF.CreateMemTemp(ArrayTy, "dims");
CGF.EmitNullInitialization(DimsAddr, ArrayTy);
enum { LowerFD = 0, UpperFD, StrideFD };
// Fill dims with data.
LValue DimsLVal = CGF.MakeAddrLValue(DimsAddr, KmpDimTy);
// dims.upper = num_iterations;
LValue UpperLVal =
CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), UpperFD));
llvm::Value *NumIterVal = CGF.EmitScalarConversion(
CGF.EmitScalarExpr(D.getNumIterations()), D.getNumIterations()->getType(),
Int64Ty, D.getNumIterations()->getExprLoc());
CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
// dims.stride = 1;
LValue StrideLVal =
CGF.EmitLValueForField(DimsLVal, *std::next(RD->field_begin(), StrideFD));
CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
StrideLVal);
for (unsigned I = 0, E = NumIterations.size(); I < E; ++I) {
LValue DimsLVal =
CGF.MakeAddrLValue(CGF.Builder.CreateConstArrayGEP(
DimsAddr, I, C.getTypeSizeInChars(KmpDimTy)),
KmpDimTy);
// dims.upper = num_iterations;
LValue UpperLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), UpperFD));
llvm::Value *NumIterVal =
CGF.EmitScalarConversion(CGF.EmitScalarExpr(NumIterations[I]),
D.getNumIterations()->getType(), Int64Ty,
D.getNumIterations()->getExprLoc());
CGF.EmitStoreOfScalar(NumIterVal, UpperLVal);
// dims.stride = 1;
LValue StrideLVal = CGF.EmitLValueForField(
DimsLVal, *std::next(RD->field_begin(), StrideFD));
CGF.EmitStoreOfScalar(llvm::ConstantInt::getSigned(CGM.Int64Ty, /*V=*/1),
StrideLVal);
}
// Build call void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
// kmp_int32 num_dims, struct kmp_dim * dims);
llvm::Value *Args[] = {emitUpdateLocation(CGF, D.getLocStart()),
getThreadID(CGF, D.getLocStart()),
llvm::ConstantInt::getSigned(CGM.Int32Ty, 1),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
DimsAddr.getPointer(), CGM.VoidPtrTy)};
llvm::Value *Args[] = {
emitUpdateLocation(CGF, D.getLocStart()),
getThreadID(CGF, D.getLocStart()),
llvm::ConstantInt::getSigned(CGM.Int32Ty, NumIterations.size()),
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
CGF.Builder
.CreateConstArrayGEP(DimsAddr, 0, C.getTypeSizeInChars(KmpDimTy))
.getPointer(),
CGM.VoidPtrTy)};
llvm::Value *RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_init);
CGF.EmitRuntimeCall(RTLFn, Args);
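For reference, a minimal C++ sketch of what the reworked emitDoacrossInit now amounts to for an ordered(2) nest; the kmp_dim layout and the __kmpc_doacross_init prototype follow libomp conventions and are assumptions for illustration, not part of this change:

#include <cstdint>

struct ident_t;                                  // opaque libomp source-location descriptor
struct kmp_dim { int64_t lo, up, st; };          // lower bound, upper bound, stride
extern "C" void __kmpc_doacross_init(ident_t *loc, int32_t gtid,
                                     int32_t num_dims, const kmp_dim *dims);

void doacross_init_sketch(ident_t *loc, int32_t gtid,
                          int64_t iters0, int64_t iters1) {
  // One kmp_dim per loop in the doacross nest, zero-initialized
  // (mirrors EmitNullInitialization on the array temporary above).
  kmp_dim dims[2] = {};
  dims[0].up = iters0; dims[0].st = 1;           // dims.upper = num_iterations; dims.stride = 1
  dims[1].up = iters1; dims[1].st = 1;
  __kmpc_doacross_init(loc, gtid, /*num_dims=*/2, &dims[0]);
}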
@ -8845,16 +8841,29 @@ void CGOpenMPRuntime::emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) {
QualType Int64Ty =
CGM.getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
const Expr *CounterVal = C->getCounterValue();
assert(CounterVal);
llvm::Value *CntVal = CGF.EmitScalarConversion(CGF.EmitScalarExpr(CounterVal),
CounterVal->getType(), Int64Ty,
CounterVal->getExprLoc());
Address CntAddr = CGF.CreateMemTemp(Int64Ty, ".cnt.addr");
CGF.EmitStoreOfScalar(CntVal, CntAddr, /*Volatile=*/false, Int64Ty);
llvm::Value *Args[] = {emitUpdateLocation(CGF, C->getLocStart()),
getThreadID(CGF, C->getLocStart()),
CntAddr.getPointer()};
llvm::APInt Size(/*numBits=*/32, C->getNumLoops());
QualType ArrayTy = CGM.getContext().getConstantArrayType(
Int64Ty, Size, ArrayType::Normal, 0);
Address CntAddr = CGF.CreateMemTemp(ArrayTy, ".cnt.addr");
for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I) {
const Expr *CounterVal = C->getLoopData(I);
assert(CounterVal);
llvm::Value *CntVal = CGF.EmitScalarConversion(
CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
CounterVal->getExprLoc());
CGF.EmitStoreOfScalar(
CntVal,
CGF.Builder.CreateConstArrayGEP(
CntAddr, I, CGM.getContext().getTypeSizeInChars(Int64Ty)),
/*Volatile=*/false, Int64Ty);
}
llvm::Value *Args[] = {
emitUpdateLocation(CGF, C->getLocStart()),
getThreadID(CGF, C->getLocStart()),
CGF.Builder
.CreateConstArrayGEP(CntAddr, 0,
CGM.getContext().getTypeSizeInChars(Int64Ty))
.getPointer()};
llvm::Value *RTLFn;
if (C->getDependencyKind() == OMPC_DEPEND_source) {
RTLFn = createRuntimeFunction(OMPRTL__kmpc_doacross_post);
@ -9169,7 +9178,8 @@ void CGOpenMPSIMDRuntime::emitTargetDataStandAloneCall(
}
void CGOpenMPSIMDRuntime::emitDoacrossInit(CodeGenFunction &CGF,
const OMPLoopDirective &D) {
const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) {
llvm_unreachable("Not supported in SIMD-only mode");
}
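The runtime changes above (and the Sema changes further down) are exercised by multi-dimensional doacross loops. A plausible source-level example, assumed here for illustration rather than taken from the patch:

// OpenMP 4.5 doacross nest: ordered(2) makes both loop counters part of the
// dependence vectors, so two kmp_dim entries and two sink values are needed.
void wavefront(float *a, int n, int m) {
#pragma omp parallel for ordered(2)
  for (int i = 1; i < n; ++i)
    for (int j = 1; j < m; ++j) {
#pragma omp ordered depend(sink: i - 1, j) depend(sink: i, j - 1)
      a[i * m + j] += a[(i - 1) * m + j] + a[i * m + (j - 1)];
#pragma omp ordered depend(source)
    }
}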

View File

@ -1465,8 +1465,8 @@ class CGOpenMPRuntime {
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF,
const OMPLoopDirective &D);
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
@ -2051,8 +2051,8 @@ class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF,
const OMPLoopDirective &D) override;
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.

View File

@ -191,20 +191,10 @@ class CheckVarsEscapingDeclContext final
bool AllEscaped = false;
bool IsForCombinedParallelRegion = false;
static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
isDeclareTargetDeclaration(const ValueDecl *VD) {
for (const Decl *D : VD->redecls()) {
if (!D->hasAttrs())
continue;
if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
return Attr->getMapType();
}
return llvm::None;
}
void markAsEscaped(const ValueDecl *VD) {
// Do not globalize declare target variables.
if (!isa<VarDecl>(VD) || isDeclareTargetDeclaration(VD))
if (!isa<VarDecl>(VD) ||
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
// Variables captured by value must be globalized.

View File

@ -1509,6 +1509,23 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
}
++I;
}
// Privatize extra loop counters used in loops for ordered(n) clauses.
for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
if (!C->getNumForLoops())
continue;
for (unsigned I = S.getCollapsedNumber(),
E = C->getLoopNumIterations().size();
I < E; ++I) {
const auto *DRE = cast<DeclRefExpr>(C->getLoopCunter(I));
const auto *VD = cast<VarDecl>(DRE->getDecl());
// Override only those variables that are really emitted already.
if (LocalDeclMap.count(VD)) {
(void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
return CreateMemTemp(DRE->getType(), VD->getName());
});
}
}
}
}
static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
@ -2244,7 +2261,7 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
bool Ordered = false;
if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
if (OrderedClause->getNumForLoops())
RT.emitDoacrossInit(*this, S);
RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
else
Ordered = true;
}
@ -4943,6 +4960,20 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
CGF.EmitVarDecl(*VD);
}
}
for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
if (!C->getNumForLoops())
continue;
for (unsigned I = LD->getCollapsedNumber(),
E = C->getLoopNumIterations().size();
I < E; ++I) {
if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
cast<DeclRefExpr>(C->getLoopCunter(I))->getDecl())) {
// Emit only those that were not explicitly referenced in clauses.
if (!CGF.LocalDeclMap.count(VD))
CGF.EmitVarDecl(*VD);
}
}
}
}
CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
}

View File

@ -470,7 +470,7 @@ class CodeGenFunction : public CodeGenTypeCache {
/// potentially set the return value.
bool SawAsmBlock = false;
const FunctionDecl *CurSEHParent = nullptr;
const NamedDecl *CurSEHParent = nullptr;
/// True if the current function is an outlined SEH helper. This can be a
/// finally block or filter expression.
@ -2878,6 +2878,8 @@ class CodeGenFunction : public CodeGenTypeCache {
void EnterSEHTryStmt(const SEHTryStmt &S);
void ExitSEHTryStmt(const SEHTryStmt &S);
void pushSEHCleanup(CleanupKind kind,
llvm::Function *FinallyFunc);
void startOutlinedSEHHelper(CodeGenFunction &ParentCGF, bool IsFilter,
const Stmt *OutlinedStmt);

View File

@ -4912,7 +4912,8 @@ ObjCRuntime Clang::AddObjCRuntimeArgs(const ArgList &args,
}
if ((runtime.getKind() == ObjCRuntime::GNUstep) &&
(runtime.getVersion() >= VersionTuple(2, 0)))
if (!getToolChain().getTriple().isOSBinFormatELF()) {
if (!getToolChain().getTriple().isOSBinFormatELF() &&
!getToolChain().getTriple().isOSBinFormatCOFF()) {
getToolChain().getDriver().Diag(
diag::err_drv_gnustep_objc_runtime_incompatible_binary)
<< runtime.getVersion().getMajor();

View File

@ -2035,7 +2035,11 @@ bool Darwin::isAlignedAllocationUnavailable() const {
void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const {
if (isAlignedAllocationUnavailable())
// Pass "-faligned-alloc-unavailable" only when the user hasn't manually
// enabled or disabled aligned allocations.
if (!DriverArgs.hasArgNoClaim(options::OPT_faligned_allocation,
options::OPT_fno_aligned_allocation) &&
isAlignedAllocationUnavailable())
CC1Args.push_back("-faligned-alloc-unavailable");
}

View File

@ -553,7 +553,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_guaranteed_copy_elision", "201606L");
Builder.defineMacro("__cpp_nontype_template_parameter_auto", "201606L");
}
if (LangOpts.AlignedAllocation)
if (LangOpts.AlignedAllocation && !LangOpts.AlignedAllocationUnavailable)
Builder.defineMacro("__cpp_aligned_new", "201606L");
if (LangOpts.RelaxedTemplateTemplateArgs)
Builder.defineMacro("__cpp_template_template_args", "201611L");
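Gating __cpp_aligned_new on availability (together with the Darwin driver change above) means feature-test checks now reflect whether the aligned forms are actually usable on the deployment target. A small, hypothetical usage sketch; the posix_memalign fallback is an assumption suited to the Darwin/FreeBSD targets in question:

#include <cstdlib>
#include <new>

struct alignas(64) CacheLine { char bytes[64]; };

CacheLine *make_line() {
#if defined(__cpp_aligned_new) && __cpp_aligned_new >= 201606L
  // Aligned operator new is declared and not marked unavailable.
  return new CacheLine;
#else
  // Older deployment targets: fall back to an explicitly aligned allocation.
  void *p = nullptr;
  if (posix_memalign(&p, alignof(CacheLine), sizeof(CacheLine)) != 0)
    return nullptr;
  return new (p) CacheLine;
#endif
}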

View File

@ -1747,9 +1747,9 @@ static void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
StringRef OSName = AvailabilityAttr::getPlatformNameSourceSpelling(
S.getASTContext().getTargetInfo().getPlatformName());
S.Diag(Loc, diag::warn_aligned_allocation_unavailable)
<< IsDelete << FD.getType().getAsString() << OSName
<< alignedAllocMinVersion(T.getOS()).getAsString();
S.Diag(Loc, diag::err_aligned_allocation_unavailable)
<< IsDelete << FD.getType().getAsString() << OSName
<< alignedAllocMinVersion(T.getOS()).getAsString();
S.Diag(Loc, diag::note_silence_unligned_allocation_unavailable);
}
}

View File

@ -73,6 +73,8 @@ class DSAStackTy {
};
using OperatorOffsetTy =
llvm::SmallVector<std::pair<Expr *, OverloadedOperatorKind>, 4>;
using DoacrossDependMapTy =
llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
private:
struct DSAInfo {
@ -97,8 +99,6 @@ class DSAStackTy {
llvm::DenseMap<const ValueDecl *, MappedExprComponentTy>;
using CriticalsWithHintsTy =
llvm::StringMap<std::pair<const OMPCriticalDirective *, llvm::APSInt>>;
using DoacrossDependMapTy =
llvm::DenseMap<OMPDependClause *, OperatorOffsetTy>;
struct ReductionData {
using BOKPtrType = llvm::PointerEmbeddedInt<BinaryOperatorKind, 16>;
SourceRange ReductionRange;
@ -137,7 +137,7 @@ class DSAStackTy {
/// first argument (Expr *) contains optional argument of the
/// 'ordered' clause, the second one is true if the regions has 'ordered'
/// clause, false otherwise.
llvm::PointerIntPair<const Expr *, 1, bool> OrderedRegion;
llvm::Optional<std::pair<const Expr *, OMPOrderedClause *>> OrderedRegion;
bool NowaitRegion = false;
bool CancelRegion = false;
unsigned AssociatedLoops = 1;
@ -398,23 +398,42 @@ class DSAStackTy {
}
/// Marks current region as ordered (it has an 'ordered' clause).
void setOrderedRegion(bool IsOrdered, const Expr *Param) {
void setOrderedRegion(bool IsOrdered, const Expr *Param,
OMPOrderedClause *Clause) {
assert(!isStackEmpty());
Stack.back().first.back().OrderedRegion.setInt(IsOrdered);
Stack.back().first.back().OrderedRegion.setPointer(Param);
if (IsOrdered)
Stack.back().first.back().OrderedRegion.emplace(Param, Clause);
else
Stack.back().first.back().OrderedRegion.reset();
}
/// Returns true, if region is ordered (has associated 'ordered' clause),
/// false - otherwise.
bool isOrderedRegion() const {
if (isStackEmpty())
return false;
return Stack.back().first.rbegin()->OrderedRegion.hasValue();
}
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
if (isStackEmpty() ||
!Stack.back().first.rbegin()->OrderedRegion.hasValue())
return std::make_pair(nullptr, nullptr);
return Stack.back().first.rbegin()->OrderedRegion.getValue();
}
/// Returns true, if parent region is ordered (has associated
/// 'ordered' clause), false - otherwise.
bool isParentOrderedRegion() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getInt();
return std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue();
}
/// Returns optional parameter for the ordered region.
const Expr *getParentOrderedRegionParam() const {
if (isStackEmpty() || Stack.back().first.size() == 1)
return nullptr;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getPointer();
std::pair<const Expr *, OMPOrderedClause *>
getParentOrderedRegionParam() const {
if (isStackEmpty() || Stack.back().first.size() == 1 ||
!std::next(Stack.back().first.rbegin())->OrderedRegion.hasValue())
return std::make_pair(nullptr, nullptr);
return std::next(Stack.back().first.rbegin())->OrderedRegion.getValue();
}
/// Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
@ -1239,17 +1258,6 @@ void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
static llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
isDeclareTargetDeclaration(const ValueDecl *VD) {
for (const Decl *D : VD->redecls()) {
if (!D->hasAttrs())
continue;
if (const auto *Attr = D->getAttr<OMPDeclareTargetDeclAttr>())
return Attr->getMapType();
}
return llvm::None;
}
bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
@ -1429,7 +1437,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D) const {
// If the declaration is enclosed in a 'declare target' directive,
// then it should not be captured.
//
if (isDeclareTargetDeclaration(VD))
if (OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD))
return nullptr;
return VD;
}
@ -1968,7 +1976,7 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
// Skip internally declared static variables.
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
isDeclareTargetDeclaration(VD);
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(VD);
if (VD->hasGlobalStorage() && !CS->capturesVariable(VD) &&
(!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link))
return;
@ -3736,6 +3744,13 @@ class OpenMPIterationSpaceChecker {
Expr *buildCounterInit() const;
/// Build step of the counter be used for codegen.
Expr *buildCounterStep() const;
/// Build loop data with counter value for depend clauses in ordered
/// directives.
Expr *
buildOrderedLoopData(Scope *S, Expr *Counter,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
SourceLocation Loc, Expr *Inc = nullptr,
OverloadedOperatorKind OOK = OO_Amp);
/// Return true if any expression is dependent.
bool dependent() const;
@ -3900,7 +3915,12 @@ bool OpenMPIterationSpaceChecker::checkAndSetInit(Stmt *S, bool EmitDiags) {
SemaRef.Diag(S->getLocStart(),
diag::ext_omp_loop_not_canonical_init)
<< S->getSourceRange();
return setLCDeclAndLB(Var, nullptr, Var->getInit());
return setLCDeclAndLB(
Var,
buildDeclRefExpr(SemaRef, Var,
Var->getType().getNonReferenceType(),
DS->getLocStart()),
Var->getInit());
}
}
}
@ -4262,7 +4282,8 @@ Expr *OpenMPIterationSpaceChecker::buildPreCond(
/// Build reference expression to the counter be used for codegen.
DeclRefExpr *OpenMPIterationSpaceChecker::buildCounterVar(
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, DSAStackTy &DSA) const {
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures,
DSAStackTy &DSA) const {
auto *VD = dyn_cast<VarDecl>(LCDecl);
if (!VD) {
VD = SemaRef.isOpenMPCapturedDecl(LCDecl);
@ -4302,6 +4323,63 @@ Expr *OpenMPIterationSpaceChecker::buildCounterInit() const { return LB; }
/// Build step of the counter be used for codegen.
Expr *OpenMPIterationSpaceChecker::buildCounterStep() const { return Step; }
Expr *OpenMPIterationSpaceChecker::buildOrderedLoopData(
Scope *S, Expr *Counter,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures, SourceLocation Loc,
Expr *Inc, OverloadedOperatorKind OOK) {
Expr *Cnt = SemaRef.DefaultLvalueConversion(Counter).get();
if (!Cnt)
return nullptr;
if (Inc) {
assert((OOK == OO_Plus || OOK == OO_Minus) &&
"Expected only + or - operations for depend clauses.");
BinaryOperatorKind BOK = (OOK == OO_Plus) ? BO_Add : BO_Sub;
Cnt = SemaRef.BuildBinOp(S, Loc, BOK, Cnt, Inc).get();
if (!Cnt)
return nullptr;
}
ExprResult Diff;
QualType VarType = LCDecl->getType().getNonReferenceType();
if (VarType->isIntegerType() || VarType->isPointerType() ||
SemaRef.getLangOpts().CPlusPlus) {
// Upper - Lower
Expr *Upper =
TestIsLessOp ? Cnt : tryBuildCapture(SemaRef, UB, Captures).get();
Expr *Lower =
TestIsLessOp ? tryBuildCapture(SemaRef, LB, Captures).get() : Cnt;
if (!Upper || !Lower)
return nullptr;
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Sub, Upper, Lower);
if (!Diff.isUsable() && VarType->getAsCXXRecordDecl()) {
// BuildBinOp already emitted error, this one is to point user to upper
// and lower bound, and to tell what is passed to 'operator-'.
SemaRef.Diag(Upper->getLocStart(), diag::err_omp_loop_diff_cxx)
<< Upper->getSourceRange() << Lower->getSourceRange();
return nullptr;
}
}
if (!Diff.isUsable())
return nullptr;
// Parentheses (for dumping/debugging purposes only).
Diff = SemaRef.ActOnParenExpr(DefaultLoc, DefaultLoc, Diff.get());
if (!Diff.isUsable())
return nullptr;
ExprResult NewStep = tryBuildCapture(SemaRef, Step, Captures);
if (!NewStep.isUsable())
return nullptr;
// (Upper - Lower) / Step
Diff = SemaRef.BuildBinOp(S, DefaultLoc, BO_Div, Diff.get(), NewStep.get());
if (!Diff.isUsable())
return nullptr;
return Diff.get();
}
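In effect, buildOrderedLoopData turns a depend(sink: i +/- k) operand into the loop's logical iteration number. A tiny standalone model of that arithmetic for an ascending integer loop 'for (i = lb; i < ub; i += step)'; this helper is hypothetical and only mirrors the expressions built above:

#include <cstdint>

int64_t sinkIterationNumber(int64_t i, int64_t offset, bool isMinus,
                            int64_t lb, int64_t step) {
  int64_t cnt = isMinus ? i - offset : i + offset;  // Cnt = Counter +/- Inc
  return (cnt - lb) / step;                         // (Upper - Lower) / Step
}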
/// Iteration space of a single for loop.
struct LoopIterationSpace final {
/// Condition of the loop.
@ -4361,7 +4439,8 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
static bool checkOpenMPIterationSpace(
OpenMPDirectiveKind DKind, Stmt *S, Sema &SemaRef, DSAStackTy &DSA,
unsigned CurrentNestedLoopCount, unsigned NestedLoopCount,
Expr *CollapseLoopCountExpr, Expr *OrderedLoopCountExpr,
unsigned TotalNestedLoopCount, Expr *CollapseLoopCountExpr,
Expr *OrderedLoopCountExpr,
Sema::VarsWithInheritedDSAType &VarsWithImplicitDSA,
LoopIterationSpace &ResultIterSpace,
llvm::MapVector<const Expr *, DeclRefExpr *> &Captures) {
@ -4371,9 +4450,9 @@ static bool checkOpenMPIterationSpace(
if (!For) {
SemaRef.Diag(S->getLocStart(), diag::err_omp_not_for)
<< (CollapseLoopCountExpr != nullptr || OrderedLoopCountExpr != nullptr)
<< getOpenMPDirectiveName(DKind) << NestedLoopCount
<< getOpenMPDirectiveName(DKind) << TotalNestedLoopCount
<< (CurrentNestedLoopCount > 0) << CurrentNestedLoopCount;
if (NestedLoopCount > 1) {
if (TotalNestedLoopCount > 1) {
if (CollapseLoopCountExpr && OrderedLoopCountExpr)
SemaRef.Diag(DSA.getConstructLoc(),
diag::note_omp_collapse_ordered_expr)
@ -4506,6 +4585,41 @@ static bool checkOpenMPIterationSpace(
ResultIterSpace.PrivateCounterVar == nullptr ||
ResultIterSpace.CounterInit == nullptr ||
ResultIterSpace.CounterStep == nullptr);
if (!HasErrors && DSA.isOrderedRegion()) {
if (DSA.getOrderedRegionParam().second->getNumForLoops()) {
if (CurrentNestedLoopCount <
DSA.getOrderedRegionParam().second->getLoopNumIterations().size()) {
DSA.getOrderedRegionParam().second->setLoopNumIterations(
CurrentNestedLoopCount, ResultIterSpace.NumIterations);
DSA.getOrderedRegionParam().second->setLoopCounter(
CurrentNestedLoopCount, ResultIterSpace.CounterVar);
}
}
for (auto &Pair : DSA.getDoacrossDependClauses()) {
if (CurrentNestedLoopCount >= Pair.first->getNumLoops()) {
// Erroneous case - clause has some problems.
continue;
}
if (Pair.first->getDependencyKind() == OMPC_DEPEND_sink &&
Pair.second.size() <= CurrentNestedLoopCount) {
// Erroneous case - clause has some problems.
Pair.first->setLoopData(CurrentNestedLoopCount, nullptr);
continue;
}
Expr *CntValue;
if (Pair.first->getDependencyKind() == OMPC_DEPEND_source)
CntValue = ISC.buildOrderedLoopData(
DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
Pair.first->getDependencyLoc());
else
CntValue = ISC.buildOrderedLoopData(
DSA.getCurScope(), ResultIterSpace.CounterVar, Captures,
Pair.first->getDependencyLoc(),
Pair.second[CurrentNestedLoopCount].first,
Pair.second[CurrentNestedLoopCount].second);
Pair.first->setLoopData(CurrentNestedLoopCount, CntValue);
}
}
return HasErrors;
}
@ -4691,6 +4805,7 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
if (CollapseLoopCountExpr->EvaluateAsInt(Result, SemaRef.getASTContext()))
NestedLoopCount = Result.getLimitedValue();
}
unsigned OrderedLoopCount = 1;
if (OrderedLoopCountExpr) {
// Found 'ordered' clause - calculate collapse number.
llvm::APSInt Result;
@ -4703,20 +4818,21 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
diag::note_collapse_loop_count)
<< CollapseLoopCountExpr->getSourceRange();
}
NestedLoopCount = Result.getLimitedValue();
OrderedLoopCount = Result.getLimitedValue();
}
}
// This is helper routine for loop directives (e.g., 'for', 'simd',
// 'for simd', etc.).
llvm::MapVector<const Expr *, DeclRefExpr *> Captures;
SmallVector<LoopIterationSpace, 4> IterSpaces;
IterSpaces.resize(NestedLoopCount);
IterSpaces.resize(std::max(OrderedLoopCount, NestedLoopCount));
Stmt *CurStmt = AStmt->IgnoreContainers(/* IgnoreCaptured */ true);
for (unsigned Cnt = 0; Cnt < NestedLoopCount; ++Cnt) {
if (checkOpenMPIterationSpace(DKind, CurStmt, SemaRef, DSA, Cnt,
NestedLoopCount, CollapseLoopCountExpr,
OrderedLoopCountExpr, VarsWithImplicitDSA,
IterSpaces[Cnt], Captures))
if (checkOpenMPIterationSpace(
DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
Captures))
return 0;
// Move on to the next nested for loop, or to the loop body.
// OpenMP [2.8.1, simd construct, Restrictions]
@ -4725,6 +4841,27 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
// any two loops.
CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
}
for (unsigned Cnt = NestedLoopCount; Cnt < OrderedLoopCount; ++Cnt) {
if (checkOpenMPIterationSpace(
DKind, CurStmt, SemaRef, DSA, Cnt, NestedLoopCount,
std::max(OrderedLoopCount, NestedLoopCount), CollapseLoopCountExpr,
OrderedLoopCountExpr, VarsWithImplicitDSA, IterSpaces[Cnt],
Captures))
return 0;
if (Cnt > 0 && IterSpaces[Cnt].CounterVar) {
// Handle initialization of captured loop iterator variables.
auto *DRE = cast<DeclRefExpr>(IterSpaces[Cnt].CounterVar);
if (isa<OMPCapturedExprDecl>(DRE->getDecl())) {
Captures[DRE] = DRE;
}
}
// Move on to the next nested for loop, or to the loop body.
// OpenMP [2.8.1, simd construct, Restrictions]
// All loops associated with the construct must be perfectly nested; that
// is, there must be no intervening code nor any OpenMP directive between
// any two loops.
CurStmt = cast<ForStmt>(CurStmt)->getBody()->IgnoreContainers();
}
Built.clear(/* size */ NestedLoopCount);
@ -5104,7 +5241,6 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.Inits.resize(NestedLoopCount);
Built.Updates.resize(NestedLoopCount);
Built.Finals.resize(NestedLoopCount);
SmallVector<Expr *, 4> LoopMultipliers;
{
ExprResult Div;
// Go from inner nested loop to outer.
@ -5174,7 +5310,6 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
HasErrors = true;
break;
}
LoopMultipliers.push_back(Div.get());
}
if (!Update.isUsable() || !Final.isUsable()) {
HasErrors = true;
@ -5222,55 +5357,6 @@ checkOpenMPLoop(OpenMPDirectiveKind DKind, Expr *CollapseLoopCountExpr,
Built.DistCombinedFields.NLB = CombNextLB.get();
Built.DistCombinedFields.NUB = CombNextUB.get();
Expr *CounterVal = SemaRef.DefaultLvalueConversion(IV.get()).get();
// Fill data for doacross depend clauses.
for (const auto &Pair : DSA.getDoacrossDependClauses()) {
if (Pair.first->getDependencyKind() == OMPC_DEPEND_source) {
Pair.first->setCounterValue(CounterVal);
} else {
if (NestedLoopCount != Pair.second.size() ||
NestedLoopCount != LoopMultipliers.size() + 1) {
// Erroneous case - clause has some problems.
Pair.first->setCounterValue(CounterVal);
continue;
}
assert(Pair.first->getDependencyKind() == OMPC_DEPEND_sink);
auto I = Pair.second.rbegin();
auto IS = IterSpaces.rbegin();
auto ILM = LoopMultipliers.rbegin();
Expr *UpCounterVal = CounterVal;
Expr *Multiplier = nullptr;
for (int Cnt = NestedLoopCount - 1; Cnt >= 0; --Cnt) {
if (I->first) {
assert(IS->CounterStep);
Expr *NormalizedOffset =
SemaRef
.BuildBinOp(CurScope, I->first->getExprLoc(), BO_Div,
I->first, IS->CounterStep)
.get();
if (Multiplier) {
NormalizedOffset =
SemaRef
.BuildBinOp(CurScope, I->first->getExprLoc(), BO_Mul,
NormalizedOffset, Multiplier)
.get();
}
assert(I->second == OO_Plus || I->second == OO_Minus);
BinaryOperatorKind BOK = (I->second == OO_Plus) ? BO_Add : BO_Sub;
UpCounterVal = SemaRef
.BuildBinOp(CurScope, I->first->getExprLoc(), BOK,
UpCounterVal, NormalizedOffset)
.get();
}
Multiplier = *ILM;
++I;
++IS;
++ILM;
}
Pair.first->setCounterValue(UpCounterVal);
}
}
return NestedLoopCount;
}
@ -5838,12 +5924,12 @@ StmtResult Sema::ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Diag(DependFound->getLocStart(), diag::err_omp_depend_clause_thread_simd)
<< getOpenMPClauseName(TC ? TC->getClauseKind() : SC->getClauseKind());
ErrorFound = true;
} else if (DependFound && !DSAStack->getParentOrderedRegionParam()) {
} else if (DependFound && !DSAStack->getParentOrderedRegionParam().first) {
Diag(DependFound->getLocStart(),
diag::err_omp_ordered_directive_without_param);
ErrorFound = true;
} else if (TC || Clauses.empty()) {
if (const Expr *Param = DSAStack->getParentOrderedRegionParam()) {
if (const Expr *Param = DSAStack->getParentOrderedRegionParam().first) {
SourceLocation ErrLoc = TC ? TC->getLocStart() : StartLoc;
Diag(ErrLoc, diag::err_omp_ordered_directive_with_param)
<< (TC != nullptr);
@ -8619,9 +8705,11 @@ OMPClause *Sema::ActOnOpenMPOrderedClause(SourceLocation StartLoc,
} else {
NumForLoops = nullptr;
}
DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops);
return new (Context)
OMPOrderedClause(NumForLoops, StartLoc, LParenLoc, EndLoc);
auto *Clause = OMPOrderedClause::Create(
Context, NumForLoops, NumForLoops ? DSAStack->getAssociatedLoops() : 0,
StartLoc, LParenLoc, EndLoc);
DSAStack->setOrderedRegion(/*IsOrdered=*/true, NumForLoops, Clause);
return Clause;
}
OMPClause *Sema::ActOnOpenMPSimpleClause(
@ -11477,8 +11565,9 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
DSAStackTy::OperatorOffsetTy OpsOffs;
llvm::APSInt DepCounter(/*BitWidth=*/32);
llvm::APSInt TotalDepCount(/*BitWidth=*/32);
if (DepKind == OMPC_DEPEND_sink) {
if (const Expr *OrderedCountExpr = DSAStack->getParentOrderedRegionParam()) {
if (DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) {
if (const Expr *OrderedCountExpr =
DSAStack->getParentOrderedRegionParam().first) {
TotalDepCount = OrderedCountExpr->EvaluateKnownConstInt(Context);
TotalDepCount.setIsUnsigned(/*Val=*/true);
}
@ -11494,7 +11583,7 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
SourceLocation ELoc = RefExpr->getExprLoc();
Expr *SimpleExpr = RefExpr->IgnoreParenCasts();
if (DepKind == OMPC_DEPEND_sink) {
if (DSAStack->getParentOrderedRegionParam() &&
if (DSAStack->getParentOrderedRegionParam().first &&
DepCounter >= TotalDepCount) {
Diag(ELoc, diag::err_omp_depend_sink_unexpected_expr);
continue;
@ -11560,7 +11649,7 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
continue;
}
if (!CurContext->isDependentContext() &&
DSAStack->getParentOrderedRegionParam() &&
DSAStack->getParentOrderedRegionParam().first &&
DepCounter != DSAStack->isParentLoopControlVariable(D).first) {
const ValueDecl *VD =
DSAStack->getParentLoopControlVariable(DepCounter.getZExtValue());
@ -11598,7 +11687,7 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
if (!CurContext->isDependentContext() && DepKind == OMPC_DEPEND_sink &&
TotalDepCount > VarList.size() &&
DSAStack->getParentOrderedRegionParam() &&
DSAStack->getParentOrderedRegionParam().first &&
DSAStack->getParentLoopControlVariable(VarList.size() + 1)) {
Diag(EndLoc, diag::err_omp_depend_sink_expected_loop_iteration)
<< 1 << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
@ -11608,7 +11697,8 @@ Sema::ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind,
return nullptr;
auto *C = OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc,
DepKind, DepLoc, ColonLoc, Vars);
DepKind, DepLoc, ColonLoc, Vars,
TotalDepCount.getZExtValue());
if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
@ -13042,6 +13132,8 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
return;
}
}
if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D))
D = FTD->getTemplatedDecl();
if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
if (FD->hasAttr<OMPDeclareTargetDeclAttr>() &&
(FD->getAttr<OMPDeclareTargetDeclAttr>()->getMapType() ==
@ -13052,16 +13144,6 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
return;
}
}
if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(D)) {
if (FTD->hasAttr<OMPDeclareTargetDeclAttr>() &&
(FTD->getAttr<OMPDeclareTargetDeclAttr>()->getMapType() ==
OMPDeclareTargetDeclAttr::MT_Link)) {
assert(IdLoc.isValid() && "Source location is expected");
Diag(IdLoc, diag::err_omp_function_in_link_clause);
Diag(FTD->getLocation(), diag::note_defined_here) << FTD;
return;
}
}
if (!E) {
// Checking declaration inside declare target region.
if (!D->hasAttr<OMPDeclareTargetDeclAttr>() &&

View File

@ -5192,10 +5192,20 @@ void Sema::PerformPendingInstantiations(bool LocalOnly) {
if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Inst.first)) {
bool DefinitionRequired = Function->getTemplateSpecializationKind() ==
TSK_ExplicitInstantiationDefinition;
InstantiateFunctionDefinition(/*FIXME:*/Inst.second, Function, true,
DefinitionRequired, true);
if (Function->isDefined())
Function->setInstantiationIsPending(false);
if (Function->isMultiVersion()) {
getASTContext().forEachMultiversionedFunctionVersion(
Function, [this, Inst, DefinitionRequired](FunctionDecl *CurFD) {
InstantiateFunctionDefinition(/*FIXME:*/ Inst.second, CurFD, true,
DefinitionRequired, true);
if (CurFD->isDefined())
CurFD->setInstantiationIsPending(false);
});
} else {
InstantiateFunctionDefinition(/*FIXME:*/ Inst.second, Function, true,
DefinitionRequired, true);
if (Function->isDefined())
Function->setInstantiationIsPending(false);
}
continue;
}

View File

@ -1856,7 +1856,7 @@ OMPClause *OMPClauseReader::readClause() {
C = new (Context) OMPScheduleClause();
break;
case OMPC_ordered:
C = new (Context) OMPOrderedClause();
C = OMPOrderedClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_nowait:
C = new (Context) OMPNowaitClause();
@ -1927,9 +1927,12 @@ OMPClause *OMPClauseReader::readClause() {
case OMPC_flush:
C = OMPFlushClause::CreateEmpty(Context, Reader->Record.readInt());
break;
case OMPC_depend:
C = OMPDependClause::CreateEmpty(Context, Reader->Record.readInt());
case OMPC_depend: {
unsigned NumVars = Reader->Record.readInt();
unsigned NumLoops = Reader->Record.readInt();
C = OMPDependClause::CreateEmpty(Context, NumVars, NumLoops);
break;
}
case OMPC_device:
C = new (Context) OMPDeviceClause();
break;
@ -2087,6 +2090,10 @@ void OMPClauseReader::VisitOMPScheduleClause(OMPScheduleClause *C) {
void OMPClauseReader::VisitOMPOrderedClause(OMPOrderedClause *C) {
C->setNumForLoops(Reader->Record.readSubExpr());
for (unsigned I = 0, E = C->NumberOfLoops; I < E; ++I)
C->setLoopNumIterations(I, Reader->Record.readSubExpr());
for (unsigned I = 0, E = C->NumberOfLoops; I < E; ++I)
C->setLoopCounter(I, Reader->Record.readSubExpr());
C->setLParenLoc(Reader->ReadSourceLocation());
}
@ -2395,10 +2402,11 @@ void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
for (unsigned i = 0; i != NumVars; ++i)
for (unsigned I = 0; I != NumVars; ++I)
Vars.push_back(Reader->Record.readSubExpr());
C->setVarRefs(Vars);
C->setCounterValue(Reader->Record.readSubExpr());
for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
C->setLoopData(I, Reader->Record.readSubExpr());
}
void OMPClauseReader::VisitOMPDeviceClause(OMPDeviceClause *C) {

View File

@ -1898,7 +1898,12 @@ void OMPClauseWriter::VisitOMPScheduleClause(OMPScheduleClause *C) {
}
void OMPClauseWriter::VisitOMPOrderedClause(OMPOrderedClause *C) {
Record.push_back(C->getLoopNumIterations().size());
Record.AddStmt(C->getNumForLoops());
for (Expr *NumIter : C->getLoopNumIterations())
Record.AddStmt(NumIter);
for (unsigned I = 0, E = C->getLoopNumIterations().size(); I < E; ++I)
Record.AddStmt(C->getLoopCunter(I));
Record.AddSourceLocation(C->getLParenLoc());
}
@ -2102,13 +2107,15 @@ void OMPClauseWriter::VisitOMPFlushClause(OMPFlushClause *C) {
void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
Record.push_back(C->varlist_size());
Record.push_back(C->getNumLoops());
Record.AddSourceLocation(C->getLParenLoc());
Record.push_back(C->getDependencyKind());
Record.AddSourceLocation(C->getDependencyLoc());
Record.AddSourceLocation(C->getColonLoc());
for (auto *VE : C->varlists())
Record.AddStmt(VE);
Record.AddStmt(C->getCounterValue());
for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
Record.AddStmt(C->getLoopData(I));
}
void OMPClauseWriter::VisitOMPDeviceClause(OMPDeviceClause *C) {

View File

@ -1551,11 +1551,11 @@ void LinkerDriver::link(ArrayRef<const char *> ArgsArr) {
continue;
}
// If the symbol isn't common, it must have been replaced with a regular
// symbol, which will carry its own alignment.
auto *DC = dyn_cast<DefinedCommon>(Sym);
if (!DC) {
warn("/aligncomm symbol " + Name + " of wrong kind");
if (!DC)
continue;
}
CommonChunk *C = DC->getChunk();
C->Alignment = std::max(C->Alignment, Alignment);

View File

@ -2929,8 +2929,10 @@ void elf::mergeSections() {
// We do not want to handle sections that are not alive, so just remove
// them instead of trying to merge.
if (!MS->Live)
if (!MS->Live) {
S = nullptr;
continue;
}
StringRef OutsecName = getOutputSectionName(MS);
uint32_t Alignment = std::max<uint32_t>(MS->Alignment, MS->Entsize);

View File

@ -29,7 +29,13 @@ ELF Improvements
COFF Improvements
-----------------
* Item 1.
* Improved correctness of exporting mangled stdcall symbols.
* Completed support for ARM64 relocations.
* Added support for outputting PDB debug info for MinGW targets.
* Improved compatibility of output binaries with GNU binutils objcopy/strip.
MachO Improvements
------------------

View File

@ -8,4 +8,4 @@
#define CLANG_VENDOR "FreeBSD "
#define SVN_REVISION "339355"
#define SVN_REVISION "339999"

View File

@ -7,4 +7,4 @@
#define LLD_REPOSITORY_STRING "FreeBSD"
// <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
#define LLD_REVISION_STRING "339355-1200005"
#define LLD_REVISION_STRING "339999-1200005"

View File

@ -13,8 +13,14 @@
#define LLDB_CONFIG_TERMIOS_SUPPORTED
#define LLDB_EDITLINE_USE_WCHAR 0
#define LLDB_HAVE_EL_RFUNC_T 0
/* #undef LLDB_DISABLE_POSIX */
#define LLDB_LIBDIR_SUFFIX ""
#define HAVE_SYS_EVENT_H 1
#define HAVE_PPOLL 1
@ -25,6 +31,8 @@
#define HAVE_NR_PROCESS_VM_READV 0
#ifndef HAVE_LIBCOMPRESSION
/* #undef HAVE_LIBCOMPRESSION */
#endif
#endif // #ifndef LLDB_HOST_CONFIG_H

View File

@ -321,10 +321,10 @@
#define PACKAGE_NAME "LLVM"
/* Define to the full name and version of this package. */
#define PACKAGE_STRING "LLVM 7.0.0svn"
#define PACKAGE_STRING "LLVM 7.0.0"
/* Define to the version of this package. */
#define PACKAGE_VERSION "7.0.0svn"
#define PACKAGE_VERSION "7.0.0"
/* Define to the vendor of this package. */
/* #undef PACKAGE_VENDOR */

View File

@ -76,7 +76,7 @@
#define LLVM_VERSION_PATCH 0
/* LLVM version string */
#define LLVM_VERSION_STRING "7.0.0svn"
#define LLVM_VERSION_STRING "7.0.0"
/* Whether LLVM records statistics for use with GetStatistics(),
* PrintStatistics() or PrintStatisticsJSON()