Vendor import of clang trunk r256945:
https://llvm.org/svn/llvm-project/cfe/trunk@256945
parent 3176e97f13
commit fc74ff5a07
@@ -631,11 +631,19 @@ if (CLANG_ENABLE_BOOTSTRAP)

string(REGEX MATCH "stage([0-9]*)" MATCHED_STAGE "${CLANG_STAGE}")
if(MATCHED_STAGE)
math(EXPR STAGE_NUM "${MATCHED_STAGE} + 1")
set(NEXT_CLANG_STAGE stage${STAGE_NUM})
if(NOT LLVM_BUILD_INSTRUMENTED)
math(EXPR STAGE_NUM "${CMAKE_MATCH_1} + 1")
set(NEXT_CLANG_STAGE stage${STAGE_NUM})
else()
set(NEXT_CLANG_STAGE stage${CMAKE_MATCH_1})
endif()
else()
set(NEXT_CLANG_STAGE bootstrap)
endif()

if(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED)
set(NEXT_CLANG_STAGE ${NEXT_CLANG_STAGE}-instrumented)
endif()
message(STATUS "Setting next clang stage to: ${NEXT_CLANG_STAGE}")

@@ -681,6 +689,26 @@ if (CLANG_ENABLE_BOOTSTRAP)
set(RUNTIME_DEP compiler-rt)
endif()

set(COMPILER_OPTIONS
-DCMAKE_CXX_COMPILER=${LLVM_RUNTIME_OUTPUT_INTDIR}/clang++
-DCMAKE_C_COMPILER=${LLVM_RUNTIME_OUTPUT_INTDIR}/clang
-DCMAKE_ASM_COMPILER=${LLVM_RUNTIME_OUTPUT_INTDIR}/clang)

if(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED)
set(PGO_DEP llvm-profdata)
set(PGO_OPT -DLLVM_PROFDATA=${LLVM_RUNTIME_OUTPUT_INTDIR}/llvm-profdata)
endif()

if(LLVM_BUILD_INSTRUMENTED)
set(PGO_DEP generate-profdata)
set(PGO_OPT -DLLVM_PROFDATA_FILE=${CMAKE_CURRENT_BINARY_DIR}/utils/perf-training/clang.profdata)
set(COMPILER_OPTIONS
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_ASM_COMPILER=${CMAKE_ASM_COMPILER})
set(RUNTIME_DEP) # Don't set runtime dependencies
endif()

# Find all variables that start with BOOTSTRAP_ and populate a variable with
# them.
get_cmake_property(variableNames VARIABLES)

@@ -703,7 +731,7 @@ if (CLANG_ENABLE_BOOTSTRAP)
endforeach()

ExternalProject_Add(${NEXT_CLANG_STAGE}
DEPENDS clang ${LTO_DEP} ${RUNTIME_DEP}
DEPENDS clang ${LTO_DEP} ${RUNTIME_DEP} ${PGO_DEP}
PREFIX ${NEXT_CLANG_STAGE}
SOURCE_DIR ${CMAKE_SOURCE_DIR}
STAMP_DIR ${STAMP_DIR}

@@ -715,11 +743,9 @@ if (CLANG_ENABLE_BOOTSTRAP)
-DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX}
${CLANG_BOOTSTRAP_CMAKE_ARGS}
${PASSTHROUGH_VARIABLES}
-DCMAKE_CXX_COMPILER=${LLVM_RUNTIME_OUTPUT_INTDIR}/clang++
-DCMAKE_C_COMPILER=${LLVM_RUNTIME_OUTPUT_INTDIR}/clang
-DCMAKE_ASM_COMPILER=${LLVM_RUNTIME_OUTPUT_INTDIR}/clang
-DCLANG_STAGE=${NEXT_CLANG_STAGE}
${LTO_LIBRARY} ${LTO_AR} ${LTO_RANLIB} ${verbose}
-DCLANG_STAGE=${NEXT_CLANG_STAGE}
${COMPILER_OPTIONS}
${LTO_LIBRARY} ${LTO_AR} ${LTO_RANLIB} ${verbose} ${PGO_OPT}
INSTALL_COMMAND ""
STEP_TARGETS configure build
${cmake_3_4_USES_TERMINAL_OPTIONS}
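The switch above from ${MATCHED_STAGE} to ${CMAKE_MATCH_1} matters because string(REGEX MATCH) stores the whole match ("stage2") in the output variable and only the captured digits ("2") in CMAKE_MATCH_1, and math(EXPR) needs the bare number. A standalone CMake illustration (the "stage2" value is hypothetical, not part of the diff):

set(CLANG_STAGE "stage2")                                   # hypothetical input
string(REGEX MATCH "stage([0-9]*)" MATCHED_STAGE "${CLANG_STAGE}")
message(STATUS "full match: ${MATCHED_STAGE}")              # prints "stage2"
message(STATUS "capture 1:  ${CMAKE_MATCH_1}")              # prints "2"
math(EXPR STAGE_NUM "${CMAKE_MATCH_1} + 1")                 # 3; "stage2 + 1" would not evaluate
set(NEXT_CLANG_STAGE stage${STAGE_NUM})                     # stage3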
cmake/caches/PGO-stage2-instrumented.cmake (new file, 9 lines)
@@ -0,0 +1,9 @@
set(CMAKE_BUILD_TYPE RELEASE CACHE STRING "")
set(CLANG_ENABLE_BOOTSTRAP ON CACHE BOOL "")
set(LLVM_BUILD_EXTERNAL_COMPILER_RT ON CACHE BOOL "")

set(CLANG_BOOTSTRAP_TARGETS check-all check-llvm check-clang test-suite CACHE STRING "")

set(CLANG_BOOTSTRAP_CMAKE_ARGS
-C ${CMAKE_CURRENT_LIST_DIR}/PGO-stage2.cmake
CACHE STRING "")

cmake/caches/PGO-stage2.cmake (new file, 2 lines)
@@ -0,0 +1,2 @@
set(CMAKE_BUILD_TYPE RELEASE CACHE STRING "")
set(LLVM_BUILD_EXTERNAL_COMPILER_RT ON CACHE BOOL "")

cmake/caches/PGO.cmake (new file, 17 lines)
@@ -0,0 +1,17 @@
set(CMAKE_BUILD_TYPE RELEASE CACHE STRING "")
set(CLANG_ENABLE_BOOTSTRAP ON CACHE BOOL "")
set(LLVM_BUILD_EXTERNAL_COMPILER_RT ON CACHE BOOL "")

set(LLVM_TARGETS_TO_BUILD X86 CACHE STRING "")
set(BOOTSTRAP_LLVM_BUILD_INSTRUMENTED ON CACHE BOOL "")
set(CLANG_BOOTSTRAP_TARGETS
generate-profdata
stage2
stage2-check-all
stage2-check-llvm
stage2-check-clang
stage2-test-suite CACHE STRING "")

set(CLANG_BOOTSTRAP_CMAKE_ARGS
-C ${CMAKE_CURRENT_LIST_DIR}/PGO-stage2-instrumented.cmake
CACHE STRING "")
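Taken together these caches chain a three-step build: PGO.cmake configures stage 1 and points its bootstrap at PGO-stage2-instrumented.cmake, which builds an instrumented compiler, runs the perf-training inputs to produce clang.profdata, and finally configures the profile-optimized stage 2 via PGO-stage2.cmake. A rough sketch of driving it, assuming an LLVM checkout with clang at tools/clang and the Ninja generator; the forwarded target names come from CLANG_BOOTSTRAP_TARGETS above and may differ in other revisions:

cmake -G Ninja -C /path/to/llvm/tools/clang/cmake/caches/PGO.cmake /path/to/llvm
ninja stage2            # stage1 -> instrumented stage2 -> generate-profdata -> PGO'd stage2
ninja stage2-check-all  # optionally run the test suites against the final compiler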
@@ -395,7 +395,7 @@ struct DeclarationNameLoc {
// Locations (if any) for the tilde (destructor) or operator keyword
// (conversion) are stored elsewhere.
struct NT {
TypeSourceInfo* TInfo;
TypeSourceInfo *TInfo;
};

// The location (if any) of the operator keyword is stored elsewhere.
@@ -2190,7 +2190,8 @@ class CallExpr : public Expr {
return reinterpret_cast<Expr **>(SubExprs+getNumPreArgs()+PREARGS_START);
}
const Expr *const *getArgs() const {
return const_cast<CallExpr*>(this)->getArgs();
return reinterpret_cast<Expr **>(SubExprs + getNumPreArgs() +
PREARGS_START);
}

/// getArg - Return the specified argument.

@@ -3926,7 +3927,9 @@ class InitListExpr : public Expr {
/// which covers @c [2].y=1.0. This DesignatedInitExpr will have two
/// designators, one array designator for @c [2] followed by one field
/// designator for @c .y. The initialization expression will be 1.0.
class DesignatedInitExpr : public Expr {
class DesignatedInitExpr final
: public Expr,
private llvm::TrailingObjects<DesignatedInitExpr, Stmt *> {
public:
/// \brief Forward declaration of the Designator class.
class Designator;

@@ -4206,12 +4209,12 @@ class DesignatedInitExpr : public Expr {
Expr *getSubExpr(unsigned Idx) const {
assert(Idx < NumSubExprs && "Subscript out of range");
return cast<Expr>(reinterpret_cast<Stmt *const *>(this + 1)[Idx]);
return cast<Expr>(getTrailingObjects<Stmt *>()[Idx]);
}

void setSubExpr(unsigned Idx, Expr *E) {
assert(Idx < NumSubExprs && "Subscript out of range");
reinterpret_cast<Stmt **>(this + 1)[Idx] = E;
getTrailingObjects<Stmt *>()[Idx] = E;
}

/// \brief Replaces the designator at index @p Idx with the series

@@ -4230,9 +4233,11 @@ class DesignatedInitExpr : public Expr {
// Iterators
child_range children() {
Stmt **begin = reinterpret_cast<Stmt**>(this + 1);
Stmt **begin = getTrailingObjects<Stmt *>();
return child_range(begin, begin + NumSubExprs);
}

friend TrailingObjects;
};

/// \brief Represents a place-holder for an object not to be initialized by

@@ -4683,7 +4688,9 @@ class AsTypeExpr : public Expr {
/// equivalent to a particular message send, and this is very much
/// part of the user model. The name of this class encourages this
/// modelling design.
class PseudoObjectExpr : public Expr {
class PseudoObjectExpr final
: public Expr,
private llvm::TrailingObjects<PseudoObjectExpr, Expr *> {
// PseudoObjectExprBits.NumSubExprs - The number of sub-expressions.
// Always at least two, because the first sub-expression is the
// syntactic form.

@@ -4695,13 +4702,11 @@ class PseudoObjectExpr : public Expr {
// in to Create, which is an index within the semantic forms.
// Note also that ASTStmtWriter assumes this encoding.

Expr **getSubExprsBuffer() { return reinterpret_cast<Expr**>(this + 1); }
Expr **getSubExprsBuffer() { return getTrailingObjects<Expr *>(); }
const Expr * const *getSubExprsBuffer() const {
return reinterpret_cast<const Expr * const *>(this + 1);
return getTrailingObjects<Expr *>();
}

friend class ASTStmtReader;

PseudoObjectExpr(QualType type, ExprValueKind VK,
Expr *syntactic, ArrayRef<Expr*> semantic,
unsigned resultIndex);

@@ -4798,6 +4803,9 @@ class PseudoObjectExpr : public Expr {
static bool classof(const Stmt *T) {
return T->getStmtClass() == PseudoObjectExprClass;
}

friend TrailingObjects;
friend class ASTStmtReader;
};

/// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
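The recurring change in these AST headers replaces hand-written `reinterpret_cast<T *>(this + 1)` pointer arithmetic with llvm::TrailingObjects, which computes the address of each tail-allocated array from the declared trailing types. A minimal sketch of the idiom with a hypothetical ToyExpr class (not part of clang), assuming only LLVM's TrailingObjects.h support header:

#include "llvm/Support/TrailingObjects.h"
#include <cstddef>
#include <new>

// Hypothetical node that stores NumArgs 'int' values directly after itself.
class ToyExpr final : private llvm::TrailingObjects<ToyExpr, int> {
  friend TrailingObjects;            // the base needs access to numTrailingObjects()
  unsigned NumArgs;

  size_t numTrailingObjects(OverloadToken<int>) const { return NumArgs; }
  explicit ToyExpr(unsigned N) : NumArgs(N) {}

public:
  static ToyExpr *create(unsigned N) {
    // One allocation covering the object plus its N trailing ints.
    void *Mem = ::operator new(totalSizeToAlloc<int>(N));
    return new (Mem) ToyExpr(N);
  }
  // Replaces reinterpret_cast<int *>(this + 1): offset and alignment are
  // computed by TrailingObjects instead of by hand.
  int *args() { return getTrailingObjects<int>(); }
  unsigned numArgs() const { return NumArgs; }
};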
@@ -951,7 +951,9 @@ class CXXThrowExpr : public Expr {
/// This wraps up a function call argument that was created from the
/// corresponding parameter's default argument, when the call did not
/// explicitly supply arguments for all of the parameters.
class CXXDefaultArgExpr : public Expr {
class CXXDefaultArgExpr final
: public Expr,
private llvm::TrailingObjects<CXXDefaultArgExpr, Expr *> {
/// \brief The parameter whose default is being used.
///
/// When the bit is set, the subexpression is stored after the

@@ -977,7 +979,7 @@ class CXXDefaultArgExpr : public Expr {
SubExpr->getValueKind(), SubExpr->getObjectKind(),
false, false, false, false),
Param(param, true), Loc(Loc) {
*reinterpret_cast<Expr **>(this + 1) = SubExpr;
*getTrailingObjects<Expr *>() = SubExpr;
}

public:

@@ -1002,12 +1004,12 @@ class CXXDefaultArgExpr : public Expr {
// Retrieve the actual argument to the function call.
const Expr *getExpr() const {
if (Param.getInt())
return *reinterpret_cast<Expr const * const*> (this + 1);
return *getTrailingObjects<Expr *>();
return getParam()->getDefaultArg();
}
Expr *getExpr() {
if (Param.getInt())
return *reinterpret_cast<Expr **> (this + 1);
return *getTrailingObjects<Expr *>();
return getParam()->getDefaultArg();
}

@@ -1031,6 +1033,7 @@ class CXXDefaultArgExpr : public Expr {
return child_range(child_iterator(), child_iterator());
}

friend TrailingObjects;
friend class ASTStmtReader;
friend class ASTStmtWriter;
};

@@ -1441,7 +1444,9 @@ class CXXTemporaryObjectExpr : public CXXConstructExpr {
/// C++1y introduces a new form of "capture" called an init-capture that
/// includes an initializing expression (rather than capturing a variable),
/// and which can never occur implicitly.
class LambdaExpr : public Expr {
class LambdaExpr final
: public Expr,
private llvm::TrailingObjects<LambdaExpr, Stmt *, unsigned, VarDecl *> {
/// \brief The source range that covers the lambda introducer ([...]).
SourceRange IntroducerRange;

@@ -1476,23 +1481,21 @@ class LambdaExpr : public Expr {
/// module file just to determine the source range.
SourceLocation ClosingBrace;

// Note: The capture initializers are stored directly after the lambda
// expression, along with the index variables used to initialize by-copy
// array captures.
size_t numTrailingObjects(OverloadToken<Stmt *>) const {
return NumCaptures + 1;
}

typedef LambdaCapture Capture;
size_t numTrailingObjects(OverloadToken<unsigned>) const {
return HasArrayIndexVars ? NumCaptures + 1 : 0;
}

/// \brief Construct a lambda expression.
LambdaExpr(QualType T, SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
ArrayRef<Capture> Captures,
bool ExplicitParams,
bool ExplicitResultType,
ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts,
SourceLocation ClosingBrace,
SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
bool ExplicitParams, bool ExplicitResultType,
ArrayRef<Expr *> CaptureInits, ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts, SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack);

/// \brief Construct an empty lambda expression.

@@ -1503,53 +1506,35 @@ class LambdaExpr : public Expr {
getStoredStmts()[NumCaptures] = nullptr;
}

Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt **getStoredStmts() { return getTrailingObjects<Stmt *>(); }

Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
Stmt *const *getStoredStmts() const { return getTrailingObjects<Stmt *>(); }

/// \brief Retrieve the mapping from captures to the first array index
/// variable.
unsigned *getArrayIndexStarts() {
return reinterpret_cast<unsigned *>(getStoredStmts() + NumCaptures + 1);
}
unsigned *getArrayIndexStarts() { return getTrailingObjects<unsigned>(); }

const unsigned *getArrayIndexStarts() const {
return reinterpret_cast<const unsigned *>(getStoredStmts() + NumCaptures +
1);
return getTrailingObjects<unsigned>();
}

/// \brief Retrieve the complete set of array-index variables.
VarDecl **getArrayIndexVars() {
unsigned ArrayIndexSize = llvm::RoundUpToAlignment(
sizeof(unsigned) * (NumCaptures + 1), llvm::alignOf<VarDecl *>());
return reinterpret_cast<VarDecl **>(
reinterpret_cast<char *>(getArrayIndexStarts()) + ArrayIndexSize);
}
VarDecl **getArrayIndexVars() { return getTrailingObjects<VarDecl *>(); }

VarDecl *const *getArrayIndexVars() const {
unsigned ArrayIndexSize = llvm::RoundUpToAlignment(
sizeof(unsigned) * (NumCaptures + 1), llvm::alignOf<VarDecl *>());
return reinterpret_cast<VarDecl *const *>(
reinterpret_cast<const char *>(getArrayIndexStarts()) + ArrayIndexSize);
return getTrailingObjects<VarDecl *>();
}

public:
/// \brief Construct a new lambda expression.
static LambdaExpr *Create(const ASTContext &C,
CXXRecordDecl *Class,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
ArrayRef<Capture> Captures,
bool ExplicitParams,
bool ExplicitResultType,
ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts,
SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack);
static LambdaExpr *
Create(const ASTContext &C, CXXRecordDecl *Class, SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc,
ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
ArrayRef<VarDecl *> ArrayIndexVars,
ArrayRef<unsigned> ArrayIndexStarts, SourceLocation ClosingBrace,
bool ContainsUnexpandedParameterPack);

/// \brief Construct a new lambda expression that will be deserialized from
/// an external source.

@@ -1572,7 +1557,7 @@ class LambdaExpr : public Expr {
/// \brief An iterator that walks over the captures of the lambda,
/// both implicit and explicit.
typedef const Capture *capture_iterator;
typedef const LambdaCapture *capture_iterator;

/// \brief An iterator over a range of lambda captures.
typedef llvm::iterator_range<capture_iterator> capture_range;

@@ -1709,9 +1694,11 @@ class LambdaExpr : public Expr {
SourceLocation getLocEnd() const LLVM_READONLY { return ClosingBrace; }

child_range children() {
// Includes initialization exprs plus body stmt
return child_range(getStoredStmts(), getStoredStmts() + NumCaptures + 1);
}

friend TrailingObjects;
friend class ASTStmtReader;
friend class ASTStmtWriter;
};

@@ -2226,7 +2213,9 @@ class CXXPseudoDestructorExpr : public Expr {
/// __is_enum(std::string) == false
/// __is_trivially_constructible(vector<int>, int*, int*)
/// \endcode
class TypeTraitExpr : public Expr {
class TypeTraitExpr final
: public Expr,
private llvm::TrailingObjects<TypeTraitExpr, TypeSourceInfo *> {
/// \brief The location of the type trait keyword.
SourceLocation Loc;

@@ -2243,16 +2232,10 @@ class TypeTraitExpr : public Expr {
TypeTraitExpr(EmptyShell Empty) : Expr(TypeTraitExprClass, Empty) { }

/// \brief Retrieve the argument types.
TypeSourceInfo **getTypeSourceInfos() {
return reinterpret_cast<TypeSourceInfo **>(this+1);
size_t numTrailingObjects(OverloadToken<TypeSourceInfo *>) const {
return getNumArgs();
}

/// \brief Retrieve the argument types.
TypeSourceInfo * const *getTypeSourceInfos() const {
return reinterpret_cast<TypeSourceInfo * const*>(this+1);
}

public:
/// \brief Create a new type trait expression.
static TypeTraitExpr *Create(const ASTContext &C, QualType T,

@@ -2284,22 +2267,9 @@ class TypeTraitExpr : public Expr {
}

/// \brief Retrieve the argument types.
ArrayRef<TypeSourceInfo *> getArgs() const {
return llvm::makeArrayRef(getTypeSourceInfos(), getNumArgs());
}

typedef TypeSourceInfo **arg_iterator;
arg_iterator arg_begin() {
return getTypeSourceInfos();
}
arg_iterator arg_end() {
return getTypeSourceInfos() + getNumArgs();
}

typedef TypeSourceInfo const * const *arg_const_iterator;
arg_const_iterator arg_begin() const { return getTypeSourceInfos(); }
arg_const_iterator arg_end() const {
return getTypeSourceInfos() + getNumArgs();
ArrayRef<TypeSourceInfo *> getArgs() const {
return llvm::makeArrayRef(getTrailingObjects<TypeSourceInfo *>(),
getNumArgs());
}

SourceLocation getLocStart() const LLVM_READONLY { return Loc; }

@@ -2314,9 +2284,9 @@ class TypeTraitExpr : public Expr {
return child_range(child_iterator(), child_iterator());
}

friend TrailingObjects;
friend class ASTStmtReader;
friend class ASTStmtWriter;

};

/// \brief An Embarcadero array type trait, as used in the implementation of

@@ -2899,7 +2869,9 @@ class DependentScopeDeclRefExpr final
/// This expression also tracks whether the sub-expression contains a
/// potentially-evaluated block literal. The lifetime of a block
/// literal is the extent of the enclosing scope.
class ExprWithCleanups : public Expr {
class ExprWithCleanups final
: public Expr,
private llvm::TrailingObjects<ExprWithCleanups, BlockDecl *> {
public:
/// The type of objects that are kept in the cleanup.
/// It's useful to remember the set of blocks; we could also

@@ -2913,12 +2885,7 @@ class ExprWithCleanups : public Expr {
ExprWithCleanups(EmptyShell, unsigned NumObjects);
ExprWithCleanups(Expr *SubExpr, ArrayRef<CleanupObject> Objects);

CleanupObject *getObjectsBuffer() {
return reinterpret_cast<CleanupObject*>(this + 1);
}
const CleanupObject *getObjectsBuffer() const {
return reinterpret_cast<const CleanupObject*>(this + 1);
}
friend TrailingObjects;
friend class ASTStmtReader;

public:

@@ -2929,7 +2896,8 @@ class ExprWithCleanups : public Expr {
ArrayRef<CleanupObject> objects);

ArrayRef<CleanupObject> getObjects() const {
return llvm::makeArrayRef(getObjectsBuffer(), getNumObjects());
return llvm::makeArrayRef(getTrailingObjects<CleanupObject>(),
getNumObjects());
}

unsigned getNumObjects() const { return ExprWithCleanupsBits.NumObjects; }

@@ -2981,7 +2949,9 @@ class ExprWithCleanups : public Expr {
/// When the returned expression is instantiated, it may resolve to a
/// constructor call, conversion function call, or some kind of type
/// conversion.
class CXXUnresolvedConstructExpr : public Expr {
class CXXUnresolvedConstructExpr final
: public Expr,
private llvm::TrailingObjects<CXXUnresolvedConstructExpr, Expr *> {
/// \brief The type being constructed.
TypeSourceInfo *Type;

@@ -3002,6 +2972,7 @@ class CXXUnresolvedConstructExpr : public Expr {
CXXUnresolvedConstructExpr(EmptyShell Empty, unsigned NumArgs)
: Expr(CXXUnresolvedConstructExprClass, Empty), Type(), NumArgs(NumArgs) { }

friend TrailingObjects;
friend class ASTStmtReader;

public:

@@ -3036,13 +3007,11 @@ class CXXUnresolvedConstructExpr : public Expr {
unsigned arg_size() const { return NumArgs; }

typedef Expr** arg_iterator;
arg_iterator arg_begin() { return reinterpret_cast<Expr**>(this + 1); }
arg_iterator arg_begin() { return getTrailingObjects<Expr *>(); }
arg_iterator arg_end() { return arg_begin() + NumArgs; }

typedef const Expr* const * const_arg_iterator;
const_arg_iterator arg_begin() const {
return reinterpret_cast<const Expr* const *>(this + 1);
}
const_arg_iterator arg_begin() const { return getTrailingObjects<Expr *>(); }
const_arg_iterator arg_end() const {
return arg_begin() + NumArgs;
}

@@ -3075,7 +3044,7 @@ class CXXUnresolvedConstructExpr : public Expr {
// Iterators
child_range children() {
Stmt **begin = reinterpret_cast<Stmt**>(this+1);
Stmt **begin = reinterpret_cast<Stmt **>(arg_begin());
return child_range(begin, begin + NumArgs);
}
};

@@ -3608,7 +3577,9 @@ class PackExpansionExpr : public Expr {
/// static const unsigned value = sizeof...(Types);
/// };
/// \endcode
class SizeOfPackExpr : public Expr {
class SizeOfPackExpr final
: public Expr,
private llvm::TrailingObjects<SizeOfPackExpr, TemplateArgument> {
/// \brief The location of the \c sizeof keyword.
SourceLocation OperatorLoc;

@@ -3633,6 +3604,7 @@ class SizeOfPackExpr : public Expr {
/// \brief The parameter pack.
NamedDecl *Pack;

friend TrailingObjects;
friend class ASTStmtReader;
friend class ASTStmtWriter;

@@ -3649,7 +3621,7 @@ class SizeOfPackExpr : public Expr {
Length(Length ? *Length : PartialArgs.size()), Pack(Pack) {
assert((!Length || PartialArgs.empty()) &&
"have partial args for non-dependent sizeof... expression");
TemplateArgument *Args = reinterpret_cast<TemplateArgument *>(this + 1);
TemplateArgument *Args = getTrailingObjects<TemplateArgument>();
std::uninitialized_copy(PartialArgs.begin(), PartialArgs.end(), Args);
}

@@ -3700,8 +3672,7 @@ class SizeOfPackExpr : public Expr {
/// \brief Get
ArrayRef<TemplateArgument> getPartialArguments() const {
assert(isPartiallySubstituted());
const TemplateArgument *Args =
reinterpret_cast<const TemplateArgument *>(this + 1);
const TemplateArgument *Args = getTrailingObjects<TemplateArgument>();
return llvm::makeArrayRef(Args, Args + Length);
}

@@ -3837,7 +3808,9 @@ class SubstNonTypeTemplateParmPackExpr : public Expr {
/// };
/// template struct S<int, int>;
/// \endcode
class FunctionParmPackExpr : public Expr {
class FunctionParmPackExpr final
: public Expr,
private llvm::TrailingObjects<FunctionParmPackExpr, ParmVarDecl *> {
/// \brief The function parameter pack which was referenced.
ParmVarDecl *ParamPack;

@@ -3851,6 +3824,7 @@ class FunctionParmPackExpr : public Expr {
SourceLocation NameLoc, unsigned NumParams,
ParmVarDecl *const *Params);

friend TrailingObjects;
friend class ASTReader;
friend class ASTStmtReader;

@@ -3871,7 +3845,7 @@ class FunctionParmPackExpr : public Expr {
/// \brief Iterators over the parameters which the parameter pack expanded
/// into.
typedef ParmVarDecl * const *iterator;
iterator begin() const { return reinterpret_cast<iterator>(this+1); }
iterator begin() const { return getTrailingObjects<ParmVarDecl *>(); }
iterator end() const { return begin() + NumParameters; }

/// \brief Get the number of parameters in this parameter pack.
@@ -141,15 +141,17 @@ class ObjCBoxedExpr : public Expr {

/// ObjCArrayLiteral - used for objective-c array containers; as in:
/// @[@"Hello", NSApp, [NSNumber numberWithInt:42]];
class ObjCArrayLiteral : public Expr {
class ObjCArrayLiteral final
: public Expr,
private llvm::TrailingObjects<ObjCArrayLiteral, Expr *> {
unsigned NumElements;
SourceRange Range;
ObjCMethodDecl *ArrayWithObjectsMethod;

ObjCArrayLiteral(ArrayRef<Expr *> Elements,
QualType T, ObjCMethodDecl * Method,
SourceRange SR);

explicit ObjCArrayLiteral(EmptyShell Empty, unsigned NumElements)
: Expr(ObjCArrayLiteralClass, Empty), NumElements(NumElements) {}

@@ -171,11 +173,11 @@ class ObjCArrayLiteral : public Expr {
}

/// \brief Retrieve elements of array of literals.
Expr **getElements() { return reinterpret_cast<Expr **>(this + 1); }
Expr **getElements() { return getTrailingObjects<Expr *>(); }

/// \brief Retrieve elements of array of literals.
const Expr * const *getElements() const {
return reinterpret_cast<const Expr * const*>(this + 1);
const Expr * const *getElements() const {
return getTrailingObjects<Expr *>();
}

/// getNumElements - Return number of elements of objective-c array literal.

@@ -196,11 +198,12 @@ class ObjCArrayLiteral : public Expr {
}

// Iterators
child_range children() {
return child_range((Stmt **)getElements(),
(Stmt **)getElements() + NumElements);
child_range children() {
return child_range(reinterpret_cast<Stmt **>(getElements()),
reinterpret_cast<Stmt **>(getElements()) + NumElements);
}

friend TrailingObjects;
friend class ASTStmtReader;
};

@@ -230,32 +233,35 @@ template <> struct isPodLike<clang::ObjCDictionaryElement> : std::true_type {};
}

namespace clang {
/// ObjCDictionaryLiteral - AST node to represent objective-c dictionary
/// \brief Internal struct for storing Key/value pair.
struct ObjCDictionaryLiteral_KeyValuePair {
Expr *Key;
Expr *Value;
};

/// \brief Internal struct to describes an element that is a pack
/// expansion, used if any of the elements in the dictionary literal
/// are pack expansions.
struct ObjCDictionaryLiteral_ExpansionData {
/// \brief The location of the ellipsis, if this element is a pack
/// expansion.
SourceLocation EllipsisLoc;

/// \brief If non-zero, the number of elements that this pack
/// expansion will expand to (+1).
unsigned NumExpansionsPlusOne;
};

/// ObjCDictionaryLiteral - AST node to represent objective-c dictionary
/// literals; as in: @{@"name" : NSUserName(), @"date" : [NSDate date] };
class ObjCDictionaryLiteral : public Expr {
/// \brief Key/value pair used to store the key and value of a given element.
///
/// Objects of this type are stored directly after the expression.
struct KeyValuePair {
Expr *Key;
Expr *Value;
};

/// \brief Data that describes an element that is a pack expansion, used if any
/// of the elements in the dictionary literal are pack expansions.
struct ExpansionData {
/// \brief The location of the ellipsis, if this element is a pack
/// expansion.
SourceLocation EllipsisLoc;

/// \brief If non-zero, the number of elements that this pack
/// expansion will expand to (+1).
unsigned NumExpansionsPlusOne;
};

class ObjCDictionaryLiteral final
: public Expr,
private llvm::TrailingObjects<ObjCDictionaryLiteral,
ObjCDictionaryLiteral_KeyValuePair,
ObjCDictionaryLiteral_ExpansionData> {
/// \brief The number of elements in this dictionary literal.
unsigned NumElements : 31;

/// \brief Determine whether this dictionary literal has any pack expansions.
///
/// If the dictionary literal has pack expansions, then there will

@@ -264,10 +270,17 @@ class ObjCDictionaryLiteral : public Expr {
/// any) and number of elements in the expansion (if known). If
/// there are no pack expansions, we optimize away this storage.
unsigned HasPackExpansions : 1;

SourceRange Range;
ObjCMethodDecl *DictWithObjectsMethod;

typedef ObjCDictionaryLiteral_KeyValuePair KeyValuePair;
typedef ObjCDictionaryLiteral_ExpansionData ExpansionData;

size_t numTrailingObjects(OverloadToken<KeyValuePair>) const {
return NumElements;
}

ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
bool HasPackExpansions,
QualType T, ObjCMethodDecl *method,

@@ -278,28 +291,6 @@ class ObjCDictionaryLiteral : public Expr {
: Expr(ObjCDictionaryLiteralClass, Empty), NumElements(NumElements),
HasPackExpansions(HasPackExpansions) {}

KeyValuePair *getKeyValues() {
return reinterpret_cast<KeyValuePair *>(this + 1);
}

const KeyValuePair *getKeyValues() const {
return reinterpret_cast<const KeyValuePair *>(this + 1);
}

ExpansionData *getExpansionData() {
if (!HasPackExpansions)
return nullptr;

return reinterpret_cast<ExpansionData *>(getKeyValues() + NumElements);
}

const ExpansionData *getExpansionData() const {
if (!HasPackExpansions)
return nullptr;

return reinterpret_cast<const ExpansionData *>(getKeyValues()+NumElements);
}

public:
static ObjCDictionaryLiteral *Create(const ASTContext &C,
ArrayRef<ObjCDictionaryElement> VK,

@@ -317,10 +308,11 @@ class ObjCDictionaryLiteral : public Expr {

ObjCDictionaryElement getKeyValueElement(unsigned Index) const {
assert((Index < NumElements) && "Arg access out of range!");
const KeyValuePair &KV = getKeyValues()[Index];
const KeyValuePair &KV = getTrailingObjects<KeyValuePair>()[Index];
ObjCDictionaryElement Result = { KV.Key, KV.Value, SourceLocation(), None };
if (HasPackExpansions) {
const ExpansionData &Expansion = getExpansionData()[Index];
const ExpansionData &Expansion =
getTrailingObjects<ExpansionData>()[Index];
Result.EllipsisLoc = Expansion.EllipsisLoc;
if (Expansion.NumExpansionsPlusOne > 0)
Result.NumExpansions = Expansion.NumExpansionsPlusOne - 1;

@@ -340,17 +332,20 @@ class ObjCDictionaryLiteral : public Expr {
}

// Iterators
child_range children() {
child_range children() {
// Note: we're taking advantage of the layout of the KeyValuePair struct
// here. If that struct changes, this code will need to change as well.
static_assert(sizeof(KeyValuePair) == sizeof(Stmt *) * 2,
"KeyValuePair is expected size");
return child_range(reinterpret_cast<Stmt **>(this + 1),
reinterpret_cast<Stmt **>(this + 1) + NumElements * 2);
return child_range(
reinterpret_cast<Stmt **>(getTrailingObjects<KeyValuePair>()),
reinterpret_cast<Stmt **>(getTrailingObjects<KeyValuePair>()) +
NumElements * 2);
}

friend class ASTStmtReader;
friend class ASTStmtWriter;
friend TrailingObjects;
};

@@ -797,13 +792,6 @@ class ObjCSubscriptRefExpr : public Expr {
explicit ObjCSubscriptRefExpr(EmptyShell Empty)
: Expr(ObjCSubscriptRefExprClass, Empty) {}

static ObjCSubscriptRefExpr *Create(const ASTContext &C,
Expr *base,
Expr *key, QualType T,
ObjCMethodDecl *getMethod,
ObjCMethodDecl *setMethod,
SourceLocation RB);

SourceLocation getRBracket() const { return RBracket; }
void setRBracket(SourceLocation RB) { RBracket = RB; }

@@ -865,7 +853,13 @@ class ObjCSubscriptRefExpr : public Expr {
/// All four kinds of message sends are modeled by the ObjCMessageExpr
/// class, and can be distinguished via \c getReceiverKind(). Example:
///
class ObjCMessageExpr : public Expr {
/// The "void *" trailing objects are actually ONE void * (the
/// receiver pointer), and NumArgs Expr *. But due to the
/// implementation of children(), these must be together contiguously.

class ObjCMessageExpr final
: public Expr,
private llvm::TrailingObjects<ObjCMessageExpr, void *, SourceLocation> {
/// \brief Stores either the selector that this message is sending
/// to (when \c HasMethod is zero) or an \c ObjCMethodDecl pointer
/// referring to the method that we type-checked against.

@@ -877,11 +871,6 @@ class ObjCMessageExpr : public Expr {
/// including the receiver.
unsigned NumArgs : NumArgsBitWidth;

void setNumArgs(unsigned Num) {
assert((Num >> NumArgsBitWidth) == 0 && "Num of args is out of range!");
NumArgs = Num;
}

/// \brief The kind of message send this is, which is one of the
/// ReceiverKind values.
///

@@ -915,6 +904,13 @@ class ObjCMessageExpr : public Expr {
/// brackets ('[' and ']', respectively).
SourceLocation LBracLoc, RBracLoc;

size_t numTrailingObjects(OverloadToken<void *>) const { return NumArgs + 1; }

void setNumArgs(unsigned Num) {
assert((Num >> NumArgsBitWidth) == 0 && "Num of args is out of range!");
NumArgs = Num;
}

ObjCMessageExpr(EmptyShell Empty, unsigned NumArgs)
: Expr(ObjCMessageExprClass, Empty), SelectorOrMethod(0), Kind(0),
HasMethod(0), IsDelegateInitCall(0), IsImplicit(0), SelLocsKind(0) {

@@ -959,14 +955,11 @@ class ObjCMessageExpr : public Expr {
SelectorLocationsKind SelLocsK);

/// \brief Retrieve the pointer value of the message receiver.
void *getReceiverPointer() const {
return *const_cast<void **>(
reinterpret_cast<const void * const*>(this + 1));
}
void *getReceiverPointer() const { return *getTrailingObjects<void *>(); }

/// \brief Set the pointer value of the message receiver.
void setReceiverPointer(void *Value) {
*reinterpret_cast<void **>(this + 1) = Value;
*getTrailingObjects<void *>() = Value;
}

SelectorLocationsKind getSelLocsKind() const {

@@ -979,10 +972,10 @@ class ObjCMessageExpr : public Expr {
/// \brief Get a pointer to the stored selector identifiers locations array.
/// No locations will be stored if HasStandardSelLocs is true.
SourceLocation *getStoredSelLocs() {
return reinterpret_cast<SourceLocation*>(getArgs() + getNumArgs());
return getTrailingObjects<SourceLocation>();
}
const SourceLocation *getStoredSelLocs() const {
return reinterpret_cast<const SourceLocation*>(getArgs() + getNumArgs());
return getTrailingObjects<SourceLocation>();
}

/// \brief Get the number of stored selector identifiers locations.

@@ -1286,20 +1279,21 @@ class ObjCMessageExpr : public Expr {
/// \brief Retrieve the arguments to this message, not including the
/// receiver.
Expr **getArgs() {
return reinterpret_cast<Expr **>(this + 1) + 1;
return reinterpret_cast<Expr **>(getTrailingObjects<void *>() + 1);
}
const Expr * const *getArgs() const {
return reinterpret_cast<const Expr * const *>(this + 1) + 1;
return reinterpret_cast<const Expr *const *>(getTrailingObjects<void *>() +
1);
}

/// getArg - Return the specified argument.
Expr *getArg(unsigned Arg) {
assert(Arg < NumArgs && "Arg access out of range!");
return cast<Expr>(getArgs()[Arg]);
return getArgs()[Arg];
}
const Expr *getArg(unsigned Arg) const {
assert(Arg < NumArgs && "Arg access out of range!");
return cast<Expr>(getArgs()[Arg]);
return getArgs()[Arg];
}
/// setArg - Set the specified argument.
void setArg(unsigned Arg, Expr *ArgExpr) {

@@ -1379,6 +1373,7 @@ class ObjCMessageExpr : public Expr {
return reinterpret_cast<Stmt const * const*>(getArgs() + NumArgs);
}

friend TrailingObjects;
friend class ASTStmtReader;
friend class ASTStmtWriter;
};
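Several of the converted classes above (LambdaExpr, ObjCDictionaryLiteral, ObjCMessageExpr) carry more than one trailing array. With TrailingObjects the arrays are laid out in the order the types are listed, and every trailing type except the last must report its count through numTrailingObjects(). A hedged sketch of the two-array case, again with invented names rather than the real clang classes:

#include "llvm/Support/TrailingObjects.h"
#include <cstddef>
#include <new>

// Hypothetical node with NumPairs 'Pair' records followed by NumLocs 'unsigned' slots.
struct Pair { int Key, Value; };

class ToyLiteral final
    : private llvm::TrailingObjects<ToyLiteral, Pair, unsigned> {
  friend TrailingObjects;
  unsigned NumPairs, NumLocs;

  // Required for every trailing type except the last one.
  size_t numTrailingObjects(OverloadToken<Pair>) const { return NumPairs; }

  ToyLiteral(unsigned P, unsigned L) : NumPairs(P), NumLocs(L) {}

public:
  static ToyLiteral *create(unsigned P, unsigned L) {
    void *Mem = ::operator new(totalSizeToAlloc<Pair, unsigned>(P, L));
    return new (Mem) ToyLiteral(P, L);
  }
  Pair *pairs() { return getTrailingObjects<Pair>(); }        // starts right after the object
  unsigned *locs() { return getTrailingObjects<unsigned>(); } // starts after the Pair array
};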
@@ -216,9 +216,6 @@ class MicrosoftMangleContext : public MangleContext {
uint32_t NVOffset, int32_t VBPtrOffset,
uint32_t VBIndex, raw_ostream &Out) = 0;

virtual void mangleCXXCatchHandlerType(QualType T, uint32_t Flags,
raw_ostream &Out) = 0;

virtual void mangleCXXRTTIBaseClassDescriptor(
const CXXRecordDecl *Derived, uint32_t NVOffset, int32_t VBPtrOffset,
uint32_t VBTableOffset, uint32_t Flags, raw_ostream &Out) = 0;
@@ -84,21 +84,15 @@ template <class T> class OMPVarListClause : public OMPClause {
/// \brief Fetches list of variables associated with this clause.
MutableArrayRef<Expr *> getVarRefs() {
return MutableArrayRef<Expr *>(
reinterpret_cast<Expr **>(
reinterpret_cast<char *>(this) +
llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>())),
NumVars);
static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars);
}

/// \brief Sets the list of variables for this clause.
void setVarRefs(ArrayRef<Expr *> VL) {
assert(VL.size() == NumVars &&
"Number of variables is not the same as the preallocated buffer");
std::copy(
VL.begin(), VL.end(),
reinterpret_cast<Expr **>(
reinterpret_cast<char *>(this) +
llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<Expr *>())));
std::copy(VL.begin(), VL.end(),
static_cast<T *>(this)->template getTrailingObjects<Expr *>());
}

/// \brief Build a clause with \a N variables

@@ -142,9 +136,7 @@ template <class T> class OMPVarListClause : public OMPClause {
/// \brief Fetches list of all variables in the clause.
ArrayRef<const Expr *> getVarRefs() const {
return llvm::makeArrayRef(
reinterpret_cast<const Expr *const *>(
reinterpret_cast<const char *>(this) +
llvm::RoundUpToAlignment(sizeof(T), llvm::alignOf<const Expr *>())),
static_cast<const T *>(this)->template getTrailingObjects<Expr *>(),
NumVars);
}
};
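OMPVarListClause is a CRTP base: the trailing Expr * array lives on the most-derived clause type, so the base reaches it through static_cast<T *>(this)->getTrailingObjects<Expr *>(), with the `template` keyword because the call is made through a dependent type. A small hedged sketch of that shape with made-up names (not the real clause classes):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
#include <cstddef>
#include <new>

// CRTP base: T is the concrete clause that owns the trailing storage.
template <class T> class VarListBase {
protected:
  unsigned NumVars;
  explicit VarListBase(unsigned N) : NumVars(N) {}

public:
  llvm::MutableArrayRef<int> vars() {
    // 'template' is required: getTrailingObjects is a member template of a dependent type.
    return llvm::MutableArrayRef<int>(
        static_cast<T *>(this)->template getTrailingObjects<int>(), NumVars);
  }
};

class ToyClause final : public VarListBase<ToyClause>,
                        private llvm::TrailingObjects<ToyClause, int> {
  friend TrailingObjects;
  friend VarListBase<ToyClause>;   // lets the base reach the privately inherited accessor
  explicit ToyClause(unsigned N) : VarListBase(N) {}

public:
  static ToyClause *create(unsigned N) {
    void *Mem = ::operator new(totalSizeToAlloc<int>(N));
    return new (Mem) ToyClause(N);
  }
};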
@@ -1160,7 +1152,11 @@ class OMPSeqCstClause : public OMPClause {
/// In this example directive '#pragma omp parallel' has clause 'private'
/// with the variables 'a' and 'b'.
///
class OMPPrivateClause : public OMPVarListClause<OMPPrivateClause> {
class OMPPrivateClause final
: public OMPVarListClause<OMPPrivateClause>,
private llvm::TrailingObjects<OMPPrivateClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///

@@ -1252,7 +1248,11 @@ class OMPPrivateClause : public OMPVarListClause<OMPPrivateClause> {
/// In this example directive '#pragma omp parallel' has clause 'firstprivate'
/// with the variables 'a' and 'b'.
///
class OMPFirstprivateClause : public OMPVarListClause<OMPFirstprivateClause> {
class OMPFirstprivateClause final
: public OMPVarListClause<OMPFirstprivateClause>,
private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;

/// \brief Build clause with number of variables \a N.

@@ -1372,7 +1372,9 @@ class OMPFirstprivateClause : public OMPVarListClause<OMPFirstprivateClause> {
/// \endcode
/// In this example directive '#pragma omp simd' has clause 'lastprivate'
/// with the variables 'a' and 'b'.
class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> {
class OMPLastprivateClause final
: public OMPVarListClause<OMPLastprivateClause>,
private llvm::TrailingObjects<OMPLastprivateClause, Expr *> {
// There are 4 additional tail-allocated arrays at the end of the class:
// 1. Contains list of pseudo variables with the default initialization for
// each non-firstprivate variables. Used in codegen for initialization of

@@ -1390,6 +1392,8 @@ class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> {
// Required for proper codegen of final assignment performed by the
// lastprivate clause.
//
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;

/// \brief Build clause with number of variables \a N.

@@ -1557,7 +1561,11 @@ class OMPLastprivateClause : public OMPVarListClause<OMPLastprivateClause> {
/// In this example directive '#pragma omp parallel' has clause 'shared'
/// with the variables 'a' and 'b'.
///
class OMPSharedClause : public OMPVarListClause<OMPSharedClause> {
class OMPSharedClause final
: public OMPVarListClause<OMPSharedClause>,
private llvm::TrailingObjects<OMPSharedClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.

@@ -1617,7 +1625,11 @@ class OMPSharedClause : public OMPVarListClause<OMPSharedClause> {
/// In this example directive '#pragma omp parallel' has clause 'reduction'
/// with operator '+' and the variables 'a' and 'b'.
///
class OMPReductionClause : public OMPVarListClause<OMPReductionClause> {
class OMPReductionClause final
: public OMPVarListClause<OMPReductionClause>,
private llvm::TrailingObjects<OMPReductionClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Location of ':'.
SourceLocation ColonLoc;

@@ -1819,7 +1831,11 @@ class OMPReductionClause : public OMPVarListClause<OMPReductionClause> {
/// In this example directive '#pragma omp simd' has clause 'linear'
/// with variables 'a', 'b' and linear step '2'.
///
class OMPLinearClause : public OMPVarListClause<OMPLinearClause> {
class OMPLinearClause final
: public OMPVarListClause<OMPLinearClause>,
private llvm::TrailingObjects<OMPLinearClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Modifier of 'linear' clause.
OpenMPLinearClauseKind Modifier;

@@ -2039,7 +2055,11 @@ class OMPLinearClause : public OMPVarListClause<OMPLinearClause> {
/// In this example directive '#pragma omp simd' has clause 'aligned'
/// with variables 'a', 'b' and alignment '8'.
///
class OMPAlignedClause : public OMPVarListClause<OMPAlignedClause> {
class OMPAlignedClause final
: public OMPVarListClause<OMPAlignedClause>,
private llvm::TrailingObjects<OMPAlignedClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Location of ':'.
SourceLocation ColonLoc;

@@ -2123,7 +2143,9 @@ class OMPAlignedClause : public OMPVarListClause<OMPAlignedClause> {
/// In this example directive '#pragma omp parallel' has clause 'copyin'
/// with the variables 'a' and 'b'.
///
class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> {
class OMPCopyinClause final
: public OMPVarListClause<OMPCopyinClause>,
private llvm::TrailingObjects<OMPCopyinClause, Expr *> {
// Class has 3 additional tail allocated arrays:
// 1. List of helper expressions for proper generation of assignment operation
// required for copyin clause. This list represents sources.

@@ -2137,6 +2159,8 @@ class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> {
// threadprivate variables to local instances of that variables in other
// implicit threads.

friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///

@@ -2282,7 +2306,11 @@ class OMPCopyinClause : public OMPVarListClause<OMPCopyinClause> {
/// In this example directive '#pragma omp single' has clause 'copyprivate'
/// with the variables 'a' and 'b'.
///
class OMPCopyprivateClause : public OMPVarListClause<OMPCopyprivateClause> {
class OMPCopyprivateClause final
: public OMPVarListClause<OMPCopyprivateClause>,
private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Build clause with number of variables \a N.
///

@@ -2431,7 +2459,11 @@ class OMPCopyprivateClause : public OMPVarListClause<OMPCopyprivateClause> {
/// In this example directive '#pragma omp flush' has implicit clause 'flush'
/// with the variables 'a' and 'b'.
///
class OMPFlushClause : public OMPVarListClause<OMPFlushClause> {
class OMPFlushClause final
: public OMPVarListClause<OMPFlushClause>,
private llvm::TrailingObjects<OMPFlushClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
/// \brief Build clause with number of variables \a N.
///
/// \param StartLoc Starting location of the clause.

@@ -2491,7 +2523,11 @@ class OMPFlushClause : public OMPVarListClause<OMPFlushClause> {
/// In this example directive '#pragma omp task' with clause 'depend' with the
/// variables 'a' and 'b' with dependency 'in'.
///
class OMPDependClause : public OMPVarListClause<OMPDependClause> {
class OMPDependClause final
: public OMPVarListClause<OMPDependClause>,
private llvm::TrailingObjects<OMPDependClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;
/// \brief Dependency type (one of in, out, inout).
OpenMPDependClauseKind DepKind;

@@ -2695,7 +2731,10 @@ class OMPSIMDClause : public OMPClause {
/// In this example directive '#pragma omp target' has clause 'map'
/// with the variables 'a' and 'b'.
///
class OMPMapClause : public OMPVarListClause<OMPMapClause> {
class OMPMapClause final : public OMPVarListClause<OMPMapClause>,
private llvm::TrailingObjects<OMPMapClause, Expr *> {
friend TrailingObjects;
friend OMPVarListClause;
friend class OMPClauseReader;

/// \brief Map type modifier for the 'map' clause.
@@ -192,6 +192,10 @@ class Context {
/// for AuxTarget).
unsigned getAuxBuiltinID(unsigned ID) const { return ID - TSRecords.size(); }

/// Returns true if this is a libc/libm function without the '__builtin_'
/// prefix.
static bool isBuiltinFunc(const char *Name);

private:
const Info &getRecord(unsigned ID) const;
@@ -41,6 +41,13 @@ TARGET_BUILTIN(__builtin_ia32_undef128, "V2d", "nc", "")
TARGET_BUILTIN(__builtin_ia32_undef256, "V4d", "nc", "")
TARGET_BUILTIN(__builtin_ia32_undef512, "V8d", "nc", "")

// FLAGS
//
TARGET_BUILTIN(__builtin_ia32_readeflags_u32, "Ui", "n", "")
TARGET_BUILTIN(__builtin_ia32_readeflags_u64, "ULLi", "n", "")
TARGET_BUILTIN(__builtin_ia32_writeeflags_u32, "vUi", "n", "")
TARGET_BUILTIN(__builtin_ia32_writeeflags_u64, "vULLi", "n", "")

// 3DNow!
//
TARGET_BUILTIN(__builtin_ia32_femms, "v", "", "3dnow")

@@ -917,6 +924,9 @@ TARGET_BUILTIN(__builtin_ia32_xtest, "i", "", "rtm")
BUILTIN(__builtin_ia32_rdpmc, "ULLii", "")
BUILTIN(__builtin_ia32_rdtsc, "ULLi", "")
BUILTIN(__builtin_ia32_rdtscp, "ULLiUi*", "")
// PKU
TARGET_BUILTIN(__builtin_ia32_rdpkru, "Ui", "", "pku")
TARGET_BUILTIN(__builtin_ia32_wrpkru, "vUi", "", "pku")

// AVX-512
TARGET_BUILTIN(__builtin_ia32_sqrtpd512_mask, "V8dV8dV8dUcIi", "", "avx512f")
@@ -123,6 +123,9 @@ def err_drv_emit_llvm_link : Error<
def err_drv_optimization_remark_pattern : Error<
"%0 in '%1'">;
def err_drv_no_neon_modifier : Error<"[no]neon is not accepted as modifier, please use [no]simd instead">;
def err_drv_invalid_omp_target : Error<"OpenMP target is invalid: '%0'">;
def err_drv_omp_host_ir_file_not_found : Error<
"The provided host compiler IR file '%0' is required to generate code for OpenMP target regions but cannot be found.">;

def warn_O4_is_O3 : Warning<"-O4 is equivalent to -O3">, InGroup<Deprecated>;
def warn_drv_lto_libpath : Warning<"libLTO.dylib relative to clang installed dir not found; using 'ld' default search path instead">,
@@ -910,6 +910,10 @@ def warn_pragma_expected_enable_disable : Warning<
def warn_pragma_unknown_extension : Warning<
"unknown OpenCL extension %0 - ignoring">, InGroup<IgnoredPragmas>;

// OpenCL error
def err_opencl_taking_function_address_parser : Error<
"taking address of function is not allowed">;

// OpenMP support.
def warn_pragma_omp_ignored : Warning<
"unexpected '#pragma omp ...' in program">, InGroup<SourceUsesOpenMP>, DefaultIgnore;
@@ -2400,6 +2400,8 @@ def warn_attribute_dll_instantiated_base_class : Warning<
"propagating dll attribute to %select{already instantiated|explicitly specialized}0 "
"base class template without dll attribute is not supported">,
InGroup<DiagGroup<"unsupported-dll-base-class-template">>, DefaultIgnore;
def err_attribute_dll_ambiguous_default_ctor : Error<
"'__declspec(dllexport)' cannot be applied to more than one default constructor in %0">;
def err_attribute_weakref_not_static : Error<
"weakref declaration must have internal linkage">;
def err_attribute_weakref_not_global_context : Error<

@@ -3112,6 +3114,10 @@ def note_addrof_ovl_candidate_disabled_by_enable_if_attr : Note<
def note_ovl_candidate_failed_overload_resolution : Note<
"candidate template ignored: couldn't resolve reference to overloaded "
"function %0">;
def note_ovl_candidate_deduced_mismatch : Note<
"candidate template ignored: deduced type "
"%diff{$ of %ordinal0 parameter does not match adjusted type $ of argument"
"|of %ordinal0 parameter does not match adjusted type of argument}1,2%3">;
def note_ovl_candidate_non_deduced_mismatch : Note<
"candidate template ignored: could not match %diff{$ against $|types}0,1">;
// This note is needed because the above note would sometimes print two

@@ -7969,6 +7975,8 @@ def err_omp_schedule_nonmonotonic_static : Error<
"'nonmonotonic' modifier can only be specified with 'dynamic' or 'guided' schedule kind">;
def err_omp_schedule_nonmonotonic_ordered : Error<
"'schedule' clause with 'nonmonotonic' modifier cannot be specified if an 'ordered' clause is specified">;
def err_omp_ordered_simd : Error<
"'ordered' clause with a parameter can not be specified in '#pragma omp %0' directive">;
} // end of OpenMP category

let CategoryName = "Related Result Type Issue" in {
@@ -165,6 +165,8 @@ LANGOPT(HalfArgsAndReturns, 1, 0, "half args and returns")
LANGOPT(CUDA , 1, 0, "CUDA")
LANGOPT(OpenMP , 1, 0, "OpenMP support")
LANGOPT(OpenMPUseTLS , 1, 0, "Use TLS for threadprivates or runtime calls")
LANGOPT(OpenMPIsDevice , 1, 0, "Generate code only for OpenMP target device")

LANGOPT(CUDAIsDevice , 1, 0, "Compiling for CUDA device")
LANGOPT(CUDAAllowHostCallsFromHostDevice, 1, 0, "Allow host device functions to call host functions")
LANGOPT(CUDADisableTargetCallChecks, 1, 0, "Disable checks for call targets (host, device, etc.)")
@@ -108,7 +108,18 @@ class LangOptions : public LangOptionsBase {

/// \brief Options for parsing comments.
CommentOptions CommentOpts;

/// \brief A list of all -fno-builtin-* function names (e.g., memset).
std::vector<std::string> NoBuiltinFuncs;

/// \brief Triples of the OpenMP targets that the host code codegen should
/// take into account in order to generate accurate offloading descriptors.
std::vector<llvm::Triple> OMPTargetTriples;

/// \brief Name of the IR file that contains the result of the OpenMP target
/// host code generation.
std::string OMPHostIRFile;

LangOptions();

// Define accessors/mutators for language options of enumeration type.

@@ -134,6 +145,10 @@ class LangOptions : public LangOptionsBase {
/// \brief Reset all of the options that are not considered when building a
/// module.
void resetNonModularOptions();

/// \brief Is this a libc/libm function that is no longer recognized as a
/// builtin because a -fno-builtin-* option has been specified?
bool isNoBuiltinFunc(const char *Name) const;
};

/// \brief Floating point control options
@@ -220,6 +220,7 @@ OPENMP_FOR_SIMD_CLAUSE(safelen)
OPENMP_FOR_SIMD_CLAUSE(simdlen)
OPENMP_FOR_SIMD_CLAUSE(linear)
OPENMP_FOR_SIMD_CLAUSE(aligned)
OPENMP_FOR_SIMD_CLAUSE(ordered)

// Clauses allowed for OpenMP directive 'omp sections'.
OPENMP_SECTIONS_CLAUSE(private)

@@ -303,6 +304,7 @@ OPENMP_PARALLEL_FOR_SIMD_CLAUSE(safelen)
OPENMP_PARALLEL_FOR_SIMD_CLAUSE(simdlen)
OPENMP_PARALLEL_FOR_SIMD_CLAUSE(linear)
OPENMP_PARALLEL_FOR_SIMD_CLAUSE(aligned)
OPENMP_PARALLEL_FOR_SIMD_CLAUSE(ordered)

// Clauses allowed for OpenMP directive 'parallel sections'.
OPENMP_PARALLEL_SECTIONS_CLAUSE(if)
@@ -677,6 +677,15 @@ def fcuda_include_gpubinary : Separate<["-"], "fcuda-include-gpubinary">,
def fcuda_target_overloads : Flag<["-"], "fcuda-target-overloads">,
HelpText<"Enable function overloads based on CUDA target attributes.">;

//===----------------------------------------------------------------------===//
// OpenMP Options
//===----------------------------------------------------------------------===//

def fopenmp_is_device : Flag<["-"], "fopenmp-is-device">,
HelpText<"Generate code only for an OpenMP target device.">;
def omp_host_ir_file_path : Separate<["-"], "omp-host-ir-file-path">,
HelpText<"Path to the IR file produced by the frontend for the host.">;

} // let Flags = [CC1Option]
@ -814,7 +814,7 @@ def fno_blocks : Flag<["-"], "fno-blocks">, Group<f_Group>;
|
||||
def fno_borland_extensions : Flag<["-"], "fno-borland-extensions">, Group<f_Group>;
|
||||
def fno_builtin : Flag<["-"], "fno-builtin">, Group<f_Group>, Flags<[CC1Option]>,
|
||||
HelpText<"Disable implicit builtin knowledge of functions">;
|
||||
def fno_builtin_ : Joined<["-"], "fno-builtin-">, Group<clang_ignored_f_Group>,
|
||||
def fno_builtin_ : Joined<["-"], "fno-builtin-">, Group<f_Group>, Flags<[CC1Option]>,
|
||||
HelpText<"Disable implicit builtin knowledge of a specific function">;
|
||||
def fno_math_builtin : Flag<["-"], "fno-math-builtin">, Group<f_Group>, Flags<[CC1Option]>,
|
||||
HelpText<"Disable implicit builtin knowledge of math functions">;
|
||||
@ -1369,6 +1369,7 @@ def mno_xsave : Flag<["-"], "mno-xsave">, Group<m_x86_Features_Group>;
def mno_xsaveopt : Flag<["-"], "mno-xsaveopt">, Group<m_x86_Features_Group>;
def mno_xsavec : Flag<["-"], "mno-xsavec">, Group<m_x86_Features_Group>;
def mno_xsaves : Flag<["-"], "mno-xsaves">, Group<m_x86_Features_Group>;
def mno_pku : Flag<["-"], "mno-pku">, Group<m_x86_Features_Group>;

def munaligned_access : Flag<["-"], "munaligned-access">, Group<m_arm_Features_Group>,
HelpText<"Allow memory accesses to be unaligned (AArch32/AArch64 only)">;
@ -1384,6 +1385,8 @@ def mno_restrict_it: Flag<["-"], "mno-restrict-it">, Group<m_arm_Features_Group>
def marm : Flag<["-"], "marm">, Alias<mno_thumb>;
def ffixed_r9 : Flag<["-"], "ffixed-r9">, Group<m_arm_Features_Group>,
HelpText<"Reserve the r9 register (ARM only)">;
def mno_movt : Flag<["-"], "mno-movt">, Group<m_arm_Features_Group>,
HelpText<"Disallow use of movt/movw pairs (ARM only)">;
def mcrc : Flag<["-"], "mcrc">, Group<m_arm_Features_Group>,
HelpText<"Allow use of CRC instructions (ARM only)">;
def mnocrc : Flag<["-"], "mnocrc">, Group<m_arm_Features_Group>,
@ -1520,6 +1523,7 @@ def mf16c : Flag<["-"], "mf16c">, Group<m_x86_Features_Group>;
def mrtm : Flag<["-"], "mrtm">, Group<m_x86_Features_Group>;
def mprfchw : Flag<["-"], "mprfchw">, Group<m_x86_Features_Group>;
def mrdseed : Flag<["-"], "mrdseed">, Group<m_x86_Features_Group>;
def mpku : Flag<["-"], "mpku">, Group<m_x86_Features_Group>;
def madx : Flag<["-"], "madx">, Group<m_x86_Features_Group>;
def msha : Flag<["-"], "msha">, Group<m_x86_Features_Group>;
def mcx16 : Flag<["-"], "mcx16">, Group<m_x86_Features_Group>;
@ -1647,6 +1651,8 @@ def nostdlib : Flag<["-"], "nostdlib">;
def object : Flag<["-"], "object">;
def o : JoinedOrSeparate<["-"], "o">, Flags<[DriverOption, RenderAsInput, CC1Option, CC1AsOption]>,
HelpText<"Write output to <file>">, MetaVarName<"<file>">;
def omptargets_EQ : CommaJoined<["-"], "omptargets=">, Flags<[DriverOption, CC1Option]>,
HelpText<"Specify comma-separated list of triples OpenMP offloading targets to be supported">;
def pagezero__size : JoinedOrSeparate<["-"], "pagezero_size">;
def pass_exit_codes : Flag<["-", "--"], "pass-exit-codes">, Flags<[Unsupported]>;
def pedantic_errors : Flag<["-", "--"], "pedantic-errors">, Group<pedantic_Group>, Flags<[CC1Option]>;
@ -218,6 +218,9 @@ class CodeGenOptions : public CodeGenOptionsBase {
/// Set of sanitizer checks that trap rather than diagnose.
SanitizerSet SanitizeTrap;

/// \brief A list of all -fno-builtin-* function names (e.g., memset).
std::vector<std::string> NoBuiltinFuncs;

public:
// Define accessors/mutators for code generation options of enumeration type.
#define CODEGENOPT(Name, Bits, Default)
@ -227,6 +230,14 @@ class CodeGenOptions : public CodeGenOptionsBase {
#include "clang/Frontend/CodeGenOptions.def"

CodeGenOptions();

/// \brief Is this a libc/libm function that is no longer recognized as a
/// builtin because a -fno-builtin-* option has been specified?
bool isNoBuiltinFunc(const char *Name) const;

const std::vector<std::string> &getNoBuiltinFuncs() const {
return NoBuiltinFuncs;
}
};

} // end namespace clang
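
Note: the NoBuiltinFuncs/isNoBuiltinFunc pair added above is just a membership test over the
function names collected from -fno-builtin-<name>. A minimal standalone sketch of that check
(hypothetical free function, not part of this import; the class member declared above behaves
the same way):

  #include <algorithm>
  #include <string>
  #include <vector>

  // Returns true if Name was listed via -fno-builtin-<Name>.
  static bool isListedNoBuiltin(const std::vector<std::string> &NoBuiltinFuncs,
                                const char *Name) {
    return std::find(NoBuiltinFuncs.begin(), NoBuiltinFuncs.end(),
                     std::string(Name)) != NoBuiltinFuncs.end();
  }
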
@ -3416,7 +3416,6 @@ class Sema {
|
||||
bool LookupInlineAsmField(StringRef Base, StringRef Member,
|
||||
unsigned &Offset, SourceLocation AsmLoc);
|
||||
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
|
||||
unsigned &Offset,
|
||||
llvm::InlineAsmIdentifierInfo &Info,
|
||||
SourceLocation AsmLoc);
|
||||
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
|
||||
@ -6302,6 +6301,9 @@ class Sema {
|
||||
/// \brief Substitution of the deduced template argument values
|
||||
/// resulted in an error.
|
||||
TDK_SubstitutionFailure,
|
||||
/// \brief After substituting deduced template arguments, a dependent
|
||||
/// parameter type did not match the corresponding argument.
|
||||
TDK_DeducedMismatch,
|
||||
/// \brief A non-dependent component of the parameter did not match the
|
||||
/// corresponding component of the argument.
|
||||
TDK_NonDeducedMismatch,
|
||||
|
@ -140,6 +140,9 @@ class TemplateDeductionInfo {
|
||||
/// TDK_SubstitutionFailure: this argument is the template
|
||||
/// argument we were instantiating when we encountered an error.
|
||||
///
|
||||
/// TDK_DeducedMismatch: this is the parameter type, after substituting
|
||||
/// deduced arguments.
|
||||
///
|
||||
/// TDK_NonDeducedMismatch: this is the component of the 'parameter'
|
||||
/// of the deduction, directly provided in the source code.
|
||||
TemplateArgument FirstArg;
|
||||
@ -147,18 +150,32 @@ class TemplateDeductionInfo {
|
||||
/// \brief The second template argument to which the template
|
||||
/// argument deduction failure refers.
|
||||
///
|
||||
/// TDK_Inconsistent: this argument is the second value deduced
|
||||
/// for the corresponding template parameter.
|
||||
///
|
||||
/// TDK_DeducedMismatch: this is the (adjusted) call argument type.
|
||||
///
|
||||
/// TDK_NonDeducedMismatch: this is the mismatching component of the
|
||||
/// 'argument' of the deduction, from which we are deducing arguments.
|
||||
///
|
||||
/// FIXME: Finish documenting this.
|
||||
TemplateArgument SecondArg;
|
||||
|
||||
/// \brief The expression which caused a deduction failure.
|
||||
///
|
||||
/// TDK_FailedOverloadResolution: this argument is the reference to
|
||||
/// an overloaded function which could not be resolved to a specific
|
||||
/// function.
|
||||
Expr *Expression;
|
||||
union {
|
||||
/// \brief The expression which caused a deduction failure.
|
||||
///
|
||||
/// TDK_FailedOverloadResolution: this argument is the reference to
|
||||
/// an overloaded function which could not be resolved to a specific
|
||||
/// function.
|
||||
Expr *Expression;
|
||||
|
||||
/// \brief The index of the function argument that caused a deduction
|
||||
/// failure.
|
||||
///
|
||||
/// TDK_DeducedMismatch: this is the index of the argument that had a
|
||||
/// different argument type from its substituted parameter type.
|
||||
unsigned CallArgIndex;
|
||||
};
|
||||
|
||||
/// \brief Information on packs that we're currently expanding.
|
||||
///
|
||||
@ -211,6 +228,10 @@ struct DeductionFailureInfo {
|
||||
/// if any.
|
||||
Expr *getExpr();
|
||||
|
||||
/// \brief Return the index of the call argument that this deduction
|
||||
/// failure refers to, if any.
|
||||
llvm::Optional<unsigned> getCallArgIndex();
|
||||
|
||||
/// \brief Free any memory associated with this deduction failure.
|
||||
void Destroy();
|
||||
};
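
Note: the Expression/CallArgIndex pair above now shares storage; which union member is active
is implied by the deduction failure kind. A self-contained sketch of that idea, using a
hypothetical kind enum and struct rather than the actual Sema types:

  #include <cassert>

  enum class FailureKind { FailedOverloadResolution, DeducedMismatch };

  struct FailureData {
    FailureKind Kind;
    union {
      const char *Expression; // active for FailedOverloadResolution
      unsigned CallArgIndex;  // active for DeducedMismatch
    };
  };

  // Read a union member only after checking the kind that makes it valid.
  static unsigned getCallArgIndex(const FailureData &D) {
    assert(D.Kind == FailureKind::DeducedMismatch && "wrong active member");
    return D.CallArgIndex;
  }
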
|
||||
|
@ -3658,14 +3658,13 @@ static int CmpProtocolNames(ObjCProtocolDecl *const *LHS,
|
||||
return DeclarationName::compare((*LHS)->getDeclName(), (*RHS)->getDeclName());
|
||||
}
|
||||
|
||||
static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
|
||||
unsigned NumProtocols) {
|
||||
if (NumProtocols == 0) return true;
|
||||
static bool areSortedAndUniqued(ArrayRef<ObjCProtocolDecl *> Protocols) {
|
||||
if (Protocols.empty()) return true;
|
||||
|
||||
if (Protocols[0]->getCanonicalDecl() != Protocols[0])
|
||||
return false;
|
||||
|
||||
for (unsigned i = 1; i != NumProtocols; ++i)
|
||||
for (unsigned i = 1; i != Protocols.size(); ++i)
|
||||
if (CmpProtocolNames(&Protocols[i - 1], &Protocols[i]) >= 0 ||
|
||||
Protocols[i]->getCanonicalDecl() != Protocols[i])
|
||||
return false;
|
||||
@ -3730,8 +3729,7 @@ QualType ASTContext::getObjCObjectType(
|
||||
[&](QualType type) {
|
||||
return type.isCanonical();
|
||||
});
|
||||
bool protocolsSorted = areSortedAndUniqued(protocols.data(),
|
||||
protocols.size());
|
||||
bool protocolsSorted = areSortedAndUniqued(protocols);
|
||||
if (!typeArgsAreCanonical || !protocolsSorted || !baseType.isCanonical()) {
|
||||
// Determine the canonical type arguments.
|
||||
ArrayRef<QualType> canonTypeArgs;
|
||||
|
@ -3711,8 +3711,7 @@ DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
|
||||
ArrayRef<Expr*> IndexExprs,
|
||||
SourceLocation ColonOrEqualLoc,
|
||||
bool UsesColonSyntax, Expr *Init) {
|
||||
void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
|
||||
sizeof(Stmt *) * (IndexExprs.size() + 1),
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(IndexExprs.size() + 1),
|
||||
llvm::alignOf<DesignatedInitExpr>());
|
||||
return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
|
||||
ColonOrEqualLoc, UsesColonSyntax,
|
||||
@ -3721,8 +3720,8 @@ DesignatedInitExpr::Create(const ASTContext &C, Designator *Designators,
|
||||
|
||||
DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(const ASTContext &C,
|
||||
unsigned NumIndexExprs) {
|
||||
void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
|
||||
sizeof(Stmt *) * (NumIndexExprs + 1), 8);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Stmt *>(NumIndexExprs + 1),
|
||||
llvm::alignOf<DesignatedInitExpr>());
|
||||
return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
|
||||
}
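
Note: this hunk, and the ones that follow for PseudoObjectExpr, CXXDefaultArgExpr and the OpenMP
clauses, replace hand-written "sizeof(X) + N * sizeof(T)" arithmetic with llvm::TrailingObjects.
A minimal sketch of the pattern with a hypothetical node class, assuming the standard API from
llvm/Support/TrailingObjects.h:

  #include "llvm/Support/TrailingObjects.h"
  #include <cstdint>
  #include <new>

  // Hypothetical node storing a variable number of operands directly after
  // the object, the same layout the Expr classes above switch to.
  class Node final : private llvm::TrailingObjects<Node, uint64_t> {
    friend TrailingObjects;
    unsigned NumOperands;
    explicit Node(unsigned N) : NumOperands(N) {}

  public:
    static Node *create(unsigned N) {
      // totalSizeToAlloc<> adds the trailing array (with correct alignment)
      // to sizeof(Node), replacing the manual size computation.
      void *Mem = ::operator new(totalSizeToAlloc<uint64_t>(N));
      return new (Mem) Node(N);
    }
    uint64_t *operands() { return getTrailingObjects<uint64_t>(); }
    unsigned size() const { return NumOperands; }
  };
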
|
||||
|
||||
@ -3764,22 +3763,19 @@ SourceLocation DesignatedInitExpr::getLocEnd() const {
|
||||
|
||||
Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) const {
|
||||
assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
|
||||
Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
|
||||
return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
|
||||
return getSubExpr(D.ArrayOrRange.Index + 1);
|
||||
}
|
||||
|
||||
Expr *DesignatedInitExpr::getArrayRangeStart(const Designator &D) const {
|
||||
assert(D.Kind == Designator::ArrayRangeDesignator &&
|
||||
"Requires array range designator");
|
||||
Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
|
||||
return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
|
||||
return getSubExpr(D.ArrayOrRange.Index + 1);
|
||||
}
|
||||
|
||||
Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator &D) const {
|
||||
assert(D.Kind == Designator::ArrayRangeDesignator &&
|
||||
"Requires array range designator");
|
||||
Stmt *const *SubExprs = reinterpret_cast<Stmt *const *>(this + 1);
|
||||
return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
|
||||
return getSubExpr(D.ArrayOrRange.Index + 2);
|
||||
}
|
||||
|
||||
/// \brief Replaces the designator at index @p Idx with the series
|
||||
@ -3863,9 +3859,9 @@ const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
|
||||
PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &Context,
|
||||
EmptyShell sh,
|
||||
unsigned numSemanticExprs) {
|
||||
void *buffer = Context.Allocate(sizeof(PseudoObjectExpr) +
|
||||
(1 + numSemanticExprs) * sizeof(Expr*),
|
||||
llvm::alignOf<PseudoObjectExpr>());
|
||||
void *buffer =
|
||||
Context.Allocate(totalSizeToAlloc<Expr *>(1 + numSemanticExprs),
|
||||
llvm::alignOf<PseudoObjectExpr>());
|
||||
return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
|
||||
}
|
||||
|
||||
@ -3892,8 +3888,7 @@ PseudoObjectExpr *PseudoObjectExpr::Create(const ASTContext &C, Expr *syntax,
|
||||
assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary);
|
||||
}
|
||||
|
||||
void *buffer = C.Allocate(sizeof(PseudoObjectExpr) +
|
||||
(1 + semantics.size()) * sizeof(Expr*),
|
||||
void *buffer = C.Allocate(totalSizeToAlloc<Expr *>(semantics.size() + 1),
|
||||
llvm::alignOf<PseudoObjectExpr>());
|
||||
return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
|
||||
resultIndex);
|
||||
|
@ -766,7 +766,7 @@ const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
|
||||
CXXDefaultArgExpr *
|
||||
CXXDefaultArgExpr::Create(const ASTContext &C, SourceLocation Loc,
|
||||
ParmVarDecl *Param, Expr *SubExpr) {
|
||||
void *Mem = C.Allocate(sizeof(CXXDefaultArgExpr) + sizeof(Stmt *));
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(1));
|
||||
return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param,
|
||||
SubExpr);
|
||||
}
|
||||
@ -924,29 +924,22 @@ LambdaCaptureKind LambdaCapture::getCaptureKind() const {
|
||||
return CapByCopy ? LCK_ByCopy : LCK_ByRef;
|
||||
}
|
||||
|
||||
LambdaExpr::LambdaExpr(QualType T,
|
||||
SourceRange IntroducerRange,
|
||||
LambdaExpr::LambdaExpr(QualType T, SourceRange IntroducerRange,
|
||||
LambdaCaptureDefault CaptureDefault,
|
||||
SourceLocation CaptureDefaultLoc,
|
||||
ArrayRef<Capture> Captures,
|
||||
bool ExplicitParams,
|
||||
bool ExplicitResultType,
|
||||
ArrayRef<Expr *> CaptureInits,
|
||||
ArrayRef<LambdaCapture> Captures, bool ExplicitParams,
|
||||
bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
|
||||
ArrayRef<VarDecl *> ArrayIndexVars,
|
||||
ArrayRef<unsigned> ArrayIndexStarts,
|
||||
SourceLocation ClosingBrace,
|
||||
bool ContainsUnexpandedParameterPack)
|
||||
: Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary,
|
||||
T->isDependentType(), T->isDependentType(), T->isDependentType(),
|
||||
ContainsUnexpandedParameterPack),
|
||||
IntroducerRange(IntroducerRange),
|
||||
CaptureDefaultLoc(CaptureDefaultLoc),
|
||||
NumCaptures(Captures.size()),
|
||||
CaptureDefault(CaptureDefault),
|
||||
ExplicitParams(ExplicitParams),
|
||||
ExplicitResultType(ExplicitResultType),
|
||||
ClosingBrace(ClosingBrace)
|
||||
{
|
||||
: Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary, T->isDependentType(),
|
||||
T->isDependentType(), T->isDependentType(),
|
||||
ContainsUnexpandedParameterPack),
|
||||
IntroducerRange(IntroducerRange), CaptureDefaultLoc(CaptureDefaultLoc),
|
||||
NumCaptures(Captures.size()), CaptureDefault(CaptureDefault),
|
||||
ExplicitParams(ExplicitParams), ExplicitResultType(ExplicitResultType),
|
||||
ClosingBrace(ClosingBrace) {
|
||||
assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
|
||||
CXXRecordDecl *Class = getLambdaClass();
|
||||
CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
|
||||
@ -957,8 +950,9 @@ LambdaExpr::LambdaExpr(QualType T,
|
||||
const ASTContext &Context = Class->getASTContext();
|
||||
Data.NumCaptures = NumCaptures;
|
||||
Data.NumExplicitCaptures = 0;
|
||||
Data.Captures = (Capture *)Context.Allocate(sizeof(Capture) * NumCaptures);
|
||||
Capture *ToCapture = Data.Captures;
|
||||
Data.Captures =
|
||||
(LambdaCapture *)Context.Allocate(sizeof(LambdaCapture) * NumCaptures);
|
||||
LambdaCapture *ToCapture = Data.Captures;
|
||||
for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
|
||||
if (Captures[I].isExplicit())
|
||||
++Data.NumExplicitCaptures;
|
||||
@ -986,30 +980,20 @@ LambdaExpr::LambdaExpr(QualType T,
|
||||
}
|
||||
}
|
||||
|
||||
LambdaExpr *LambdaExpr::Create(const ASTContext &Context,
|
||||
CXXRecordDecl *Class,
|
||||
SourceRange IntroducerRange,
|
||||
LambdaCaptureDefault CaptureDefault,
|
||||
SourceLocation CaptureDefaultLoc,
|
||||
ArrayRef<Capture> Captures,
|
||||
bool ExplicitParams,
|
||||
bool ExplicitResultType,
|
||||
ArrayRef<Expr *> CaptureInits,
|
||||
ArrayRef<VarDecl *> ArrayIndexVars,
|
||||
ArrayRef<unsigned> ArrayIndexStarts,
|
||||
SourceLocation ClosingBrace,
|
||||
bool ContainsUnexpandedParameterPack) {
|
||||
LambdaExpr *LambdaExpr::Create(
|
||||
const ASTContext &Context, CXXRecordDecl *Class,
|
||||
SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault,
|
||||
SourceLocation CaptureDefaultLoc, ArrayRef<LambdaCapture> Captures,
|
||||
bool ExplicitParams, bool ExplicitResultType, ArrayRef<Expr *> CaptureInits,
|
||||
ArrayRef<VarDecl *> ArrayIndexVars, ArrayRef<unsigned> ArrayIndexStarts,
|
||||
SourceLocation ClosingBrace, bool ContainsUnexpandedParameterPack) {
|
||||
// Determine the type of the expression (i.e., the type of the
|
||||
// function object we're creating).
|
||||
QualType T = Context.getTypeDeclType(Class);
|
||||
|
||||
unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1);
|
||||
if (!ArrayIndexVars.empty()) {
|
||||
Size += sizeof(unsigned) * (Captures.size() + 1);
|
||||
// Realign for following VarDecl array.
|
||||
Size = llvm::RoundUpToAlignment(Size, llvm::alignOf<VarDecl*>());
|
||||
Size += sizeof(VarDecl *) * ArrayIndexVars.size();
|
||||
}
|
||||
unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
|
||||
Captures.size() + 1, ArrayIndexVars.empty() ? 0 : Captures.size() + 1,
|
||||
ArrayIndexVars.size());
|
||||
void *Mem = Context.Allocate(Size);
|
||||
return new (Mem) LambdaExpr(T, IntroducerRange,
|
||||
CaptureDefault, CaptureDefaultLoc, Captures,
|
||||
@ -1021,10 +1005,9 @@ LambdaExpr *LambdaExpr::Create(const ASTContext &Context,
|
||||
LambdaExpr *LambdaExpr::CreateDeserialized(const ASTContext &C,
|
||||
unsigned NumCaptures,
|
||||
unsigned NumArrayIndexVars) {
|
||||
unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (NumCaptures + 1);
|
||||
if (NumArrayIndexVars)
|
||||
Size += sizeof(VarDecl) * NumArrayIndexVars
|
||||
+ sizeof(unsigned) * (NumCaptures + 1);
|
||||
unsigned Size = totalSizeToAlloc<Stmt *, unsigned, VarDecl *>(
|
||||
NumCaptures + 1, NumArrayIndexVars ? NumCaptures + 1 : 0,
|
||||
NumArrayIndexVars);
|
||||
void *Mem = C.Allocate(Size);
|
||||
return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
|
||||
}
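
Note: LambdaExpr above packs three different trailing arrays (Stmt *, unsigned, VarDecl *) behind
a single allocation. With more than one trailing type, the derived class also supplies
numTrailingObjects overloads so the base class can locate each array. A small hypothetical sketch
of that shape (two trailing arrays, not the real LambdaExpr layout):

  #include "llvm/Support/TrailingObjects.h"
  #include <cstddef>
  #include <new>

  class Packed final : private llvm::TrailingObjects<Packed, void *, unsigned> {
    friend TrailingObjects;
    unsigned NumPtrs, NumIdx;

    // Lengths of each trailing array, used by the base class to compute offsets.
    size_t numTrailingObjects(OverloadToken<void *>) const { return NumPtrs; }
    size_t numTrailingObjects(OverloadToken<unsigned>) const { return NumIdx; }

    Packed(unsigned P, unsigned I) : NumPtrs(P), NumIdx(I) {}

  public:
    static Packed *create(unsigned P, unsigned I) {
      void *Mem = ::operator new(totalSizeToAlloc<void *, unsigned>(P, I));
      return new (Mem) Packed(P, I);
    }
    void **ptrs() { return getTrailingObjects<void *>(); }
    unsigned *indices() { return getTrailingObjects<unsigned>(); }
  };
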
|
||||
@ -1108,7 +1091,7 @@ CompoundStmt *LambdaExpr::getBody() const {
|
||||
*const_cast<clang::Stmt **>(&getStoredStmts()[NumCaptures]) =
|
||||
getCallOperator()->getBody();
|
||||
|
||||
return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
|
||||
return static_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
|
||||
}
|
||||
|
||||
bool LambdaExpr::isMutable() const {
|
||||
@ -1125,14 +1108,13 @@ ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
|
||||
SubExpr(subexpr) {
|
||||
ExprWithCleanupsBits.NumObjects = objects.size();
|
||||
for (unsigned i = 0, e = objects.size(); i != e; ++i)
|
||||
getObjectsBuffer()[i] = objects[i];
|
||||
getTrailingObjects<CleanupObject>()[i] = objects[i];
|
||||
}
|
||||
|
||||
ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C, Expr *subexpr,
|
||||
ArrayRef<CleanupObject> objects) {
|
||||
size_t size = sizeof(ExprWithCleanups)
|
||||
+ objects.size() * sizeof(CleanupObject);
|
||||
void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
|
||||
void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(objects.size()),
|
||||
llvm::alignOf<ExprWithCleanups>());
|
||||
return new (buffer) ExprWithCleanups(subexpr, objects);
|
||||
}
|
||||
|
||||
@ -1144,8 +1126,8 @@ ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
|
||||
ExprWithCleanups *ExprWithCleanups::Create(const ASTContext &C,
|
||||
EmptyShell empty,
|
||||
unsigned numObjects) {
|
||||
size_t size = sizeof(ExprWithCleanups) + numObjects * sizeof(CleanupObject);
|
||||
void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
|
||||
void *buffer = C.Allocate(totalSizeToAlloc<CleanupObject>(numObjects),
|
||||
llvm::alignOf<ExprWithCleanups>());
|
||||
return new (buffer) ExprWithCleanups(empty, numObjects);
|
||||
}
|
||||
|
||||
@ -1165,7 +1147,7 @@ CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
|
||||
LParenLoc(LParenLoc),
|
||||
RParenLoc(RParenLoc),
|
||||
NumArgs(Args.size()) {
|
||||
Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
|
||||
Expr **StoredArgs = getTrailingObjects<Expr *>();
|
||||
for (unsigned I = 0; I != Args.size(); ++I) {
|
||||
if (Args[I]->containsUnexpandedParameterPack())
|
||||
ExprBits.ContainsUnexpandedParameterPack = true;
|
||||
@ -1180,16 +1162,14 @@ CXXUnresolvedConstructExpr::Create(const ASTContext &C,
|
||||
SourceLocation LParenLoc,
|
||||
ArrayRef<Expr*> Args,
|
||||
SourceLocation RParenLoc) {
|
||||
void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
|
||||
sizeof(Expr *) * Args.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Args.size()));
|
||||
return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc, Args, RParenLoc);
|
||||
}
|
||||
|
||||
CXXUnresolvedConstructExpr *
|
||||
CXXUnresolvedConstructExpr::CreateEmpty(const ASTContext &C, unsigned NumArgs) {
|
||||
Stmt::EmptyShell Empty;
|
||||
void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
|
||||
sizeof(Expr *) * NumArgs);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumArgs));
|
||||
return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
|
||||
}
|
||||
|
||||
@ -1404,16 +1384,16 @@ SizeOfPackExpr::Create(ASTContext &Context, SourceLocation OperatorLoc,
|
||||
SourceLocation RParenLoc,
|
||||
Optional<unsigned> Length,
|
||||
ArrayRef<TemplateArgument> PartialArgs) {
|
||||
void *Storage = Context.Allocate(
|
||||
sizeof(SizeOfPackExpr) + sizeof(TemplateArgument) * PartialArgs.size());
|
||||
void *Storage =
|
||||
Context.Allocate(totalSizeToAlloc<TemplateArgument>(PartialArgs.size()));
|
||||
return new (Storage) SizeOfPackExpr(Context.getSizeType(), OperatorLoc, Pack,
|
||||
PackLoc, RParenLoc, Length, PartialArgs);
|
||||
}
|
||||
|
||||
SizeOfPackExpr *SizeOfPackExpr::CreateDeserialized(ASTContext &Context,
|
||||
unsigned NumPartialArgs) {
|
||||
void *Storage = Context.Allocate(
|
||||
sizeof(SizeOfPackExpr) + sizeof(TemplateArgument) * NumPartialArgs);
|
||||
void *Storage =
|
||||
Context.Allocate(totalSizeToAlloc<TemplateArgument>(NumPartialArgs));
|
||||
return new (Storage) SizeOfPackExpr(EmptyShell(), NumPartialArgs);
|
||||
}
|
||||
|
||||
@ -1440,24 +1420,22 @@ FunctionParmPackExpr::FunctionParmPackExpr(QualType T, ParmVarDecl *ParamPack,
|
||||
ParamPack(ParamPack), NameLoc(NameLoc), NumParameters(NumParams) {
|
||||
if (Params)
|
||||
std::uninitialized_copy(Params, Params + NumParams,
|
||||
reinterpret_cast<ParmVarDecl **>(this + 1));
|
||||
getTrailingObjects<ParmVarDecl *>());
|
||||
}
|
||||
|
||||
FunctionParmPackExpr *
|
||||
FunctionParmPackExpr::Create(const ASTContext &Context, QualType T,
|
||||
ParmVarDecl *ParamPack, SourceLocation NameLoc,
|
||||
ArrayRef<ParmVarDecl *> Params) {
|
||||
return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
|
||||
sizeof(ParmVarDecl*) * Params.size()))
|
||||
FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
|
||||
return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(Params.size())))
|
||||
FunctionParmPackExpr(T, ParamPack, NameLoc, Params.size(), Params.data());
|
||||
}
|
||||
|
||||
FunctionParmPackExpr *
|
||||
FunctionParmPackExpr::CreateEmpty(const ASTContext &Context,
|
||||
unsigned NumParams) {
|
||||
return new (Context.Allocate(sizeof(FunctionParmPackExpr) +
|
||||
sizeof(ParmVarDecl*) * NumParams))
|
||||
FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr);
|
||||
return new (Context.Allocate(totalSizeToAlloc<ParmVarDecl *>(NumParams)))
|
||||
FunctionParmPackExpr(QualType(), nullptr, SourceLocation(), 0, nullptr);
|
||||
}
|
||||
|
||||
void MaterializeTemporaryExpr::setExtendingDecl(const ValueDecl *ExtendedBy,
|
||||
@ -1494,8 +1472,8 @@ TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
|
||||
TypeTraitExprBits.Value = Value;
|
||||
TypeTraitExprBits.NumArgs = Args.size();
|
||||
|
||||
TypeSourceInfo **ToArgs = getTypeSourceInfos();
|
||||
|
||||
TypeSourceInfo **ToArgs = getTrailingObjects<TypeSourceInfo *>();
|
||||
|
||||
for (unsigned I = 0, N = Args.size(); I != N; ++I) {
|
||||
if (Args[I]->getType()->isDependentType())
|
||||
setValueDependent(true);
|
||||
@ -1514,15 +1492,13 @@ TypeTraitExpr *TypeTraitExpr::Create(const ASTContext &C, QualType T,
|
||||
ArrayRef<TypeSourceInfo *> Args,
|
||||
SourceLocation RParenLoc,
|
||||
bool Value) {
|
||||
unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * Args.size();
|
||||
void *Mem = C.Allocate(Size);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<TypeSourceInfo *>(Args.size()));
|
||||
return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value);
|
||||
}
|
||||
|
||||
TypeTraitExpr *TypeTraitExpr::CreateDeserialized(const ASTContext &C,
|
||||
unsigned NumArgs) {
|
||||
unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * NumArgs;
|
||||
void *Mem = C.Allocate(Size);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<TypeSourceInfo *>(NumArgs));
|
||||
return new (Mem) TypeTraitExpr(EmptyShell());
|
||||
}
|
||||
|
||||
|
@ -39,16 +39,14 @@ ObjCArrayLiteral *ObjCArrayLiteral::Create(const ASTContext &C,
|
||||
ArrayRef<Expr *> Elements,
|
||||
QualType T, ObjCMethodDecl *Method,
|
||||
SourceRange SR) {
|
||||
void *Mem =
|
||||
C.Allocate(sizeof(ObjCArrayLiteral) + Elements.size() * sizeof(Expr *));
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(Elements.size()));
|
||||
return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
|
||||
}
|
||||
|
||||
ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(const ASTContext &C,
|
||||
unsigned NumElements) {
|
||||
|
||||
void *Mem =
|
||||
C.Allocate(sizeof(ObjCArrayLiteral) + NumElements * sizeof(Expr *));
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumElements));
|
||||
return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
|
||||
}
|
||||
|
||||
@ -60,8 +58,9 @@ ObjCDictionaryLiteral::ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
|
||||
false, false),
|
||||
NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
|
||||
DictWithObjectsMethod(method) {
|
||||
KeyValuePair *KeyValues = getKeyValues();
|
||||
ExpansionData *Expansions = getExpansionData();
|
||||
KeyValuePair *KeyValues = getTrailingObjects<KeyValuePair>();
|
||||
ExpansionData *Expansions =
|
||||
HasPackExpansions ? getTrailingObjects<ExpansionData>() : nullptr;
|
||||
for (unsigned I = 0; I < NumElements; I++) {
|
||||
if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
|
||||
VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
|
||||
@ -91,23 +90,16 @@ ObjCDictionaryLiteral::Create(const ASTContext &C,
|
||||
ArrayRef<ObjCDictionaryElement> VK,
|
||||
bool HasPackExpansions, QualType T,
|
||||
ObjCMethodDecl *method, SourceRange SR) {
|
||||
unsigned ExpansionsSize = 0;
|
||||
if (HasPackExpansions)
|
||||
ExpansionsSize = sizeof(ExpansionData) * VK.size();
|
||||
|
||||
void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
|
||||
sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<KeyValuePair, ExpansionData>(
|
||||
VK.size(), HasPackExpansions ? VK.size() : 0));
|
||||
return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
|
||||
}
|
||||
|
||||
ObjCDictionaryLiteral *
|
||||
ObjCDictionaryLiteral::CreateEmpty(const ASTContext &C, unsigned NumElements,
|
||||
bool HasPackExpansions) {
|
||||
unsigned ExpansionsSize = 0;
|
||||
if (HasPackExpansions)
|
||||
ExpansionsSize = sizeof(ExpansionData) * NumElements;
|
||||
void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
|
||||
sizeof(KeyValuePair) * NumElements + ExpansionsSize);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<KeyValuePair, ExpansionData>(
|
||||
NumElements, HasPackExpansions ? NumElements : 0));
|
||||
return new (Mem)
|
||||
ObjCDictionaryLiteral(EmptyShell(), NumElements, HasPackExpansions);
|
||||
}
|
||||
@ -122,15 +114,6 @@ QualType ObjCPropertyRefExpr::getReceiverType(const ASTContext &ctx) const {
|
||||
return getBase()->getType();
|
||||
}
|
||||
|
||||
ObjCSubscriptRefExpr *
|
||||
ObjCSubscriptRefExpr::Create(const ASTContext &C, Expr *base, Expr *key,
|
||||
QualType T, ObjCMethodDecl *getMethod,
|
||||
ObjCMethodDecl *setMethod, SourceLocation RB) {
|
||||
void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
|
||||
return new (Mem) ObjCSubscriptRefExpr(
|
||||
base, key, T, VK_LValue, OK_ObjCSubscript, getMethod, setMethod, RB);
|
||||
}
|
||||
|
||||
ObjCMessageExpr::ObjCMessageExpr(QualType T, ExprValueKind VK,
|
||||
SourceLocation LBracLoc,
|
||||
SourceLocation SuperLoc, bool IsInstanceSuper,
|
||||
@ -293,11 +276,9 @@ ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C,
|
||||
|
||||
ObjCMessageExpr *ObjCMessageExpr::alloc(const ASTContext &C, unsigned NumArgs,
|
||||
unsigned NumStoredSelLocs) {
|
||||
unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
|
||||
NumArgs * sizeof(Expr *) +
|
||||
NumStoredSelLocs * sizeof(SourceLocation);
|
||||
return (ObjCMessageExpr *)C.Allocate(
|
||||
Size, llvm::AlignOf<ObjCMessageExpr>::Alignment);
|
||||
totalSizeToAlloc<void *, SourceLocation>(NumArgs + 1, NumStoredSelLocs),
|
||||
llvm::AlignOf<ObjCMessageExpr>::Alignment);
|
||||
}
|
||||
|
||||
void ObjCMessageExpr::getSelectorLocs(
|
||||
@ -358,7 +339,7 @@ ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
|
||||
Stmt::child_range ObjCMessageExpr::children() {
|
||||
Stmt **begin;
|
||||
if (getReceiverKind() == Instance)
|
||||
begin = reinterpret_cast<Stmt **>(this + 1);
|
||||
begin = reinterpret_cast<Stmt **>(getTrailingObjects<void *>());
|
||||
else
|
||||
begin = reinterpret_cast<Stmt **>(getArgs());
|
||||
return child_range(begin,
|
||||
|
@ -127,8 +127,6 @@ class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
|
||||
CXXCtorType CT, uint32_t Size, uint32_t NVOffset,
|
||||
int32_t VBPtrOffset, uint32_t VBIndex,
|
||||
raw_ostream &Out) override;
|
||||
void mangleCXXCatchHandlerType(QualType T, uint32_t Flags,
|
||||
raw_ostream &Out) override;
|
||||
void mangleCXXRTTI(QualType T, raw_ostream &Out) override;
|
||||
void mangleCXXRTTIName(QualType T, raw_ostream &Out) override;
|
||||
void mangleCXXRTTIBaseClassDescriptor(const CXXRecordDecl *Derived,
|
||||
@ -221,7 +219,7 @@ class MicrosoftCXXNameMangler {
|
||||
typedef llvm::SmallVector<std::string, 10> BackRefVec;
|
||||
BackRefVec NameBackReferences;
|
||||
|
||||
typedef llvm::DenseMap<void *, unsigned> ArgBackRefMap;
|
||||
typedef llvm::DenseMap<const void *, unsigned> ArgBackRefMap;
|
||||
ArgBackRefMap TypeBackReferences;
|
||||
|
||||
typedef std::set<int> PassObjectSizeArgsSet;
|
||||
@ -1489,7 +1487,7 @@ void MicrosoftCXXNameMangler::manglePassObjectSizeArg(
|
||||
int Type = POSA->getType();
|
||||
|
||||
auto Iter = PassObjectSizeArgs.insert(Type).first;
|
||||
void *TypePtr = (void *)&*Iter;
|
||||
auto *TypePtr = (const void *)&*Iter;
|
||||
ArgBackRefMap::iterator Found = TypeBackReferences.find(TypePtr);
|
||||
|
||||
if (Found == TypeBackReferences.end()) {
|
||||
@ -2215,7 +2213,8 @@ void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T,
|
||||
void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
|
||||
Qualifiers Quals, SourceRange Range) {
|
||||
QualType PointeeType = T->getPointeeType();
|
||||
Out << (Quals.hasVolatile() ? 'B' : 'A');
|
||||
assert(!Quals.hasConst() && !Quals.hasVolatile() && "unexpected qualifier!");
|
||||
Out << 'A';
|
||||
manglePointerExtQualifiers(Quals, PointeeType);
|
||||
mangleType(PointeeType, Range);
|
||||
}
|
||||
@ -2226,7 +2225,8 @@ void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T,
|
||||
void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T,
|
||||
Qualifiers Quals, SourceRange Range) {
|
||||
QualType PointeeType = T->getPointeeType();
|
||||
Out << (Quals.hasVolatile() ? "$$R" : "$$Q");
|
||||
assert(!Quals.hasConst() && !Quals.hasVolatile() && "unexpected qualifier!");
|
||||
Out << "$$Q";
|
||||
manglePointerExtQualifiers(Quals, PointeeType);
|
||||
mangleType(PointeeType, Range);
|
||||
}
|
||||
@ -2620,15 +2620,6 @@ void MicrosoftMangleContextImpl::mangleCXXRTTIName(QualType T,
|
||||
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
|
||||
}
|
||||
|
||||
void MicrosoftMangleContextImpl::mangleCXXCatchHandlerType(QualType T,
|
||||
uint32_t Flags,
|
||||
raw_ostream &Out) {
|
||||
MicrosoftCXXNameMangler Mangler(*this, Out);
|
||||
Mangler.getStream() << "llvm.eh.handlertype.";
|
||||
Mangler.mangleType(T, SourceRange(), MicrosoftCXXNameMangler::QMM_Result);
|
||||
Mangler.getStream() << '.' << Flags;
|
||||
}
|
||||
|
||||
void MicrosoftMangleContextImpl::mangleCXXVirtualDisplacementMap(
|
||||
const CXXRecordDecl *SrcRD, const CXXRecordDecl *DstRD, raw_ostream &Out) {
|
||||
MicrosoftCXXNameMangler Mangler(*this, Out);
|
||||
|
@ -40,9 +40,7 @@ OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
SourceLocation LParenLoc, SourceLocation EndLoc,
|
||||
ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL) {
|
||||
// Allocate space for private variables and initializer expressions.
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
2 * sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * VL.size()));
|
||||
OMPPrivateClause *Clause =
|
||||
new (Mem) OMPPrivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -52,9 +50,7 @@ OMPPrivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
|
||||
OMPPrivateClause *OMPPrivateClause::CreateEmpty(const ASTContext &C,
|
||||
unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPPrivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
2 * sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(2 * N));
|
||||
return new (Mem) OMPPrivateClause(N);
|
||||
}
|
||||
|
||||
@ -75,9 +71,7 @@ OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
SourceLocation LParenLoc, SourceLocation EndLoc,
|
||||
ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL,
|
||||
ArrayRef<Expr *> InitVL) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
3 * sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(3 * VL.size()));
|
||||
OMPFirstprivateClause *Clause =
|
||||
new (Mem) OMPFirstprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -88,9 +82,7 @@ OMPFirstprivateClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
|
||||
OMPFirstprivateClause *OMPFirstprivateClause::CreateEmpty(const ASTContext &C,
|
||||
unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFirstprivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
3 * sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(3 * N));
|
||||
return new (Mem) OMPFirstprivateClause(N);
|
||||
}
|
||||
|
||||
@ -126,9 +118,7 @@ OMPLastprivateClause *OMPLastprivateClause::Create(
|
||||
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
|
||||
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
|
||||
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
5 * sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
|
||||
OMPLastprivateClause *Clause =
|
||||
new (Mem) OMPLastprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -140,9 +130,7 @@ OMPLastprivateClause *OMPLastprivateClause::Create(
|
||||
|
||||
OMPLastprivateClause *OMPLastprivateClause::CreateEmpty(const ASTContext &C,
|
||||
unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLastprivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
5 * sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
|
||||
return new (Mem) OMPLastprivateClause(N);
|
||||
}
|
||||
|
||||
@ -151,9 +139,7 @@ OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
|
||||
SourceLocation LParenLoc,
|
||||
SourceLocation EndLoc,
|
||||
ArrayRef<Expr *> VL) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
|
||||
OMPSharedClause *Clause =
|
||||
new (Mem) OMPSharedClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -161,9 +147,7 @@ OMPSharedClause *OMPSharedClause::Create(const ASTContext &C,
|
||||
}
|
||||
|
||||
OMPSharedClause *OMPSharedClause::CreateEmpty(const ASTContext &C, unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPSharedClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
|
||||
return new (Mem) OMPSharedClause(N);
|
||||
}
|
||||
|
||||
@ -198,9 +182,7 @@ OMPLinearClause *OMPLinearClause::Create(
|
||||
ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep) {
|
||||
// Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
|
||||
// (Step and CalcStep).
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
(5 * VL.size() + 2) * sizeof(Expr *));
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size() + 2));
|
||||
OMPLinearClause *Clause = new (Mem) OMPLinearClause(
|
||||
StartLoc, LParenLoc, Modifier, ModifierLoc, ColonLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -221,9 +203,7 @@ OMPLinearClause *OMPLinearClause::CreateEmpty(const ASTContext &C,
|
||||
unsigned NumVars) {
|
||||
// Allocate space for 4 lists (Vars, Inits, Updates, Finals) and 2 expressions
|
||||
// (Step and CalcStep).
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPLinearClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
(5 * NumVars + 2) * sizeof(Expr *));
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * NumVars + 2));
|
||||
return new (Mem) OMPLinearClause(NumVars);
|
||||
}
|
||||
|
||||
@ -231,9 +211,7 @@ OMPAlignedClause *
|
||||
OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
SourceLocation LParenLoc, SourceLocation ColonLoc,
|
||||
SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * (VL.size() + 1));
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size() + 1));
|
||||
OMPAlignedClause *Clause = new (Mem)
|
||||
OMPAlignedClause(StartLoc, LParenLoc, ColonLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -243,9 +221,7 @@ OMPAlignedClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
|
||||
OMPAlignedClause *OMPAlignedClause::CreateEmpty(const ASTContext &C,
|
||||
unsigned NumVars) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPAlignedClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * (NumVars + 1));
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(NumVars + 1));
|
||||
return new (Mem) OMPAlignedClause(NumVars);
|
||||
}
|
||||
|
||||
@ -275,9 +251,7 @@ OMPCopyinClause *OMPCopyinClause::Create(
|
||||
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
|
||||
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
|
||||
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
4 * sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * VL.size()));
|
||||
OMPCopyinClause *Clause =
|
||||
new (Mem) OMPCopyinClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -288,9 +262,7 @@ OMPCopyinClause *OMPCopyinClause::Create(
|
||||
}
|
||||
|
||||
OMPCopyinClause *OMPCopyinClause::CreateEmpty(const ASTContext &C, unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyinClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
4 * sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * N));
|
||||
return new (Mem) OMPCopyinClause(N);
|
||||
}
|
||||
|
||||
@ -320,9 +292,7 @@ OMPCopyprivateClause *OMPCopyprivateClause::Create(
|
||||
const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc,
|
||||
SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs,
|
||||
ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
4 * sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * VL.size()));
|
||||
OMPCopyprivateClause *Clause =
|
||||
new (Mem) OMPCopyprivateClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -334,9 +304,7 @@ OMPCopyprivateClause *OMPCopyprivateClause::Create(
|
||||
|
||||
OMPCopyprivateClause *OMPCopyprivateClause::CreateEmpty(const ASTContext &C,
|
||||
unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPCopyprivateClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
4 * sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(4 * N));
|
||||
return new (Mem) OMPCopyprivateClause(N);
|
||||
}
|
||||
|
||||
@ -373,9 +341,7 @@ OMPReductionClause *OMPReductionClause::Create(
|
||||
NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo,
|
||||
ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs,
|
||||
ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
5 * sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * VL.size()));
|
||||
OMPReductionClause *Clause = new (Mem) OMPReductionClause(
|
||||
StartLoc, LParenLoc, EndLoc, ColonLoc, VL.size(), QualifierLoc, NameInfo);
|
||||
Clause->setVarRefs(VL);
|
||||
@ -388,9 +354,7 @@ OMPReductionClause *OMPReductionClause::Create(
|
||||
|
||||
OMPReductionClause *OMPReductionClause::CreateEmpty(const ASTContext &C,
|
||||
unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPReductionClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
5 * sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(5 * N));
|
||||
return new (Mem) OMPReductionClause(N);
|
||||
}
|
||||
|
||||
@ -399,9 +363,7 @@ OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
|
||||
SourceLocation LParenLoc,
|
||||
SourceLocation EndLoc,
|
||||
ArrayRef<Expr *> VL) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
|
||||
OMPFlushClause *Clause =
|
||||
new (Mem) OMPFlushClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -409,9 +371,7 @@ OMPFlushClause *OMPFlushClause::Create(const ASTContext &C,
|
||||
}
|
||||
|
||||
OMPFlushClause *OMPFlushClause::CreateEmpty(const ASTContext &C, unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPFlushClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
|
||||
return new (Mem) OMPFlushClause(N);
|
||||
}
|
||||
|
||||
@ -420,9 +380,7 @@ OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
SourceLocation LParenLoc, SourceLocation EndLoc,
|
||||
OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
|
||||
SourceLocation ColonLoc, ArrayRef<Expr *> VL) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
|
||||
OMPDependClause *Clause =
|
||||
new (Mem) OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -433,9 +391,7 @@ OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
}
|
||||
|
||||
OMPDependClause *OMPDependClause::CreateEmpty(const ASTContext &C, unsigned N) {
|
||||
void *Mem = C.Allocate(llvm::RoundUpToAlignment(sizeof(OMPDependClause),
|
||||
llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
|
||||
return new (Mem) OMPDependClause(N);
|
||||
}
|
||||
|
||||
@ -445,9 +401,7 @@ OMPMapClause *OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
OpenMPMapClauseKind TypeModifier,
|
||||
OpenMPMapClauseKind Type,
|
||||
SourceLocation TypeLoc) {
|
||||
void *Mem = C.Allocate(
|
||||
llvm::RoundUpToAlignment(sizeof(OMPMapClause), llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * VL.size());
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(VL.size()));
|
||||
OMPMapClause *Clause = new (Mem) OMPMapClause(
|
||||
TypeModifier, Type, TypeLoc, StartLoc, LParenLoc, EndLoc, VL.size());
|
||||
Clause->setVarRefs(VL);
|
||||
@ -458,8 +412,6 @@ OMPMapClause *OMPMapClause::Create(const ASTContext &C, SourceLocation StartLoc,
|
||||
}
|
||||
|
||||
OMPMapClause *OMPMapClause::CreateEmpty(const ASTContext &C, unsigned N) {
|
||||
void *Mem = C.Allocate(
|
||||
llvm::RoundUpToAlignment(sizeof(OMPMapClause), llvm::alignOf<Expr *>()) +
|
||||
sizeof(Expr *) * N);
|
||||
void *Mem = C.Allocate(totalSizeToAlloc<Expr *>(N));
|
||||
return new (Mem) OMPMapClause(N);
|
||||
}
|
||||
|
@ -48,10 +48,20 @@ void Builtin::Context::InitializeTarget(const TargetInfo &Target,
AuxTSRecords = AuxTarget->getTargetBuiltins();
}

bool Builtin::Context::isBuiltinFunc(const char *Name) {
StringRef FuncName(Name);
for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin; ++i)
if (FuncName.equals(BuiltinInfo[i].Name))
return strchr(BuiltinInfo[i].Attributes, 'f') != nullptr;

return false;
}

bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
const LangOptions &LangOpts) {
bool BuiltinsUnsupported = LangOpts.NoBuiltin &&
strchr(BuiltinInfo.Attributes, 'f');
bool BuiltinsUnsupported =
(LangOpts.NoBuiltin || LangOpts.isNoBuiltinFunc(BuiltinInfo.Name)) &&
strchr(BuiltinInfo.Attributes, 'f');
bool MathBuiltinsUnsupported =
LangOpts.NoMathBuiltin && BuiltinInfo.HeaderName &&
llvm::StringRef(BuiltinInfo.HeaderName).equals("math.h");
@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
#include "clang/Basic/LangOptions.h"
#include "llvm/ADT/StringRef.h"

using namespace clang;

@ -36,3 +37,10 @@ void LangOptions::resetNonModularOptions() {
ImplementationOfModule.clear();
}

bool LangOptions::isNoBuiltinFunc(const char *Name) const {
StringRef FuncName(Name);
for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
if (FuncName.equals(NoBuiltinFuncs[i]))
return true;
return false;
}
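
Note: a small usage illustration of the query implemented above, with hypothetical values. It
assumes NoBuiltinFuncs is populated from -fno-builtin-<name> by the frontend and is publicly
accessible, like the neighbouring LangOptions members:

  #include "clang/Basic/LangOptions.h"

  static bool demoNoBuiltinQuery() {
    clang::LangOptions LO;
    LO.NoBuiltinFuncs.push_back("memset"); // as if -fno-builtin-memset was given
    // True for the listed name, false for anything else.
    return LO.isNoBuiltinFunc("memset") && !LO.isNoBuiltinFunc("memcpy");
  }
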
@ -2095,6 +2095,7 @@ class X86TargetInfo : public TargetInfo {
|
||||
bool HasXSAVEOPT = false;
|
||||
bool HasXSAVEC = false;
|
||||
bool HasXSAVES = false;
|
||||
bool HasPKU = false;
|
||||
|
||||
/// \brief Enumeration of all of the X86 CPUs supported by Clang.
|
||||
///
|
||||
@ -2596,6 +2597,7 @@ bool X86TargetInfo::initFeatureMap(
|
||||
setFeatureEnabledImpl(Features, "avx512vl", true);
|
||||
setFeatureEnabledImpl(Features, "xsavec", true);
|
||||
setFeatureEnabledImpl(Features, "xsaves", true);
|
||||
setFeatureEnabledImpl(Features, "pku", true);
|
||||
// FALLTHROUGH
|
||||
case CK_Broadwell:
|
||||
setFeatureEnabledImpl(Features, "rdseed", true);
|
||||
@ -3021,6 +3023,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
|
||||
HasXSAVEC = true;
|
||||
} else if (Feature == "+xsaves") {
|
||||
HasXSAVES = true;
|
||||
} else if (Feature == "+pku") {
|
||||
HasPKU = true;
|
||||
}
|
||||
|
||||
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
|
||||
@ -3322,7 +3326,8 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
|
||||
Builder.defineMacro("__XSAVEC__");
|
||||
if (HasXSAVES)
|
||||
Builder.defineMacro("__XSAVES__");
|
||||
|
||||
if (HasPKU)
|
||||
Builder.defineMacro("__PKU__");
|
||||
if (HasCX16)
|
||||
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
|
||||
|
||||
@ -3440,6 +3445,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
|
||||
.Case("xsavec", HasXSAVEC)
|
||||
.Case("xsaves", HasXSAVES)
|
||||
.Case("xsaveopt", HasXSAVEOPT)
|
||||
.Case("pku", HasPKU)
|
||||
.Default(false);
|
||||
}
|
||||
|
||||
@ -5310,7 +5316,8 @@ class AArch64TargetInfo : public TargetInfo {
|
||||
bool setCPU(const std::string &Name) override {
|
||||
bool CPUKnown = llvm::StringSwitch<bool>(Name)
|
||||
.Case("generic", true)
|
||||
.Cases("cortex-a53", "cortex-a57", "cortex-a72", "cortex-a35", true)
|
||||
.Cases("cortex-a53", "cortex-a57", "cortex-a72",
|
||||
"cortex-a35", "exynos-m1", true)
|
||||
.Case("cyclone", true)
|
||||
.Default(false);
|
||||
return CPUKnown;
|
||||
|
@ -249,6 +249,13 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
|
||||
TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple);
|
||||
if (!CodeGenOpts.SimplifyLibCalls)
|
||||
TLII->disableAllFunctions();
|
||||
else {
|
||||
// Disable individual libc/libm calls in TargetLibraryInfo.
|
||||
LibFunc::Func F;
|
||||
for (auto &FuncName : CodeGenOpts.getNoBuiltinFuncs())
|
||||
if (TLII->getLibFunc(FuncName, F))
|
||||
TLII->setUnavailable(F);
|
||||
}
|
||||
|
||||
switch (CodeGenOpts.getVecLib()) {
|
||||
case CodeGenOptions::Accelerate:
|
||||
|
@ -1431,11 +1431,9 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
|
||||
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
|
||||
}
|
||||
|
||||
void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
|
||||
CGCalleeInfo CalleeInfo,
|
||||
AttributeListType &PAL,
|
||||
unsigned &CallingConv,
|
||||
bool AttrOnCallSite) {
|
||||
void CodeGenModule::ConstructAttributeList(
|
||||
StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
|
||||
AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
|
||||
llvm::AttrBuilder FuncAttrs;
|
||||
llvm::AttrBuilder RetAttrs;
|
||||
bool HasOptnone = false;
|
||||
@ -1510,7 +1508,8 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
|
||||
|
||||
if (AttrOnCallSite) {
|
||||
// Attributes that should go on the call site only.
|
||||
if (!CodeGenOpts.SimplifyLibCalls)
|
||||
if (!CodeGenOpts.SimplifyLibCalls ||
|
||||
CodeGenOpts.isNoBuiltinFunc(Name.data()))
|
||||
FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
|
||||
if (!CodeGenOpts.TrapFuncName.empty())
|
||||
FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
|
||||
@ -3490,8 +3489,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
|
||||
|
||||
unsigned CallingConv;
|
||||
CodeGen::AttributeListType AttributeList;
|
||||
CGM.ConstructAttributeList(CallInfo, CalleeInfo, AttributeList, CallingConv,
|
||||
true);
|
||||
CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
|
||||
AttributeList, CallingConv,
|
||||
/*AttrOnCallSite=*/true);
|
||||
llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
|
||||
AttributeList);
|
||||
|
||||
|
@ -4770,11 +4770,7 @@ llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
|
||||
// This isn't a stable sort, but our algorithm should handle it fine.
|
||||
llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
|
||||
} else {
|
||||
#ifndef NDEBUG
|
||||
for (unsigned i = 1; i != IvarsInfo.size(); ++i) {
|
||||
assert(IvarsInfo[i - 1].Offset <= IvarsInfo[i].Offset);
|
||||
}
|
||||
#endif
|
||||
assert(std::is_sorted(IvarsInfo.begin(), IvarsInfo.end()));
|
||||
}
|
||||
assert(IvarsInfo.back().Offset < InstanceEnd);
|
||||
|
||||
|
@ -11,16 +11,19 @@
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "CGCXXABI.h"
|
||||
#include "CGCleanup.h"
|
||||
#include "CGOpenMPRuntime.h"
|
||||
#include "CodeGenFunction.h"
|
||||
#include "CGCleanup.h"
|
||||
#include "clang/AST/Decl.h"
|
||||
#include "clang/AST/StmtOpenMP.h"
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/Bitcode/ReaderWriter.h"
|
||||
#include "llvm/IR/CallSite.h"
|
||||
#include "llvm/IR/DerivedTypes.h"
|
||||
#include "llvm/IR/GlobalValue.h"
|
||||
#include "llvm/IR/Value.h"
|
||||
#include "llvm/Support/Format.h"
|
||||
#include "llvm/Support/raw_ostream.h"
|
||||
#include <cassert>
|
||||
|
||||
@ -215,25 +218,31 @@ class CGOpenMPInlinedRegionInfo : public CGOpenMPRegionInfo {
|
||||
|
||||
/// \brief API for captured statement code generation in OpenMP target
|
||||
/// constructs. For this captures, implicit parameters are used instead of the
|
||||
/// captured fields.
|
||||
/// captured fields. The name of the target region has to be unique in a given
|
||||
/// application so it is provided by the client, because only the client has
|
||||
/// the information to generate that.
|
||||
class CGOpenMPTargetRegionInfo : public CGOpenMPRegionInfo {
|
||||
public:
|
||||
CGOpenMPTargetRegionInfo(const CapturedStmt &CS,
|
||||
const RegionCodeGenTy &CodeGen)
|
||||
const RegionCodeGenTy &CodeGen, StringRef HelperName)
|
||||
: CGOpenMPRegionInfo(CS, TargetRegion, CodeGen, OMPD_target,
|
||||
/*HasCancel = */ false) {}
|
||||
/*HasCancel=*/false),
|
||||
HelperName(HelperName) {}
|
||||
|
||||
/// \brief This is unused for target regions because each starts executing
|
||||
/// with a single thread.
|
||||
const VarDecl *getThreadIDVariable() const override { return nullptr; }
|
||||
|
||||
/// \brief Get the name of the capture helper.
|
||||
StringRef getHelperName() const override { return ".omp_offloading."; }
|
||||
StringRef getHelperName() const override { return HelperName; }
|
||||
|
||||
static bool classof(const CGCapturedStmtInfo *Info) {
|
||||
return CGOpenMPRegionInfo::classof(Info) &&
|
||||
cast<CGOpenMPRegionInfo>(Info)->getRegionKind() == TargetRegion;
|
||||
}
|
||||
|
||||
private:
|
||||
StringRef HelperName;
|
||||
};
|
||||
|
||||
/// \brief RAII for emitting code of OpenMP constructs.
|
||||
@ -301,7 +310,8 @@ LValue CGOpenMPTaskOutlinedRegionInfo::getThreadIDVariableLValue(
|
||||
}
|
||||
|
||||
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
|
||||
: CGM(CGM), DefaultOpenMPPSource(nullptr), KmpRoutineEntryPtrTy(nullptr) {
|
||||
: CGM(CGM), DefaultOpenMPPSource(nullptr), KmpRoutineEntryPtrTy(nullptr),
|
||||
OffloadEntriesInfoManager(CGM) {
|
||||
IdentTy = llvm::StructType::create(
|
||||
"ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
|
||||
CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
|
||||
@ -311,6 +321,8 @@ CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
|
||||
llvm::PointerType::getUnqual(CGM.Int32Ty)};
|
||||
Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
|
||||
KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
|
||||
|
||||
loadOffloadInfoMetadata();
|
||||
}
|
||||
|
||||
void CGOpenMPRuntime::clear() {
|
||||
@ -931,6 +943,26 @@ CGOpenMPRuntime::createRuntimeFunction(OpenMPRTLFunction Function) {
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_target");
    break;
  }
  case OMPRTL__tgt_register_lib: {
    // Build void __tgt_register_lib(__tgt_bin_desc *desc);
    QualType ParamTy =
        CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
    llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_register_lib");
    break;
  }
  case OMPRTL__tgt_unregister_lib: {
    // Build void __tgt_unregister_lib(__tgt_bin_desc *desc);
    QualType ParamTy =
        CGM.getContext().getPointerType(getTgtBinaryDescriptorQTy());
    llvm::Type *TypeParams[] = {CGM.getTypes().ConvertTypeForMem(ParamTy)};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__tgt_unregister_lib");
    break;
  }
  }
  return RTLFn;
}
@ -1969,6 +2001,381 @@ enum KmpTaskTFields {
};
} // anonymous namespace

bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::empty() const {
  // FIXME: Add other entry kinds when they become supported.
  return OffloadEntriesTargetRegion.empty();
}

/// \brief Initialize target region entry.
void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                    StringRef ParentName, unsigned LineNum,
                                    unsigned ColNum, unsigned Order) {
  assert(CGM.getLangOpts().OpenMPIsDevice && "Initialization of entries is "
                                             "only required for the device "
                                             "code generation.");
  OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum][ColNum] =
      OffloadEntryInfoTargetRegion(Order, /*Addr=*/nullptr, /*ID=*/nullptr);
  ++OffloadingEntriesNum;
}

void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
    registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                  StringRef ParentName, unsigned LineNum,
                                  unsigned ColNum, llvm::Constant *Addr,
                                  llvm::Constant *ID) {
  // If we are emitting code for a target, the entry is already initialized;
  // it only has to be registered.
  if (CGM.getLangOpts().OpenMPIsDevice) {
    assert(hasTargetRegionEntryInfo(DeviceID, FileID, ParentName, LineNum,
                                    ColNum) &&
           "Entry must exist.");
    auto &Entry = OffloadEntriesTargetRegion[DeviceID][FileID][ParentName]
                                            [LineNum][ColNum];
    assert(Entry.isValid() && "Entry not initialized!");
    Entry.setAddress(Addr);
    Entry.setID(ID);
    return;
  } else {
    OffloadEntryInfoTargetRegion Entry(OffloadingEntriesNum++, Addr, ID);
    OffloadEntriesTargetRegion[DeviceID][FileID][ParentName][LineNum][ColNum] =
        Entry;
  }
}

bool CGOpenMPRuntime::OffloadEntriesInfoManagerTy::hasTargetRegionEntryInfo(
    unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned LineNum,
    unsigned ColNum) const {
  auto PerDevice = OffloadEntriesTargetRegion.find(DeviceID);
  if (PerDevice == OffloadEntriesTargetRegion.end())
    return false;
  auto PerFile = PerDevice->second.find(FileID);
  if (PerFile == PerDevice->second.end())
    return false;
  auto PerParentName = PerFile->second.find(ParentName);
  if (PerParentName == PerFile->second.end())
    return false;
  auto PerLine = PerParentName->second.find(LineNum);
  if (PerLine == PerParentName->second.end())
    return false;
  auto PerColumn = PerLine->second.find(ColNum);
  if (PerColumn == PerLine->second.end())
    return false;
  // Fail if this entry is already registered.
  if (PerColumn->second.getAddress() || PerColumn->second.getID())
    return false;
  return true;
}

void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::actOnTargetRegionEntriesInfo(
    const OffloadTargetRegionEntryInfoActTy &Action) {
  // Scan all target region entries and perform the provided action.
  for (auto &D : OffloadEntriesTargetRegion)
    for (auto &F : D.second)
      for (auto &P : F.second)
        for (auto &L : P.second)
          for (auto &C : L.second)
            Action(D.first, F.first, P.first(), L.first, C.first, C.second);
}
/// \brief Create a Ctor/Dtor-like function whose body is emitted through
/// \a Codegen. This is used to emit the two functions that register and
/// unregister the descriptor of the current compilation unit.
static llvm::Function *
createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
                                         const RegionCodeGenTy &Codegen) {
  auto &C = CGM.getContext();
  FunctionArgList Args;
  ImplicitParamDecl DummyPtr(C, /*DC=*/nullptr, SourceLocation(),
                             /*Id=*/nullptr, C.VoidPtrTy);
  Args.push_back(&DummyPtr);

  CodeGenFunction CGF(CGM);
  GlobalDecl();
  auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
      C.VoidTy, Args, FunctionType::ExtInfo(),
      /*isVariadic=*/false);
  auto FTy = CGM.getTypes().GetFunctionType(FI);
  auto *Fn =
      CGM.CreateGlobalInitOrDestructFunction(FTy, Name, FI, SourceLocation());
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FI, Args, SourceLocation());
  Codegen(CGF);
  CGF.FinishFunction();
  return Fn;
}

llvm::Function *
CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {

  // If we don't have entries or if we are emitting code for the device, we
  // don't need to do anything.
  if (CGM.getLangOpts().OpenMPIsDevice || OffloadEntriesInfoManager.empty())
    return nullptr;

  auto &M = CGM.getModule();
  auto &C = CGM.getContext();

  // Get list of devices we care about
  auto &Devices = CGM.getLangOpts().OMPTargetTriples;

  // We should be creating an offloading descriptor only if there are devices
  // specified.
  assert(!Devices.empty() && "No OpenMP offloading devices??");

  // Create the external variables that will point to the begin and end of the
  // host entries section. These will be defined by the linker.
  auto *OffloadEntryTy =
      CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy());
  llvm::GlobalVariable *HostEntriesBegin = new llvm::GlobalVariable(
      M, OffloadEntryTy, /*isConstant=*/true,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/0,
      ".omp_offloading.entries_begin");
  llvm::GlobalVariable *HostEntriesEnd = new llvm::GlobalVariable(
      M, OffloadEntryTy, /*isConstant=*/true,
      llvm::GlobalValue::ExternalLinkage, /*Initializer=*/0,
      ".omp_offloading.entries_end");

  // Create all device images
  llvm::SmallVector<llvm::Constant *, 4> DeviceImagesEntires;
  auto *DeviceImageTy = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));

  for (unsigned i = 0; i < Devices.size(); ++i) {
    StringRef T = Devices[i].getTriple();
    auto *ImgBegin = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
        /*Initializer=*/0, Twine(".omp_offloading.img_start.") + Twine(T));
    auto *ImgEnd = new llvm::GlobalVariable(
        M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
        /*Initializer=*/0, Twine(".omp_offloading.img_end.") + Twine(T));

    llvm::Constant *Dev =
        llvm::ConstantStruct::get(DeviceImageTy, ImgBegin, ImgEnd,
                                  HostEntriesBegin, HostEntriesEnd, nullptr);
    DeviceImagesEntires.push_back(Dev);
  }

  // Create device images global array.
  llvm::ArrayType *DeviceImagesInitTy =
      llvm::ArrayType::get(DeviceImageTy, DeviceImagesEntires.size());
  llvm::Constant *DeviceImagesInit =
      llvm::ConstantArray::get(DeviceImagesInitTy, DeviceImagesEntires);

  llvm::GlobalVariable *DeviceImages = new llvm::GlobalVariable(
      M, DeviceImagesInitTy, /*isConstant=*/true,
      llvm::GlobalValue::InternalLinkage, DeviceImagesInit,
      ".omp_offloading.device_images");
  DeviceImages->setUnnamedAddr(true);

  // This is a Zero array to be used in the creation of the constant expressions
  llvm::Constant *Index[] = {llvm::Constant::getNullValue(CGM.Int32Ty),
                             llvm::Constant::getNullValue(CGM.Int32Ty)};

  // Create the target region descriptor.
  auto *BinaryDescriptorTy = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
  llvm::Constant *TargetRegionsDescriptorInit = llvm::ConstantStruct::get(
      BinaryDescriptorTy, llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
      llvm::ConstantExpr::getGetElementPtr(DeviceImagesInitTy, DeviceImages,
                                           Index),
      HostEntriesBegin, HostEntriesEnd, nullptr);

  auto *Desc = new llvm::GlobalVariable(
      M, BinaryDescriptorTy, /*isConstant=*/true,
      llvm::GlobalValue::InternalLinkage, TargetRegionsDescriptorInit,
      ".omp_offloading.descriptor");

  // Emit code to register or unregister the descriptor at execution
  // startup or closing, respectively.
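  //
  // Conceptually, the emitted code behaves like the following host-side
  // pseudo-C (illustrative only; the function names are placeholders and the
  // real functions are synthesized below and wired up as a ctor/dtor pair):
  //
  //   static __tgt_bin_desc Desc = {NumDevices, DeviceImages,
  //                                 &entries_begin, &entries_end};
  //   static void desc_reg(void)   { __tgt_register_lib(&Desc); }   // startup
  //   static void desc_unreg(void) { __tgt_unregister_lib(&Desc); } // shutdown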

  // Create a variable to drive the registration and unregistration of the
  // descriptor, so we can reuse the logic that emits Ctors and Dtors.
  auto *IdentInfo = &C.Idents.get(".omp_offloading.reg_unreg_var");
  ImplicitParamDecl RegUnregVar(C, C.getTranslationUnitDecl(), SourceLocation(),
                                IdentInfo, C.CharTy);

  auto *UnRegFn = createOffloadingBinaryDescriptorFunction(
      CGM, ".omp_offloading.descriptor_unreg", [&](CodeGenFunction &CGF) {
        CGF.EmitCallOrInvoke(createRuntimeFunction(OMPRTL__tgt_unregister_lib),
                             Desc);
      });
  auto *RegFn = createOffloadingBinaryDescriptorFunction(
      CGM, ".omp_offloading.descriptor_reg", [&](CodeGenFunction &CGF) {
        CGF.EmitCallOrInvoke(createRuntimeFunction(OMPRTL__tgt_register_lib),
                             Desc);
        CGM.getCXXABI().registerGlobalDtor(CGF, RegUnregVar, UnRegFn, Desc);
      });
  return RegFn;
}
void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *Addr, StringRef Name,
                                         uint64_t Size) {
  auto *TgtOffloadEntryType = cast<llvm::StructType>(
      CGM.getTypes().ConvertTypeForMem(getTgtOffloadEntryQTy()));
  llvm::LLVMContext &C = CGM.getModule().getContext();
  llvm::Module &M = CGM.getModule();

  // Make sure the address has the right type.
  llvm::Constant *AddrPtr = llvm::ConstantExpr::getBitCast(Addr, CGM.VoidPtrTy);

  // Create constant string with the name.
  llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);

  llvm::GlobalVariable *Str =
      new llvm::GlobalVariable(M, StrPtrInit->getType(), /*isConstant=*/true,
                               llvm::GlobalValue::InternalLinkage, StrPtrInit,
                               ".omp_offloading.entry_name");
  Str->setUnnamedAddr(true);
  llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);

  // Create the entry struct.
  llvm::Constant *EntryInit = llvm::ConstantStruct::get(
      TgtOffloadEntryType, AddrPtr, StrPtr,
      llvm::ConstantInt::get(CGM.SizeTy, Size), nullptr);
  llvm::GlobalVariable *Entry = new llvm::GlobalVariable(
      M, TgtOffloadEntryType, true, llvm::GlobalValue::ExternalLinkage,
      EntryInit, ".omp_offloading.entry");

  // The entry has to be created in the section the linker expects it to be.
  Entry->setSection(".omp_offloading.entries");
  // We can't have any padding between symbols, so we need to have 1-byte
  // alignment.
  Entry->setAlignment(1);
  return;
}
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
  // Emit the offloading entries and metadata so that the device codegen side
  // can easily figure out what to emit. The produced metadata looks like this:
  //
  // !omp_offload.info = !{!1, ...}
  //
  // Right now we only generate metadata for functions that contain target
  // regions.
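  //
  // As an illustration only (the IDs, name and numbers below are made-up
  // placeholders, not values produced by this commit), a single target region
  // enclosed in a function "foo" would be described by one tuple such as:
  //
  //   !0 = !{i32 0, i32 <device ID>, i32 <file ID>, !"foo", i32 <line>,
  //          i32 <column>, i32 <order>}
  //
  // matching the per-entry layout documented in TargetRegionMetadataEmitter
  // below.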

  // If we do not have entries, we don't need to do anything.
  if (OffloadEntriesInfoManager.empty())
    return;

  llvm::Module &M = CGM.getModule();
  llvm::LLVMContext &C = M.getContext();
  SmallVector<OffloadEntriesInfoManagerTy::OffloadEntryInfo *, 16>
      OrderedEntries(OffloadEntriesInfoManager.size());

  // Create the offloading info metadata node.
  llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("omp_offload.info");

  // Auxiliary methods to create metadata values and strings.
  auto getMDInt = [&](unsigned v) {
    return llvm::ConstantAsMetadata::get(
        llvm::ConstantInt::get(llvm::Type::getInt32Ty(C), v));
  };

  auto getMDString = [&](StringRef v) { return llvm::MDString::get(C, v); };

  // Create a function that emits metadata for each target region entry.
  auto &&TargetRegionMetadataEmitter = [&](
      unsigned DeviceID, unsigned FileID, StringRef ParentName, unsigned Line,
      unsigned Column,
      OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion &E) {
    llvm::SmallVector<llvm::Metadata *, 32> Ops;
    // Generate metadata for target regions. Each entry of this metadata
    // contains:
    // - Entry 0 -> Kind of this type of metadata (0).
    // - Entry 1 -> Device ID of the file where the entry was identified.
    // - Entry 2 -> File ID of the file where the entry was identified.
    // - Entry 3 -> Mangled name of the function where the entry was identified.
    // - Entry 4 -> Line in the file where the entry was identified.
    // - Entry 5 -> Column in the file where the entry was identified.
    // - Entry 6 -> Order the entry was created.
    // The first element of the metadata node is the kind.
    Ops.push_back(getMDInt(E.getKind()));
    Ops.push_back(getMDInt(DeviceID));
    Ops.push_back(getMDInt(FileID));
    Ops.push_back(getMDString(ParentName));
    Ops.push_back(getMDInt(Line));
    Ops.push_back(getMDInt(Column));
    Ops.push_back(getMDInt(E.getOrder()));

    // Save this entry in the right position of the ordered entries array.
    OrderedEntries[E.getOrder()] = &E;

    // Add metadata to the named metadata node.
    MD->addOperand(llvm::MDNode::get(C, Ops));
  };

  OffloadEntriesInfoManager.actOnTargetRegionEntriesInfo(
      TargetRegionMetadataEmitter);

  for (auto *E : OrderedEntries) {
    assert(E && "All ordered entries must exist!");
    if (auto *CE =
            dyn_cast<OffloadEntriesInfoManagerTy::OffloadEntryInfoTargetRegion>(
                E)) {
      assert(CE->getID() && CE->getAddress() &&
             "Entry ID and Addr are invalid!");
      createOffloadEntry(CE->getID(), CE->getAddress()->getName(), /*Size=*/0);
    } else
      llvm_unreachable("Unsupported entry kind.");
  }
}
/// \brief Loads all the offload entries information from the host IR
/// metadata.
void CGOpenMPRuntime::loadOffloadInfoMetadata() {
  // If we are in target mode, load the metadata from the host IR. This code
  // has to match the metadata creation in createOffloadEntriesAndInfoMetadata().

  if (!CGM.getLangOpts().OpenMPIsDevice)
    return;

  if (CGM.getLangOpts().OMPHostIRFile.empty())
    return;

  auto Buf = llvm::MemoryBuffer::getFile(CGM.getLangOpts().OMPHostIRFile);
  if (Buf.getError())
    return;

  llvm::LLVMContext C;
  auto ME = llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C);

  if (ME.getError())
    return;

  llvm::NamedMDNode *MD = ME.get()->getNamedMetadata("omp_offload.info");
  if (!MD)
    return;

  for (auto I : MD->operands()) {
    llvm::MDNode *MN = cast<llvm::MDNode>(I);

    auto getMDInt = [&](unsigned Idx) {
      llvm::ConstantAsMetadata *V =
          cast<llvm::ConstantAsMetadata>(MN->getOperand(Idx));
      return cast<llvm::ConstantInt>(V->getValue())->getZExtValue();
    };

    auto getMDString = [&](unsigned Idx) {
      llvm::MDString *V = cast<llvm::MDString>(MN->getOperand(Idx));
      return V->getString();
    };

    switch (getMDInt(0)) {
    default:
      llvm_unreachable("Unexpected metadata!");
      break;
    case OffloadEntriesInfoManagerTy::OffloadEntryInfo::
        OFFLOAD_ENTRY_INFO_TARGET_REGION:
      OffloadEntriesInfoManager.initializeTargetRegionEntryInfo(
          /*DeviceID=*/getMDInt(1), /*FileID=*/getMDInt(2),
          /*ParentName=*/getMDString(3), /*Line=*/getMDInt(4),
          /*Column=*/getMDInt(5), /*Order=*/getMDInt(6));
      break;
    }
  }
}

void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
  if (!KmpRoutineEntryPtrTy) {
    // Build typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *); type.
@ -1992,6 +2399,80 @@ static FieldDecl *addFieldToRecordDecl(ASTContext &C, DeclContext *DC,
  return Field;
}
QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {

  // Make sure the type of the entry is already created. This is the type we
  // have to create:
  // struct __tgt_offload_entry{
  //   void *addr;   // Pointer to the offload entry info.
  //                 // (function or global)
  //   char *name;   // Name of the function or global.
  //   size_t size;  // Size of the entry info (0 if it is a function).
  // };
  if (TgtOffloadEntryQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    auto *RD = C.buildImplicitRecord("__tgt_offload_entry");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
    addFieldToRecordDecl(C, RD, C.getSizeType());
    RD->completeDefinition();
    TgtOffloadEntryQTy = C.getRecordType(RD);
  }
  return TgtOffloadEntryQTy;
}

QualType CGOpenMPRuntime::getTgtDeviceImageQTy() {
  // These are the types we need to build:
  // struct __tgt_device_image{
  //   void *ImageStart;                  // Pointer to the target code start.
  //   void *ImageEnd;                    // Pointer to the target code end.
  //   // We also add the host entries to the device image, as it may be useful
  //   // for the target runtime to have access to that information.
  //   __tgt_offload_entry *EntriesBegin; // Begin of the table with all
  //                                      // the entries.
  //   __tgt_offload_entry *EntriesEnd;   // End of the table with all the
  //                                      // entries (non inclusive).
  // };
  if (TgtDeviceImageQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    auto *RD = C.buildImplicitRecord("__tgt_device_image");
    RD->startDefinition();
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.VoidPtrTy);
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    RD->completeDefinition();
    TgtDeviceImageQTy = C.getRecordType(RD);
  }
  return TgtDeviceImageQTy;
}

QualType CGOpenMPRuntime::getTgtBinaryDescriptorQTy() {
  // struct __tgt_bin_desc{
  //   int32_t NumDevices;                // Number of devices supported.
  //   __tgt_device_image *DeviceImages;  // Arrays of device images
  //                                      // (one per device).
  //   __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
  //                                      // entries.
  //   __tgt_offload_entry *EntriesEnd;   // End of the table with all the
  //                                      // entries (non inclusive).
  // };
  if (TgtBinaryDescriptorQTy.isNull()) {
    ASTContext &C = CGM.getContext();
    auto *RD = C.buildImplicitRecord("__tgt_bin_desc");
    RD->startDefinition();
    addFieldToRecordDecl(
        C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtDeviceImageQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    addFieldToRecordDecl(C, RD, C.getPointerType(getTgtOffloadEntryQTy()));
    RD->completeDefinition();
    TgtBinaryDescriptorQTy = C.getRecordType(RD);
  }
  return TgtBinaryDescriptorQTy;
}

namespace {
struct PrivateHelpersTy {
  PrivateHelpersTy(const VarDecl *Original, const VarDecl *PrivateCopy,
@ -3238,20 +3719,115 @@ void CGOpenMPRuntime::emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
  }
}

llvm::Value *
CGOpenMPRuntime::emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                            const RegionCodeGenTy &CodeGen) {
/// \brief Obtain information that uniquely identifies a target entry. This
/// consists of the file and device IDs as well as line and column numbers
/// associated with the relevant entry source location.
static void getTargetEntryUniqueInfo(ASTContext &C, SourceLocation Loc,
                                     unsigned &DeviceID, unsigned &FileID,
                                     unsigned &LineNum, unsigned &ColumnNum) {

  auto &SM = C.getSourceManager();

  // The loc should always be valid and have a file ID (the user cannot use
  // #pragma directives in macros).

  assert(Loc.isValid() && "Source location is expected to be always valid.");
  assert(Loc.isFileID() && "Source location is expected to refer to a file.");

  PresumedLoc PLoc = SM.getPresumedLoc(Loc);
  assert(PLoc.isValid() && "Source location is expected to be always valid.");

  llvm::sys::fs::UniqueID ID;
  if (llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
    llvm_unreachable("Source file with target region no longer exists!");

  DeviceID = ID.getDevice();
  FileID = ID.getFile();
  LineNum = PLoc.getLine();
  ColumnNum = PLoc.getColumn();
  return;
}

void CGOpenMPRuntime::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry) {

  assert(!ParentName.empty() && "Invalid target region parent name!");

  const CapturedStmt &CS = *cast<CapturedStmt>(D.getAssociatedStmt());

  // Emit target region as a standalone region.
  auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
    CGF.EmitStmt(CS.getCapturedStmt());
  };

  // Create a unique name for the proxy/entry function, using the source
  // location information of the current target region. The name will be
  // something like:
  //
  // .omp_offloading.DD_FFFF.PP.lBB.cCC
  //
  // where DD_FFFF is an ID unique to the file (device and file IDs), PP is the
  // mangled name of the function that encloses the target region, BB is the
  // line number of the target region, and CC is the column number of the target
  // region.
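  //
  // For illustration only (hypothetical values, not taken from this commit):
  // with the format string used below, a region at line 27, column 9 of a
  // function mangled as "_Z3foov", in a file whose unique device/file IDs are
  // 0x2b and 0x3f4a, would be named:
  //
  //   .omp_offloading.2b.3f4a._Z3foov.l27.c9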

  unsigned DeviceID;
  unsigned FileID;
  unsigned Line;
  unsigned Column;
  getTargetEntryUniqueInfo(CGM.getContext(), D.getLocStart(), DeviceID, FileID,
                           Line, Column);
  SmallString<64> EntryFnName;
  {
    llvm::raw_svector_ostream OS(EntryFnName);
    OS << ".omp_offloading" << llvm::format(".%x", DeviceID)
       << llvm::format(".%x.", FileID) << ParentName << ".l" << Line << ".c"
       << Column;
  }

  CodeGenFunction CGF(CGM, true);
  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen);
  CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
  CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
  return CGF.GenerateOpenMPCapturedStmtFunction(CS);

  OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS);

  // If this target outline function is not an offload entry, we don't need to
  // register it.
  if (!IsOffloadEntry)
    return;

  // The target region ID is used by the runtime library to identify the current
  // target region, so it only has to be unique and not necessarily point to
  // anything. It could be the pointer to the outlined function that implements
  // the target region, but we aren't using that so that the compiler doesn't
  // need to keep it, and could therefore inline the host function if proven
  // worthwhile during optimization. On the other hand, if emitting code for the
  // device, the ID has to be the function address so that it can be retrieved
  // from the offloading entry and launched by the runtime library. We also mark
  // the outlined function to have external linkage in case we are emitting code
  // for the device, because these functions will be entry points to the device.

  if (CGM.getLangOpts().OpenMPIsDevice) {
    OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
    OutlinedFn->setLinkage(llvm::GlobalValue::ExternalLinkage);
  } else
    OutlinedFnID = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage,
        llvm::Constant::getNullValue(CGM.Int8Ty), ".omp_offload.region_id");

  // Register the information for the entry associated with this target region.
  OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
      DeviceID, FileID, ParentName, Line, Column, OutlinedFn, OutlinedFnID);
  return;
}
void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D,
                                     llvm::Value *OutlinedFn,
                                     llvm::Value *OutlinedFnID,
                                     const Expr *IfCond, const Expr *Device,
                                     ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
@ -3275,6 +3851,8 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
    OMP_DEVICEID_UNDEF = -1,
  };

  assert(OutlinedFn && "Invalid outlined function!");

  auto &Ctx = CGF.getContext();

  // Fill up the arrays with all the captured variables.
@ -3373,7 +3951,7 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,

  // Fill up the pointer arrays and transfer execution to the device.
  auto &&ThenGen = [this, &Ctx, &BasePointers, &Pointers, &Sizes, &MapTypes,
                    hasVLACaptures, Device, OffloadError,
                    hasVLACaptures, Device, OutlinedFnID, OffloadError,
                    OffloadErrorQType](CodeGenFunction &CGF) {
    unsigned PointerNumVal = BasePointers.size();
    llvm::Value *PointerNum = CGF.Builder.getInt32(PointerNumVal);
@ -3504,10 +4082,8 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
    // compiler doesn't need to keep that, and could therefore inline the host
    // function if proven worthwhile during optimization.

    llvm::Value *HostPtr = new llvm::GlobalVariable(
        CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
        llvm::GlobalValue::PrivateLinkage,
        llvm::Constant::getNullValue(CGM.Int8Ty), ".offload_hstptr");
    // From this point on, we need to have an ID of the target region defined.
    assert(OutlinedFnID && "Invalid outlined function ID!");

    // Emit device ID if any.
    llvm::Value *DeviceID;
@ -3518,25 +4094,35 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
      DeviceID = CGF.Builder.getInt32(OMP_DEVICEID_UNDEF);

    llvm::Value *OffloadingArgs[] = {
        DeviceID, HostPtr, PointerNum, BasePointersArray,
        PointersArray, SizesArray, MapTypesArray};
        DeviceID, OutlinedFnID, PointerNum, BasePointersArray,
        PointersArray, SizesArray, MapTypesArray};
    auto Return = CGF.EmitRuntimeCall(createRuntimeFunction(OMPRTL__tgt_target),
                                      OffloadingArgs);

    CGF.EmitStoreOfScalar(Return, OffloadError);
  };

  if (IfCond) {
    // Notify that the host version must be executed.
    auto &&ElseGen = [this, OffloadError,
                      OffloadErrorQType](CodeGenFunction &CGF) {
      CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/-1u),
                            OffloadError);
    };
    emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
  // Notify that the host version must be executed.
  auto &&ElseGen = [this, OffloadError,
                    OffloadErrorQType](CodeGenFunction &CGF) {
    CGF.EmitStoreOfScalar(llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/-1u),
                          OffloadError);
  };

  // If we have a target function ID it means that we need to support
  // offloading; otherwise, just execute on the host. We need to execute on the
  // host regardless of the conditional in the if clause if, e.g., the user does
  // not specify target triples.
  if (OutlinedFnID) {
    if (IfCond) {
      emitOMPIfClause(CGF, IfCond, ThenGen, ElseGen);
    } else {
      CodeGenFunction::RunCleanupsScope Scope(CGF);
      ThenGen(CGF);
    }
  } else {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    ThenGen(CGF);
    ElseGen(CGF);
  }

  // Check the error code and execute the host version if required.
@ -3553,3 +4139,120 @@ void CGOpenMPRuntime::emitTargetCall(CodeGenFunction &CGF,
  CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
  return;
}
void CGOpenMPRuntime::scanForTargetRegionsFunctions(const Stmt *S,
                                                    StringRef ParentName) {
  if (!S)
    return;

  // If we find an OMP target directive, codegen the outlined function and
  // register the result.
  // FIXME: Add other directives with target when they become supported.
  bool isTargetDirective = isa<OMPTargetDirective>(S);

  if (isTargetDirective) {
    auto *E = cast<OMPExecutableDirective>(S);
    unsigned DeviceID;
    unsigned FileID;
    unsigned Line;
    unsigned Column;
    getTargetEntryUniqueInfo(CGM.getContext(), E->getLocStart(), DeviceID,
                             FileID, Line, Column);

    // Is this a target region that should not be emitted as an entry point? If
    // so, just signal that we are done with this target region.
    if (!OffloadEntriesInfoManager.hasTargetRegionEntryInfo(
            DeviceID, FileID, ParentName, Line, Column))
      return;

    llvm::Function *Fn;
    llvm::Constant *Addr;
    emitTargetOutlinedFunction(*E, ParentName, Fn, Addr,
                               /*isOffloadEntry=*/true);
    assert(Fn && Addr && "Target region emission failed.");
    return;
  }

  if (const OMPExecutableDirective *E = dyn_cast<OMPExecutableDirective>(S)) {
    if (!E->getAssociatedStmt())
      return;

    scanForTargetRegionsFunctions(
        cast<CapturedStmt>(E->getAssociatedStmt())->getCapturedStmt(),
        ParentName);
    return;
  }

  // If this is a lambda function, look into its body.
  if (auto *L = dyn_cast<LambdaExpr>(S))
    S = L->getBody();

  // Keep looking for target regions recursively.
  for (auto *II : S->children())
    scanForTargetRegionsFunctions(II, ParentName);

  return;
}

bool CGOpenMPRuntime::emitTargetFunctions(GlobalDecl GD) {
  auto &FD = *cast<FunctionDecl>(GD.getDecl());

  // If emitting code for the host, we do not process FD here. Instead we do
  // the normal code generation.
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return false;

  // Try to detect target regions in the function.
  scanForTargetRegionsFunctions(FD.getBody(), CGM.getMangledName(GD));

  // We should not emit any function other than the ones created during the
  // scanning. Therefore, we signal that this function is completely dealt
  // with.
  return true;
}

bool CGOpenMPRuntime::emitTargetGlobalVariable(GlobalDecl GD) {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    return false;

  // Check if there are Ctors/Dtors in this declaration and look for target
  // regions in it. We use the complete variant to produce the kernel name
  // mangling.
  QualType RDTy = cast<VarDecl>(GD.getDecl())->getType();
  if (auto *RD = RDTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl()) {
    for (auto *Ctor : RD->ctors()) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Ctor, Ctor_Complete));
      scanForTargetRegionsFunctions(Ctor->getBody(), ParentName);
    }
    auto *Dtor = RD->getDestructor();
    if (Dtor) {
      StringRef ParentName =
          CGM.getMangledName(GlobalDecl(Dtor, Dtor_Complete));
      scanForTargetRegionsFunctions(Dtor->getBody(), ParentName);
    }
  }

  // If we are in target mode we do not emit any global (declare target is not
  // implemented yet). Therefore we signal that GD was processed in this case.
  return true;
}

bool CGOpenMPRuntime::emitTargetGlobal(GlobalDecl GD) {
  auto *VD = GD.getDecl();
  if (isa<FunctionDecl>(VD))
    return emitTargetFunctions(GD);

  return emitTargetGlobalVariable(GD);
}

llvm::Function *CGOpenMPRuntime::emitRegistrationFunction() {
  // If we have offloading in the current module, we need to emit the entries
  // now and register the offloading descriptor.
  createOffloadEntriesAndInfoMetadata();

  // Create and register the offloading binary descriptors. This is the main
  // entity that captures all the information about offloading in the current
  // compilation unit.
  return createOffloadingBinaryDescriptorRegistration();
}
@ -35,6 +35,7 @@ class Value;

namespace clang {
class Expr;
class GlobalDecl;
class OMPExecutableDirective;
class VarDecl;

@ -165,6 +166,10 @@ class CGOpenMPRuntime {
  // arg_num, void** args_base, void **args, size_t *arg_sizes, int32_t
  // *arg_types);
  OMPRTL__tgt_target,
  // Call to void __tgt_register_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_register_lib,
  // Call to void __tgt_unregister_lib(__tgt_bin_desc *desc);
  OMPRTL__tgt_unregister_lib,
};

/// \brief Values for bit flags used in the ident_t to describe the fields.
@ -288,7 +293,181 @@ class CGOpenMPRuntime {
  /// } flags;
  /// } kmp_depend_info_t;
  QualType KmpDependInfoTy;
  /// \brief Type struct __tgt_offload_entry{
  ///   void *addr;   // Pointer to the offload entry info.
  ///                 // (function or global)
  ///   char *name;   // Name of the function or global.
  ///   size_t size;  // Size of the entry info (0 if it is a function).
  /// };
  QualType TgtOffloadEntryQTy;
  /// struct __tgt_device_image{
  ///   void *ImageStart;                  // Pointer to the target code start.
  ///   void *ImageEnd;                    // Pointer to the target code end.
  ///   // We also add the host entries to the device image, as it may be useful
  ///   // for the target runtime to have access to that information.
  ///   __tgt_offload_entry *EntriesBegin; // Begin of the table with all
  ///                                      // the entries.
  ///   __tgt_offload_entry *EntriesEnd;   // End of the table with all the
  ///                                      // entries (non inclusive).
  /// };
  QualType TgtDeviceImageQTy;
  /// struct __tgt_bin_desc{
  ///   int32_t NumDevices;                // Number of devices supported.
  ///   __tgt_device_image *DeviceImages;  // Arrays of device images
  ///                                      // (one per device).
  ///   __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
  ///                                      // entries.
  ///   __tgt_offload_entry *EntriesEnd;   // End of the table with all the
  ///                                      // entries (non inclusive).
  /// };
  QualType TgtBinaryDescriptorQTy;
  /// \brief Entity that registers the offloading constants that were emitted
  /// so far.
  class OffloadEntriesInfoManagerTy {
    CodeGenModule &CGM;

    /// \brief Number of entries registered so far.
    unsigned OffloadingEntriesNum;

  public:
    /// \brief Base class of the entries info.
    class OffloadEntryInfo {
    public:
      /// \brief Kind of a given entry. Currently, only target regions are
      /// supported.
      enum OffloadingEntryInfoKinds {
        // Entry is a target region.
        OFFLOAD_ENTRY_INFO_TARGET_REGION = 0,
        // Invalid entry info.
        OFFLOAD_ENTRY_INFO_INVALID = ~0u
      };

      OffloadEntryInfo() : Order(~0u), Kind(OFFLOAD_ENTRY_INFO_INVALID) {}
      explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order)
          : Order(Order), Kind(Kind) {}

      bool isValid() const { return Order != ~0u; }
      unsigned getOrder() const { return Order; }
      OffloadingEntryInfoKinds getKind() const { return Kind; }
      static bool classof(const OffloadEntryInfo *Info) { return true; }

    protected:
      // \brief Order this entry was emitted.
      unsigned Order;

      OffloadingEntryInfoKinds Kind;
    };

    /// \brief Return true if there are no entries defined.
    bool empty() const;
    /// \brief Return the number of entries defined so far.
    unsigned size() const { return OffloadingEntriesNum; }
    OffloadEntriesInfoManagerTy(CodeGenModule &CGM)
        : CGM(CGM), OffloadingEntriesNum(0) {}

    ///
    /// Target region entries related.
    ///
    /// \brief Target region entries info.
    class OffloadEntryInfoTargetRegion : public OffloadEntryInfo {
      // \brief Address of the entity that has to be mapped for offloading.
      llvm::Constant *Addr;
      // \brief Address that can be used as the ID of the entry.
      llvm::Constant *ID;

    public:
      OffloadEntryInfoTargetRegion()
          : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, ~0u),
            Addr(nullptr), ID(nullptr) {}
      explicit OffloadEntryInfoTargetRegion(unsigned Order,
                                            llvm::Constant *Addr,
                                            llvm::Constant *ID)
          : OffloadEntryInfo(OFFLOAD_ENTRY_INFO_TARGET_REGION, Order),
            Addr(Addr), ID(ID) {}

      llvm::Constant *getAddress() const { return Addr; }
      llvm::Constant *getID() const { return ID; }
      void setAddress(llvm::Constant *V) {
        assert(!Addr && "Address has been set before!");
        Addr = V;
      }
      void setID(llvm::Constant *V) {
        assert(!ID && "ID has been set before!");
        ID = V;
      }
      static bool classof(const OffloadEntryInfo *Info) {
        return Info->getKind() == OFFLOAD_ENTRY_INFO_TARGET_REGION;
      }
    };
    /// \brief Initialize target region entry.
    void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                         StringRef ParentName, unsigned LineNum,
                                         unsigned ColNum, unsigned Order);
    /// \brief Register target region entry.
    void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                       StringRef ParentName, unsigned LineNum,
                                       unsigned ColNum, llvm::Constant *Addr,
                                       llvm::Constant *ID);
    /// \brief Return true if a target region entry with the provided
    /// information exists.
    bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                  StringRef ParentName, unsigned LineNum,
                                  unsigned ColNum) const;
    /// \brief Applies action \a Action on all registered entries.
    typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                    unsigned, OffloadEntryInfoTargetRegion &)>
        OffloadTargetRegionEntryInfoActTy;
    void actOnTargetRegionEntriesInfo(
        const OffloadTargetRegionEntryInfoActTy &Action);

  private:
    // Storage for target region entries kind. The storage is to be indexed by
    // file ID, device ID, parent function name, line number, and column number.
    typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
        OffloadEntriesTargetRegionPerColumn;
    typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerColumn>
        OffloadEntriesTargetRegionPerLine;
    typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
        OffloadEntriesTargetRegionPerParentName;
    typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
        OffloadEntriesTargetRegionPerFile;
    typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
        OffloadEntriesTargetRegionPerDevice;
    typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
    OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
  };
  OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;

  /// \brief Creates and registers offloading binary descriptor for the current
  /// compilation unit. The function that does the registration is returned.
  llvm::Function *createOffloadingBinaryDescriptorRegistration();

  /// \brief Creates offloading entry for the provided address \a Addr,
  /// name \a Name and size \a Size.
  void createOffloadEntry(llvm::Constant *Addr, StringRef Name, uint64_t Size);

  /// \brief Creates all the offload entries in the current compilation unit
  /// along with the associated metadata.
  void createOffloadEntriesAndInfoMetadata();

  /// \brief Loads all the offload entries information from the host IR
  /// metadata.
  void loadOffloadInfoMetadata();

  /// \brief Returns __tgt_offload_entry type.
  QualType getTgtOffloadEntryQTy();

  /// \brief Returns __tgt_device_image type.
  QualType getTgtDeviceImageQTy();

  /// \brief Returns __tgt_bin_desc type.
  QualType getTgtBinaryDescriptorQTy();

  /// \brief Start scanning from statement \a S and emit all target regions
  /// found along the way.
  /// \param S Starting statement.
  /// \param ParentName Name of the function declaration that is being scanned.
  void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

  /// \brief Build type kmp_routine_entry_t (if not built yet).
  void emitKmpRoutineEntryT(QualType KmpInt32Ty);
@ -743,16 +922,24 @@ class CGOpenMPRuntime {

  /// \brief Emit outlined function for 'target' directive.
  /// \param D Directive to emit.
  /// \param CodeGen Code generation sequence for the \a D directive.
  virtual llvm::Value *
  emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                             const RegionCodeGenTy &CodeGen);
  /// \param ParentName Name of the function that encloses the target region.
  /// \param OutlinedFn Outlined function value to be defined by this call.
  /// \param OutlinedFnID Outlined function ID value to be defined by this call.
  /// \param IsOffloadEntry True if the outlined function is an offload entry.
  /// An outlined function may not be an entry if, e.g., the if clause always
  /// evaluates to false.
  virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry);

  /// \brief Emit the target offloading code associated with \a D. The emitted
  /// code attempts offloading the execution to the device; in the event of
  /// a failure it executes the host version outlined in \a OutlinedFn.
  /// \param D Directive to emit.
  /// \param OutlinedFn Host version of the code to be offloaded.
  /// \param OutlinedFnID ID of the host version of the code to be offloaded.
  /// \param IfCond Expression evaluated in if clause associated with the target
  /// directive, or null if no if clause is used.
  /// \param Device Expression evaluated in device clause associated with the
@ -760,9 +947,31 @@ class CGOpenMPRuntime {
  /// \param CapturedVars Values captured in the current region.
  virtual void emitTargetCall(CodeGenFunction &CGF,
                              const OMPExecutableDirective &D,
                              llvm::Value *OutlinedFn, const Expr *IfCond,
                              llvm::Value *OutlinedFn,
                              llvm::Value *OutlinedFnID, const Expr *IfCond,
                              const Expr *Device,
                              ArrayRef<llvm::Value *> CapturedVars);

  /// \brief Emit the target regions enclosed in \a GD function definition or
  /// the function itself in case it is a valid device function. Returns true if
  /// \a GD was dealt with successfully.
  /// \param FD Function to scan.
  virtual bool emitTargetFunctions(GlobalDecl GD);

  /// \brief Emit the global variable if it is a valid device global variable.
  /// Returns true if \a GD was dealt with successfully.
  /// \param GD Variable declaration to emit.
  virtual bool emitTargetGlobalVariable(GlobalDecl GD);

  /// \brief Emit the global \a GD if it is meaningful for the target. Returns
  /// true if it was emitted successfully.
  /// \param GD Global to scan.
  virtual bool emitTargetGlobal(GlobalDecl GD);

  /// \brief Creates the offloading descriptor in the event any target region
  /// was emitted in the current module and returns the function that registers
  /// it.
  virtual llvm::Function *emitRegistrationFunction();
};

} // namespace CodeGen
@ -1125,7 +1125,8 @@ emitPrivateLinearVars(CodeGenFunction &CGF, const OMPExecutableDirective &D,
}

static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &D) {
                                     const OMPExecutableDirective &D,
                                     bool IsMonotonic) {
  if (!CGF.HaveInsertPoint())
    return;
  if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
@ -1136,7 +1137,8 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
    // In presence of finite 'safelen', it may be unsafe to mark all
    // the memory instructions parallel, because loop-carried
    // dependences of 'safelen' iterations are possible.
    CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
    if (!IsMonotonic)
      CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
  } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
    RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
                                 /*ignoreResult=*/true);
@ -1149,11 +1151,12 @@ static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
  }
}

void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D,
                                      bool IsMonotonic) {
  // Walk clauses and process safelen/lastprivate.
  LoopStack.setParallel();
  LoopStack.setParallel(!IsMonotonic);
  LoopStack.setVectorizeEnable(true);
  emitSimdlenSafelenClause(*this, D);
  emitSimdlenSafelenClause(*this, D, IsMonotonic);
}

void CodeGenFunction::EmitOMPSimdFinal(const OMPLoopDirective &D) {
@ -1255,12 +1258,10 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}

void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
                                          const OMPLoopDirective &S,
                                          OMPPrivateScope &LoopScope,
                                          bool Ordered, Address LB,
                                          Address UB, Address ST,
                                          Address IL, llvm::Value *Chunk) {
void CodeGenFunction::EmitOMPForOuterLoop(
    OpenMPScheduleClauseKind ScheduleKind, bool IsMonotonic,
    const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
    Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
  auto &RT = CGM.getOpenMPRuntime();

  // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
@ -1378,13 +1379,10 @@ void CodeGenFunction::EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,

  // Generate !llvm.loop.parallel metadata for loads and stores for loops
  // with dynamic/guided scheduling and without ordered clause.
  if (!isOpenMPSimdDirective(S.getDirectiveKind())) {
    LoopStack.setParallel((ScheduleKind == OMPC_SCHEDULE_dynamic ||
                           ScheduleKind == OMPC_SCHEDULE_guided) &&
                          !Ordered);
  } else {
    EmitOMPSimdInit(S);
  }
  if (!isOpenMPSimdDirective(S.getDirectiveKind()))
    LoopStack.setParallel(!IsMonotonic);
  else
    EmitOMPSimdInit(S, IsMonotonic);

  SourceLocation Loc = S.getLocStart();
  EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
@ -1425,14 +1423,30 @@ static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
  return CGF.EmitLValue(Helper);
}

static std::pair<llvm::Value * /*Chunk*/, OpenMPScheduleClauseKind>
namespace {
struct ScheduleKindModifiersTy {
  OpenMPScheduleClauseKind Kind;
  OpenMPScheduleClauseModifier M1;
  OpenMPScheduleClauseModifier M2;
  ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
                          OpenMPScheduleClauseModifier M1,
                          OpenMPScheduleClauseModifier M2)
      : Kind(Kind), M1(M1), M2(M2) {}
};
} // namespace

static std::pair<llvm::Value * /*Chunk*/, ScheduleKindModifiersTy>
emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
                   bool OuterRegion) {
  // Detect the loop schedule kind and chunk.
  auto ScheduleKind = OMPC_SCHEDULE_unknown;
  OpenMPScheduleClauseModifier M1 = OMPC_SCHEDULE_MODIFIER_unknown;
  OpenMPScheduleClauseModifier M2 = OMPC_SCHEDULE_MODIFIER_unknown;
  llvm::Value *Chunk = nullptr;
  if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
    ScheduleKind = C->getScheduleKind();
    M1 = C->getFirstScheduleModifier();
    M2 = C->getSecondScheduleModifier();
    if (const auto *Ch = C->getChunkSize()) {
      if (auto *ImpRef = cast_or_null<DeclRefExpr>(C->getHelperChunkSize())) {
        if (OuterRegion) {
@ -1454,7 +1468,7 @@ emitScheduleClause(CodeGenFunction &CGF, const OMPLoopDirective &S,
      }
    }
  }
  return std::make_pair(Chunk, ScheduleKind);
  return std::make_pair(Chunk, ScheduleKindModifiersTy(ScheduleKind, M1, M2));
}
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
@ -1530,16 +1544,21 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
      auto ScheduleInfo =
          emitScheduleClause(*this, S, /*OuterRegion=*/false);
      Chunk = ScheduleInfo.first;
      ScheduleKind = ScheduleInfo.second;
      ScheduleKind = ScheduleInfo.second.Kind;
      const OpenMPScheduleClauseModifier M1 = ScheduleInfo.second.M1;
      const OpenMPScheduleClauseModifier M2 = ScheduleInfo.second.M2;
      const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
      const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
      const bool Ordered = S.getSingleClause<OMPOrderedClause>() != nullptr;
      // OpenMP 4.5, 2.7.1 Loop Construct, Description.
      // If the static schedule kind is specified or if the ordered clause is
      // specified, and if no monotonic modifier is specified, the effect will
      // be as if the monotonic modifier was specified.
      if (RT.isStaticNonchunked(ScheduleKind,
                                /* Chunked */ Chunk != nullptr) &&
          !Ordered) {
        if (isOpenMPSimdDirective(S.getDirectiveKind())) {
          EmitOMPSimdInit(S);
        }
        if (isOpenMPSimdDirective(S.getDirectiveKind()))
          EmitOMPSimdInit(S, /*IsMonotonic=*/true);
        // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
        // When no chunk_size is specified, the iteration space is divided into
        // chunks that are approximately equal in size, and at most one chunk is
@ -1549,7 +1568,8 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
                                 IVSize, IVSigned, Ordered,
                                 IL.getAddress(), LB.getAddress(),
                                 UB.getAddress(), ST.getAddress());
        auto LoopExit = getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        auto LoopExit =
            getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
        // UB = min(UB, GlobalUB);
        EmitIgnoredExpr(S.getEnsureUpperBound());
        // IV = LB;
@ -1566,9 +1586,14 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
        // Tell the runtime we are done.
        RT.emitForStaticFinish(*this, S.getLocStart());
      } else {
        const bool IsMonotonic = Ordered ||
                                 ScheduleKind == OMPC_SCHEDULE_static ||
                                 ScheduleKind == OMPC_SCHEDULE_unknown ||
                                 M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
                                 M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
        // Emit the outer loop, which requests its work chunk [LB..UB] from
        // runtime and runs the inner loop to process it.
        EmitOMPForOuterLoop(ScheduleKind, S, LoopScope, Ordered,
        EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
                            LB.getAddress(), UB.getAddress(), ST.getAddress(),
                            IL.getAddress(), Chunk);
      }
@ -2546,14 +2571,8 @@ void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
  llvm::SmallVector<llvm::Value *, 16> CapturedVars;
  GenerateOpenMPCapturedVars(CS, CapturedVars);

  // Emit target region as a standalone region.
  auto &&CodeGen = [&CS](CodeGenFunction &CGF) {
    CGF.EmitStmt(CS.getCapturedStmt());
  };

  // Obtain the target region outlined function.
  llvm::Value *Fn =
      CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, CodeGen);
  llvm::Function *Fn = nullptr;
  llvm::Constant *FnID = nullptr;

  // Check if we have any if clause associated with the directive.
  const Expr *IfCond = nullptr;
@ -2568,7 +2587,34 @@ void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
    Device = C->getDevice();
  }

  CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, IfCond, Device,
  // Check if we have an if clause whose conditional always evaluates to false
  // or if we do not have any targets specified. If so the target region is not
  // an offload entry point.
  bool IsOffloadEntry = true;
  if (IfCond) {
    bool Val;
    if (ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
      IsOffloadEntry = false;
  }
  if (CGM.getLangOpts().OMPTargetTriples.empty())
    IsOffloadEntry = false;

  assert(CurFuncDecl && "No parent declaration for target region!");
  StringRef ParentName;
  // In case we have Ctors/Dtors we use the complete type variant to produce
  // the mangling of the device outlined kernel.
  if (auto *D = dyn_cast<CXXConstructorDecl>(CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
  else if (auto *D = dyn_cast<CXXDestructorDecl>(CurFuncDecl))
    ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
  else
    ParentName =
        CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CurFuncDecl)));

  CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
                                                    IsOffloadEntry);

  CGM.getOpenMPRuntime().emitTargetCall(*this, S, Fn, FnID, IfCond, Device,
                                        CapturedVars);
}
|
@ -378,8 +378,8 @@ void CodeGenFunction::EmitMustTailThunk(const CXXMethodDecl *MD,
  // Apply the standard set of call attributes.
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(*CurFnInfo, MD, AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  CGM.ConstructAttributeList(Callee->getName(), *CurFnInfo, MD, AttributeList,
                             CallingConv, /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs =
      llvm::AttributeSet::get(getLLVMContext(), AttributeList);
  Call->setAttributes(Attrs);

@ -2365,17 +2365,17 @@ class CodeGenFunction : public CodeGenTypeCache {
|
||||
|
||||
/// Helpers for the OpenMP loop directives.
|
||||
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
|
||||
void EmitOMPSimdInit(const OMPLoopDirective &D);
|
||||
void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
|
||||
void EmitOMPSimdFinal(const OMPLoopDirective &D);
|
||||
/// \brief Emit code for the worksharing loop-based directive.
|
||||
/// \return true, if this construct has any lastprivate clause, false -
|
||||
/// otherwise.
|
||||
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);
|
||||
void EmitOMPForOuterLoop(OpenMPScheduleClauseKind ScheduleKind,
|
||||
const OMPLoopDirective &S,
|
||||
OMPPrivateScope &LoopScope, bool Ordered,
|
||||
Address LB, Address UB, Address ST,
|
||||
Address IL, llvm::Value *Chunk);
|
||||
bool IsMonotonic, const OMPLoopDirective &S,
|
||||
OMPPrivateScope &LoopScope, bool Ordered, Address LB,
|
||||
Address UB, Address ST, Address IL,
|
||||
llvm::Value *Chunk);
|
||||
/// \brief Emit code for sections directive.
|
||||
OpenMPDirectiveKind EmitSections(const OMPExecutableDirective &S);
|
||||
|
||||
|
@ -375,6 +375,10 @@ void CodeGenModule::Release() {
|
||||
if (llvm::Function *CudaDtorFunction = CUDARuntime->makeModuleDtorFunction())
|
||||
AddGlobalDtor(CudaDtorFunction);
|
||||
}
|
||||
if (OpenMPRuntime)
|
||||
if (llvm::Function *OpenMPRegistrationFunction =
|
||||
OpenMPRuntime->emitRegistrationFunction())
|
||||
AddGlobalCtor(OpenMPRegistrationFunction, 0);
|
||||
if (PGOReader) {
|
||||
getModule().setMaximumFunctionCount(PGOReader->getMaximumFunctionCount());
|
||||
if (PGOStats.hasDiagnostics())
|
||||
@ -770,7 +774,8 @@ void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
|
||||
llvm::Function *F) {
|
||||
unsigned CallingConv;
|
||||
AttributeListType AttributeList;
|
||||
ConstructAttributeList(Info, D, AttributeList, CallingConv, false);
|
||||
ConstructAttributeList(F->getName(), Info, D, AttributeList, CallingConv,
|
||||
false);
|
||||
F->setAttributes(llvm::AttributeSet::get(getLLVMContext(), AttributeList));
|
||||
F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
|
||||
}
|
||||
@ -1490,6 +1495,11 @@ void CodeGenModule::EmitGlobal(GlobalDecl GD) {
|
||||
}
|
||||
}
|
||||
|
||||
// If this is OpenMP device, check if it is legal to emit this global
|
||||
// normally.
|
||||
if (OpenMPRuntime && OpenMPRuntime->emitTargetGlobal(GD))
|
||||
return;
|
||||
|
||||
// Ignore declarations, they will be emitted on their first use.
|
||||
if (const auto *FD = dyn_cast<FunctionDecl>(Global)) {
|
||||
// Forward declarations are emitted lazily on first use.
|
||||
@ -3596,6 +3606,9 @@ void CodeGenModule::EmitTopLevelDecl(Decl *D) {
|
||||
// File-scope asm is ignored during device-side CUDA compilation.
|
||||
if (LangOpts.CUDA && LangOpts.CUDAIsDevice)
|
||||
break;
|
||||
// File-scope asm is ignored during device-side OpenMP compilation.
|
||||
if (LangOpts.OpenMPIsDevice)
|
||||
break;
|
||||
auto *AD = cast<FileScopeAsmDecl>(D);
|
||||
getModule().appendModuleInlineAsm(AD->getAsmString()->getString());
|
||||
break;
|
||||
|
@ -966,13 +966,14 @@ class CodeGenModule : public CodeGenTypeCache {
|
||||
/// Get the LLVM attributes and calling convention to use for a particular
|
||||
/// function type.
|
||||
///
|
||||
/// \param Name - The function name.
|
||||
/// \param Info - The function type information.
|
||||
/// \param CalleeInfo - The callee information these attributes are being
|
||||
/// constructed for. If valid, the attributes applied to this decl may
|
||||
/// contribute to the function attributes and calling convention.
|
||||
/// \param PAL [out] - On return, the attribute list to use.
|
||||
/// \param CallingConv [out] - On return, the LLVM calling convention to use.
|
||||
void ConstructAttributeList(const CGFunctionInfo &Info,
|
||||
void ConstructAttributeList(StringRef Name, const CGFunctionInfo &Info,
|
||||
CGCalleeInfo CalleeInfo, AttributeListType &PAL,
|
||||
unsigned &CallingConv, bool AttrOnCallSite);
|
||||
|
||||
|
@ -721,17 +721,7 @@ CodeGenPGO::applyFunctionAttributes(llvm::IndexedInstrProfReader *PGOReader,
|
||||
if (!haveRegionCounts())
|
||||
return;
|
||||
|
||||
uint64_t MaxFunctionCount = PGOReader->getMaximumFunctionCount();
|
||||
uint64_t FunctionCount = getRegionCount(nullptr);
|
||||
if (FunctionCount >= (uint64_t)(0.3 * (double)MaxFunctionCount))
|
||||
// Turn on InlineHint attribute for hot functions.
|
||||
// FIXME: 30% is from preliminary tuning on SPEC, it may not be optimal.
|
||||
Fn->addFnAttr(llvm::Attribute::InlineHint);
|
||||
else if (FunctionCount <= (uint64_t)(0.01 * (double)MaxFunctionCount))
|
||||
// Turn on Cold attribute for cold functions.
|
||||
// FIXME: 1% is from preliminary tuning on SPEC, it may not be optimal.
|
||||
Fn->addFnAttr(llvm::Attribute::Cold);
|
||||
|
||||
Fn->setEntryCount(FunctionCount);
|
||||
}
|
||||
|
||||
|
@ -993,24 +993,30 @@ void CoverageMappingModuleGen::emit() {
|
||||
llvm::ArrayType::get(FunctionRecordTy, FunctionRecords.size());
|
||||
auto RecordsVal = llvm::ConstantArray::get(RecordsTy, FunctionRecords);
|
||||
|
||||
llvm::Type *CovDataHeaderTypes[] = {
|
||||
#define COVMAP_HEADER(Type, LLVMType, Name, Init) LLVMType,
|
||||
#include "llvm/ProfileData/InstrProfData.inc"
|
||||
};
|
||||
auto CovDataHeaderTy =
|
||||
llvm::StructType::get(Ctx, makeArrayRef(CovDataHeaderTypes));
|
||||
llvm::Constant *CovDataHeaderVals[] = {
|
||||
#define COVMAP_HEADER(Type, LLVMType, Name, Init) Init,
|
||||
#include "llvm/ProfileData/InstrProfData.inc"
|
||||
};
|
||||
auto CovDataHeaderVal = llvm::ConstantStruct::get(
|
||||
CovDataHeaderTy, makeArrayRef(CovDataHeaderVals));
|
||||
|
||||
// Create the coverage data record
|
||||
llvm::Type *CovDataTypes[] = {Int32Ty, Int32Ty,
|
||||
Int32Ty, Int32Ty,
|
||||
RecordsTy, FilenamesAndMappingsVal->getType()};
|
||||
llvm::Type *CovDataTypes[] = {CovDataHeaderTy, RecordsTy,
|
||||
FilenamesAndMappingsVal->getType()};
|
||||
auto CovDataTy = llvm::StructType::get(Ctx, makeArrayRef(CovDataTypes));
|
||||
llvm::Constant *TUDataVals[] = {
|
||||
llvm::ConstantInt::get(Int32Ty, FunctionRecords.size()),
|
||||
llvm::ConstantInt::get(Int32Ty, FilenamesSize),
|
||||
llvm::ConstantInt::get(Int32Ty, CoverageMappingSize),
|
||||
llvm::ConstantInt::get(Int32Ty,
|
||||
/*Version=*/CoverageMappingVersion1),
|
||||
RecordsVal, FilenamesAndMappingsVal};
|
||||
llvm::Constant *TUDataVals[] = {CovDataHeaderVal, RecordsVal,
|
||||
FilenamesAndMappingsVal};
|
||||
auto CovDataVal =
|
||||
llvm::ConstantStruct::get(CovDataTy, makeArrayRef(TUDataVals));
|
||||
auto CovData = new llvm::GlobalVariable(CGM.getModule(), CovDataTy, true,
|
||||
llvm::GlobalValue::InternalLinkage,
|
||||
CovDataVal,
|
||||
llvm::getCoverageMappingVarName());
|
||||
auto CovData = new llvm::GlobalVariable(
|
||||
CGM.getModule(), CovDataTy, true, llvm::GlobalValue::InternalLinkage,
|
||||
CovDataVal, llvm::getCoverageMappingVarName());
|
||||
|
||||
CovData->setSection(getCoverageSection(CGM));
|
||||
CovData->setAlignment(8);
|
||||
|
@ -699,11 +699,11 @@ void Driver::generateCompilationDiagnostics(Compilation &C,
}

void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
  // Since argumentsFitWithinSystemLimits() may underestimate system's capacity
  // Since commandLineFitsWithinSystemLimits() may underestimate system's capacity
  // if the tool does not support response files, there is a chance that things
  // will just work without a response file, so we silently just skip it.
  if (Cmd.getCreator().getResponseFilesSupport() == Tool::RF_None ||
      llvm::sys::argumentsFitWithinSystemLimits(Cmd.getArguments()))
      llvm::sys::commandLineFitsWithinSystemLimits(Cmd.getExecutable(), Cmd.getArguments()))
    return;

  std::string TmpName = GetTemporaryPath("response", "txt");
|
@ -2611,7 +2611,8 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
  // Other standard paths
  //----------------------------------------------------------------------------
  std::vector<std::string> RootDirs;
  std::copy(D.PrefixDirs.begin(), D.PrefixDirs.end(), RootDirs.begin());
  std::copy(D.PrefixDirs.begin(), D.PrefixDirs.end(),
            std::back_inserter(RootDirs));

  std::string TargetDir = getHexagonTargetDir(D.getInstalledDir(),
                                              D.PrefixDirs);

@ -939,7 +939,7 @@ static void getARMTargetFeatures(const ToolChain &TC,
    Features.push_back("+reserve-r9");

  // The kext linker doesn't know how to deal with movw/movt.
  if (KernelOrKext)
  if (KernelOrKext || Args.hasArg(options::OPT_mno_movt))
    Features.push_back("+no-movt");
}

@ -2107,7 +2107,7 @@ static bool DecodeAArch64Mcpu(const Driver &D, StringRef Mcpu, StringRef &CPU,
  std::pair<StringRef, StringRef> Split = Mcpu.split("+");
  CPU = Split.first;
  if (CPU == "cyclone" || CPU == "cortex-a53" || CPU == "cortex-a57" ||
      CPU == "cortex-a72" || CPU == "cortex-a35") {
      CPU == "cortex-a72" || CPU == "cortex-a35" || CPU == "exynos-m1") {
    Features.push_back("+neon");
    Features.push_back("+crc");
    Features.push_back("+crypto");
@ -3592,6 +3592,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
|
||||
if (!IsWindowsMSVC)
|
||||
CmdArgs.push_back("-analyzer-checker=unix");
|
||||
|
||||
// Disable some unix checkers for PS4.
|
||||
if (IsPS4CPU) {
|
||||
CmdArgs.push_back("-analyzer-disable-checker=unix.API");
|
||||
CmdArgs.push_back("-analyzer-disable-checker=unix.Vfork");
|
||||
}
|
||||
|
||||
if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
|
||||
CmdArgs.push_back("-analyzer-checker=osx");
|
||||
|
||||
@ -3600,14 +3606,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
|
||||
if (types::isCXX(Input.getType()))
|
||||
CmdArgs.push_back("-analyzer-checker=cplusplus");
|
||||
|
||||
// Enable the following experimental checkers for testing.
|
||||
CmdArgs.push_back(
|
||||
"-analyzer-checker=security.insecureAPI.UncheckedReturn");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
|
||||
if (!IsPS4CPU) {
|
||||
CmdArgs.push_back(
|
||||
"-analyzer-checker=security.insecureAPI.UncheckedReturn");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
|
||||
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
|
||||
}
|
||||
|
||||
// Default nullability checks.
|
||||
CmdArgs.push_back("-analyzer-checker=nullability.NullPassedToNonnull");
|
||||
@ -4733,11 +4740,33 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
|
||||
A->render(Args, CmdArgs);
|
||||
}
|
||||
|
||||
// -fbuiltin is default unless -mkernel is used
|
||||
if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin,
|
||||
!Args.hasArg(options::OPT_mkernel)))
|
||||
// -fbuiltin is default unless -mkernel is used.
|
||||
bool UseBuiltins =
|
||||
Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin,
|
||||
!Args.hasArg(options::OPT_mkernel));
|
||||
if (!UseBuiltins)
|
||||
CmdArgs.push_back("-fno-builtin");
|
||||
|
||||
// -ffreestanding implies -fno-builtin.
|
||||
if (Args.hasArg(options::OPT_ffreestanding))
|
||||
UseBuiltins = false;
|
||||
|
||||
// Process the -fno-builtin-* options.
|
||||
for (const auto &Arg : Args) {
|
||||
const Option &O = Arg->getOption();
|
||||
if (!O.matches(options::OPT_fno_builtin_))
|
||||
continue;
|
||||
|
||||
Arg->claim();
|
||||
// If -fno-builtin is specified, then there's no need to pass the option to
|
||||
// the frontend.
|
||||
if (!UseBuiltins)
|
||||
continue;
|
||||
|
||||
StringRef FuncName = Arg->getValue();
|
||||
CmdArgs.push_back(Args.MakeArgString("-fno-builtin-" + FuncName));
|
||||
}
|
||||
|
||||
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
|
||||
options::OPT_fno_assume_sane_operator_new))
|
||||
CmdArgs.push_back("-fno-assume-sane-operator-new");
|
||||
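A small illustration of what the per-function option handled above enables. The file name, command line, and function choice are illustrative, not taken from the patch: building with -fno-builtin-memcpy keeps memcpy from being treated as a builtin while leaving every other builtin enabled.

// Compile with, e.g.:  clang++ -O2 -fno-builtin-memcpy copy.cpp -c
// The optimizer must then treat memcpy as an ordinary external call here
// instead of recognizing it as a builtin; strlen remains builtin-eligible.
#include <cstring>

std::size_t copy_and_measure(char *dst, const char *src, std::size_t n) {
  std::memcpy(dst, src, n);   // not a builtin under -fno-builtin-memcpy
  return std::strlen(src);    // still eligible for builtin handling
}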
@ -6179,6 +6208,11 @@ void gcc::Compiler::RenderExtraToolArgs(const JobAction &JA,
  case types::TY_LTO_BC:
    CmdArgs.push_back("-c");
    break;
  // We assume we've got an "integrated" assembler in that gcc will produce an
  // object file itself.
  case types::TY_Object:
    CmdArgs.push_back("-c");
    break;
  case types::TY_PP_Asm:
    CmdArgs.push_back("-S");
    break;
@ -9473,6 +9507,8 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
|
||||
A.renderAsInput(Args, CmdArgs);
|
||||
}
|
||||
|
||||
TC.addProfileRTLibs(Args, CmdArgs);
|
||||
|
||||
// We need to special case some linker paths. In the case of lld, we need to
|
||||
// translate 'lld' into 'lld-link', and in the case of the regular msvc
|
||||
// linker, we need to use a special search algorithm.
|
||||
|
@ -149,6 +149,10 @@ class LLVM_LIBRARY_VISIBILITY Common : public GnuTool {
|
||||
Common(const char *Name, const char *ShortName, const ToolChain &TC)
|
||||
: GnuTool(Name, ShortName, TC) {}
|
||||
|
||||
// A gcc tool has an "integrated" assembler that it will call to produce an
|
||||
// object. Let it use that assembler so that we don't have to deal with
|
||||
// assembly syntax incompatibilities.
|
||||
bool hasIntegratedAssembler() const override { return true; }
|
||||
void ConstructJob(Compilation &C, const JobAction &JA,
|
||||
const InputInfo &Output, const InputInfoList &Inputs,
|
||||
const llvm::opt::ArgList &TCArgs,
|
||||
|
@ -38,6 +38,12 @@ static unsigned getLengthToMatchingParen(const FormatToken &Tok) {
|
||||
return End->TotalLength - Tok.TotalLength + 1;
|
||||
}
|
||||
|
||||
static unsigned getLengthToNextOperator(const FormatToken &Tok) {
|
||||
if (!Tok.NextOperator)
|
||||
return 0;
|
||||
return Tok.NextOperator->TotalLength - Tok.TotalLength;
|
||||
}
|
||||
|
||||
// Returns \c true if \c Tok is the "." or "->" of a call and starts the next
|
||||
// segment of a builder type call.
|
||||
static bool startsSegmentOfBuilderTypeCall(const FormatToken &Tok) {
|
||||
@ -153,7 +159,8 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
|
||||
!Current.isOneOf(tok::r_paren, tok::r_brace))
|
||||
return true;
|
||||
if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
|
||||
Previous.is(TT_ArrayInitializerLSquare)) &&
|
||||
(Previous.is(TT_ArrayInitializerLSquare) &&
|
||||
Previous.ParameterCount > 1)) &&
|
||||
Style.ColumnLimit > 0 &&
|
||||
getLengthToMatchingParen(Previous) + State.Column - 1 >
|
||||
getColumnLimit(State))
|
||||
@ -170,9 +177,13 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
|
||||
return true;
|
||||
|
||||
unsigned NewLineColumn = getNewLineColumn(State);
|
||||
if (State.Column < NewLineColumn)
|
||||
if (State.Column <= NewLineColumn)
|
||||
return false;
|
||||
|
||||
if (Current.isMemberAccess() &&
|
||||
State.Column + getLengthToNextOperator(Current) > Style.ColumnLimit)
|
||||
return true;
|
||||
|
||||
if (Style.AlwaysBreakBeforeMultilineStrings &&
|
||||
(NewLineColumn == State.FirstIndent + Style.ContinuationIndentWidth ||
|
||||
Previous.is(tok::comma) || Current.NestingLevel < 2) &&
|
||||
@ -246,8 +257,10 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
      Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment))
    return true;

  if (Current.is(tok::lessless) && Previous.is(tok::identifier) &&
      Previous.TokenText == "endl")
  if (Current.is(tok::lessless) &&
      ((Previous.is(tok::identifier) && Previous.TokenText == "endl") ||
       (Previous.Tok.isLiteral() && (Previous.TokenText.endswith("\\n\"") ||
                                     Previous.TokenText == "\'\\n\'"))))
    return true;

  return false;
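The new mustBreak case above can be seen on ordinary iostream code. A small, self-contained example; the formatting outcome described in the comment is the intent of the change, not a verified transcript.

#include <iostream>

int main() {
  // Because the first string literal ends in "\n", clang-format now forces
  // the following "<<" segments onto their own lines instead of packing them.
  std::cout << "first line\n"
            << "second line\n"
            << std::endl;
  return 0;
}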
@ -316,16 +329,16 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
|
||||
|
||||
if (Current.is(TT_SelectorName) &&
|
||||
!State.Stack.back().ObjCSelectorNameFound) {
|
||||
unsigned MinIndent =
|
||||
std::max(State.FirstIndent + Style.ContinuationIndentWidth,
|
||||
State.Stack.back().Indent);
|
||||
unsigned FirstColonPos = State.Column + Spaces + Current.ColumnWidth;
|
||||
if (Current.LongestObjCSelectorName == 0)
|
||||
State.Stack.back().AlignColons = false;
|
||||
else if (State.Stack.back().Indent + Current.LongestObjCSelectorName >
|
||||
State.Column + Spaces + Current.ColumnWidth)
|
||||
State.Stack.back().ColonPos =
|
||||
std::max(State.FirstIndent + Style.ContinuationIndentWidth,
|
||||
State.Stack.back().Indent) +
|
||||
Current.LongestObjCSelectorName;
|
||||
else if (MinIndent + Current.LongestObjCSelectorName > FirstColonPos)
|
||||
State.Stack.back().ColonPos = MinIndent + Current.LongestObjCSelectorName;
|
||||
else
|
||||
State.Stack.back().ColonPos = State.Column + Spaces + Current.ColumnWidth;
|
||||
State.Stack.back().ColonPos = FirstColonPos;
|
||||
}
|
||||
|
||||
// In "AlwaysBreak" mode, enforce wrapping directly after the parenthesis by
|
||||
@ -377,7 +390,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
|
||||
TT_CtorInitializerColon)) &&
|
||||
((Previous.getPrecedence() != prec::Assignment &&
|
||||
(Previous.isNot(tok::lessless) || Previous.OperatorIndex != 0 ||
|
||||
!Previous.LastOperator)) ||
|
||||
Previous.NextOperator)) ||
|
||||
Current.StartsBinaryExpression)) {
|
||||
// Always indent relative to the RHS of the expression unless this is a
|
||||
// simple assignment without binary expression on the RHS. Also indent
|
||||
@ -692,7 +705,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
|
||||
std::min(State.LowestLevelOnLine, Current.NestingLevel);
|
||||
if (Current.isMemberAccess())
|
||||
State.Stack.back().StartOfFunctionCall =
|
||||
Current.LastOperator ? 0 : State.Column;
|
||||
!Current.NextOperator ? 0 : State.Column;
|
||||
if (Current.is(TT_SelectorName)) {
|
||||
State.Stack.back().ObjCSelectorNameFound = true;
|
||||
if (Style.IndentWrappedFunctionNames) {
|
||||
@ -728,7 +741,7 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
|
||||
// }, a, b, c);
|
||||
if (Current.isNot(tok::comment) && Previous &&
|
||||
Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
|
||||
State.Stack.size() > 1) {
|
||||
!Previous->is(TT_DictLiteral) && State.Stack.size() > 1) {
|
||||
if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
|
||||
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
|
||||
State.Stack[i].NoLineBreak = true;
|
||||
|
@ -218,10 +218,12 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
|
||||
ItemBegin = ItemEnd->Next;
|
||||
}
|
||||
|
||||
// Don't use column layout for nested lists, lists with few elements and in
|
||||
// presence of separating comments.
|
||||
if ((Token->NestingLevel != 0 && Token->is(tok::l_brace)) ||
|
||||
Commas.size() < 5 || HasSeparatingComment)
|
||||
// Don't use column layout for lists with few elements and in presence of
|
||||
// separating comments.
|
||||
if (Commas.size() < 5 || HasSeparatingComment)
|
||||
return;
|
||||
|
||||
if (Token->NestingLevel != 0 && Token->is(tok::l_brace) && Commas.size() < 19)
|
||||
return;
|
||||
|
||||
// We can never place more than ColumnLimit / 3 items in a row (because of the
|
||||
|
@ -248,9 +248,9 @@ struct FormatToken {
  /// with the same precedence, contains the 0-based operator index.
  unsigned OperatorIndex = 0;

  /// \brief Is this the last operator (or "."/"->") in a sequence of operators
  /// with the same precedence?
  bool LastOperator = false;
  /// \brief If this is an operator (or "."/"->") in a sequence of operators
  /// with the same precedence, points to the next operator.
  FormatToken *NextOperator = nullptr;

  /// \brief Is this token part of a \c DeclStmt defining multiple variables?
  ///

@ -285,10 +285,13 @@ class AnnotatingParser {
|
||||
Contexts.back().ContextKind == tok::l_brace &&
|
||||
Parent->isOneOf(tok::l_brace, tok::comma)) {
|
||||
Left->Type = TT_JsComputedPropertyName;
|
||||
} else if (Parent &&
|
||||
Parent->isOneOf(tok::at, tok::equal, tok::comma, tok::l_paren,
|
||||
tok::l_square, tok::question, tok::colon,
|
||||
tok::kw_return)) {
|
||||
} else if (Style.Language == FormatStyle::LK_Proto ||
|
||||
(Parent &&
|
||||
Parent->isOneOf(TT_BinaryOperator, tok::at, tok::comma,
|
||||
tok::l_paren, tok::l_square, tok::question,
|
||||
tok::colon, tok::kw_return,
|
||||
// Should only be relevant to JavaScript:
|
||||
tok::kw_default))) {
|
||||
Left->Type = TT_ArrayInitializerLSquare;
|
||||
} else {
|
||||
BindingIncrease = 10;
|
||||
@ -458,16 +461,16 @@ class AnnotatingParser {
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (Contexts.back().ColonIsDictLiteral) {
|
||||
if (Contexts.back().ColonIsDictLiteral ||
|
||||
Style.Language == FormatStyle::LK_Proto) {
|
||||
Tok->Type = TT_DictLiteral;
|
||||
} else if (Contexts.back().ColonIsObjCMethodExpr ||
|
||||
Line.startsWith(TT_ObjCMethodSpecifier)) {
|
||||
Tok->Type = TT_ObjCMethodExpr;
|
||||
Tok->Previous->Type = TT_SelectorName;
|
||||
if (Tok->Previous->ColumnWidth >
|
||||
Contexts.back().LongestObjCSelectorName) {
|
||||
Contexts.back().LongestObjCSelectorName)
|
||||
Contexts.back().LongestObjCSelectorName = Tok->Previous->ColumnWidth;
|
||||
}
|
||||
if (!Contexts.back().FirstObjCSelectorName)
|
||||
Contexts.back().FirstObjCSelectorName = Tok->Previous;
|
||||
} else if (Contexts.back().ColonIsForRangeExpr) {
|
||||
@ -1327,6 +1330,8 @@ class ExpressionParser {
|
||||
} else {
|
||||
// Operator found.
|
||||
if (CurrentPrecedence == Precedence) {
|
||||
if (LatestOperator)
|
||||
LatestOperator->NextOperator = Current;
|
||||
LatestOperator = Current;
|
||||
Current->OperatorIndex = OperatorIndex;
|
||||
++OperatorIndex;
|
||||
@ -1336,7 +1341,7 @@ class ExpressionParser {
|
||||
}
|
||||
|
||||
if (LatestOperator && (Current || Precedence > 0)) {
|
||||
LatestOperator->LastOperator = true;
|
||||
// LatestOperator->LastOperator = true;
|
||||
if (Precedence == PrecedenceArrowAndPeriod) {
|
||||
// Call expressions don't have a binary operator precedence.
|
||||
addFakeParenthesis(Start, prec::Unknown);
|
||||
@ -1715,11 +1720,13 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
|
||||
Right.Next->is(TT_DictLiteral)))
|
||||
return 1;
|
||||
if (Right.is(tok::l_square)) {
|
||||
if (Style.Language == FormatStyle::LK_Proto || Left.is(tok::r_square))
|
||||
if (Style.Language == FormatStyle::LK_Proto)
|
||||
return 1;
|
||||
if (Left.is(tok::r_square))
|
||||
return 25;
|
||||
// Slightly prefer formatting local lambda definitions like functions.
|
||||
if (Right.is(TT_LambdaLSquare) && Left.is(tok::equal))
|
||||
return 50;
|
||||
return 35;
|
||||
if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
|
||||
TT_ArrayInitializerLSquare))
|
||||
return 500;
|
||||
@ -1766,7 +1773,15 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
|
||||
// which might otherwise be blown up onto many lines. Here, clang-format
|
||||
// won't produce "hanging" indents anyway as there is no other trailing
|
||||
// call.
|
||||
return Right.LastOperator ? 150 : 40;
|
||||
//
|
||||
// Also apply higher penalty is not a call as that might lead to a wrapping
|
||||
// like:
|
||||
//
|
||||
// aaaaaaa
|
||||
// .aaaaaaaaa.bbbbbbbb(cccccccc);
|
||||
return !Right.NextOperator || !Right.NextOperator->Previous->closesScope()
|
||||
? 150
|
||||
: 35;
|
||||
}
|
||||
|
||||
if (Right.is(TT_TrailingAnnotation) &&
|
||||
@ -1818,7 +1833,7 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
|
||||
|
||||
if (Right.is(tok::lessless)) {
|
||||
if (Left.is(tok::string_literal) &&
|
||||
(!Right.LastOperator || Right.OperatorIndex != 1)) {
|
||||
(Right.NextOperator || Right.OperatorIndex != 1)) {
|
||||
StringRef Content = Left.TokenText;
|
||||
if (Content.startswith("\""))
|
||||
Content = Content.drop_front(1);
|
||||
@ -1875,6 +1890,8 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
|
||||
tok::numeric_constant, tok::l_paren, tok::l_brace,
|
||||
tok::kw_true, tok::kw_false))
|
||||
return false;
|
||||
if (Left.is(tok::colon))
|
||||
return !Left.is(TT_ObjCMethodExpr);
|
||||
if (Left.is(tok::coloncolon))
|
||||
return false;
|
||||
if (Left.is(tok::less) || Right.isOneOf(tok::greater, tok::less))
|
||||
@ -1925,8 +1942,6 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
|
||||
!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare) &&
|
||||
!Left.isOneOf(tok::numeric_constant, TT_DictLiteral))
|
||||
return false;
|
||||
if (Left.is(tok::colon))
|
||||
return !Left.is(TT_ObjCMethodExpr);
|
||||
if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
|
||||
return !Left.Children.empty(); // No spaces in "{}".
|
||||
if ((Left.is(tok::l_brace) && Left.BlockKind != BK_Block) ||
|
||||
@ -1996,6 +2011,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
|
||||
if (Left.isOneOf(Keywords.kw_let, Keywords.kw_var, TT_JsFatArrow,
|
||||
Keywords.kw_in))
|
||||
return true;
|
||||
if (Left.is(tok::kw_default) && Left.Previous &&
|
||||
Left.Previous->is(tok::kw_export))
|
||||
return true;
|
||||
if (Left.is(Keywords.kw_is) && Right.is(tok::l_brace))
|
||||
return true;
|
||||
if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
|
||||
|
@ -21,4 +21,12 @@ CodeGenOptions::CodeGenOptions() {
  memcpy(CoverageVersion, "402*", 4);
}

bool CodeGenOptions::isNoBuiltinFunc(const char *Name) const {
  StringRef FuncName(Name);
  for (unsigned i = 0, e = NoBuiltinFuncs.size(); i != e; ++i)
    if (FuncName.equals(NoBuiltinFuncs[i]))
      return true;
  return false;
}

} // end namespace clang

@ -8,13 +8,14 @@
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#include "TestModuleFileExtension.h"
|
||||
#include "clang/Frontend/CompilerInvocation.h"
|
||||
#include "clang/Basic/Builtins.h"
|
||||
#include "clang/Basic/FileManager.h"
|
||||
#include "clang/Basic/Version.h"
|
||||
#include "clang/Config/config.h"
|
||||
#include "clang/Driver/DriverDiagnostic.h"
|
||||
#include "clang/Driver/Options.h"
|
||||
#include "clang/Driver/Util.h"
|
||||
#include "clang/Frontend/CompilerInvocation.h"
|
||||
#include "clang/Frontend/FrontendDiagnostic.h"
|
||||
#include "clang/Frontend/LangStandard.h"
|
||||
#include "clang/Frontend/Utils.h"
|
||||
@ -135,6 +136,20 @@ static void addDiagnosticArgs(ArgList &Args, OptSpecifier Group,
|
||||
}
|
||||
}
|
||||
|
||||
static void getAllNoBuiltinFuncValues(ArgList &Args,
|
||||
std::vector<std::string> &Funcs) {
|
||||
SmallVector<const char *, 8> Values;
|
||||
for (const auto &Arg : Args) {
|
||||
const Option &O = Arg->getOption();
|
||||
if (O.matches(options::OPT_fno_builtin_)) {
|
||||
const char *FuncName = Arg->getValue();
|
||||
if (Builtin::Context::isBuiltinFunc(FuncName))
|
||||
Values.push_back(FuncName);
|
||||
}
|
||||
}
|
||||
Funcs.insert(Funcs.end(), Values.begin(), Values.end());
|
||||
}
|
||||
|
||||
static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
|
||||
DiagnosticsEngine &Diags) {
|
||||
using namespace options;
|
||||
@ -399,18 +414,29 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
|
||||
}
|
||||
|
||||
if (Arg *A = Args.getLastArg(OPT_debug_info_kind_EQ)) {
|
||||
Opts.setDebugInfo(
|
||||
llvm::StringSwitch<CodeGenOptions::DebugInfoKind>(A->getValue())
|
||||
unsigned Val =
|
||||
llvm::StringSwitch<unsigned>(A->getValue())
|
||||
.Case("line-tables-only", CodeGenOptions::DebugLineTablesOnly)
|
||||
.Case("limited", CodeGenOptions::LimitedDebugInfo)
|
||||
.Case("standalone", CodeGenOptions::FullDebugInfo));
|
||||
.Case("standalone", CodeGenOptions::FullDebugInfo)
|
||||
.Default(~0U);
|
||||
if (Val == ~0U)
|
||||
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
|
||||
<< A->getValue();
|
||||
else
|
||||
Opts.setDebugInfo(static_cast<CodeGenOptions::DebugInfoKind>(Val));
|
||||
}
|
||||
if (Arg *A = Args.getLastArg(OPT_debugger_tuning_EQ)) {
|
||||
Opts.setDebuggerTuning(
|
||||
llvm::StringSwitch<CodeGenOptions::DebuggerKind>(A->getValue())
|
||||
.Case("gdb", CodeGenOptions::DebuggerKindGDB)
|
||||
.Case("lldb", CodeGenOptions::DebuggerKindLLDB)
|
||||
.Case("sce", CodeGenOptions::DebuggerKindSCE));
|
||||
unsigned Val = llvm::StringSwitch<unsigned>(A->getValue())
|
||||
.Case("gdb", CodeGenOptions::DebuggerKindGDB)
|
||||
.Case("lldb", CodeGenOptions::DebuggerKindLLDB)
|
||||
.Case("sce", CodeGenOptions::DebuggerKindSCE)
|
||||
.Default(~0U);
|
||||
if (Val == ~0U)
|
||||
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
|
||||
<< A->getValue();
|
||||
else
|
||||
Opts.setDebuggerTuning(static_cast<CodeGenOptions::DebuggerKind>(Val));
|
||||
}
|
||||
Opts.DwarfVersion = getLastArgIntValue(Args, OPT_dwarf_version_EQ, 0, Diags);
|
||||
Opts.DebugColumnInfo = Args.hasArg(OPT_dwarf_column_info);
|
||||
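The same validate-then-cast pattern, shown stand-alone. The enum values and strings below are illustrative, not the actual cc1 option tables: llvm::StringSwitch returns a sentinel for unknown values, and only recognized values are cast back to the enum, so the caller can report the bad value instead of silently picking one.

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"

enum class DebuggerKind { GDB, LLDB, SCE };

// Returns true and sets Out on success; false means the caller should emit
// err_drv_invalid_value, mirroring the cc1 handling above.
bool parseDebuggerTuning(llvm::StringRef Value, DebuggerKind &Out) {
  unsigned Val = llvm::StringSwitch<unsigned>(Value)
                     .Case("gdb", static_cast<unsigned>(DebuggerKind::GDB))
                     .Case("lldb", static_cast<unsigned>(DebuggerKind::LLDB))
                     .Case("sce", static_cast<unsigned>(DebuggerKind::SCE))
                     .Default(~0U);
  if (Val == ~0U)
    return false;
  Out = static_cast<DebuggerKind>(Val);
  return true;
}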
@ -441,6 +467,8 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
  Opts.OptimizeSize = getOptimizationLevelSize(Args);
  Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
                            Args.hasArg(OPT_ffreestanding));
  if (Opts.SimplifyLibCalls)
    getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
  Opts.UnrollLoops =
      Args.hasFlag(OPT_funroll_loops, OPT_fno_unroll_loops,
                   (Opts.OptimizationLevel > 1 && !Opts.OptimizeSize));
@ -1658,6 +1686,8 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
|
||||
Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
|
||||
Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
|
||||
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
|
||||
if (!Opts.NoBuiltin)
|
||||
getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
|
||||
Opts.NoMathBuiltin = Args.hasArg(OPT_fno_math_builtin);
|
||||
Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
|
||||
Opts.SizedDeallocation = Args.hasArg(OPT_fsized_deallocation);
|
||||
@ -1784,6 +1814,30 @@ static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
  Opts.OpenMP = Args.hasArg(options::OPT_fopenmp);
  Opts.OpenMPUseTLS =
      Opts.OpenMP && !Args.hasArg(options::OPT_fnoopenmp_use_tls);
  Opts.OpenMPIsDevice =
      Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_is_device);

  // Get the OpenMP target triples if any.
  if (Arg *A = Args.getLastArg(options::OPT_omptargets_EQ)) {

    for (unsigned i = 0; i < A->getNumValues(); ++i) {
      llvm::Triple TT(A->getValue(i));

      if (TT.getArch() == llvm::Triple::UnknownArch)
        Diags.Report(clang::diag::err_drv_invalid_omp_target) << A->getValue(i);
      else
        Opts.OMPTargetTriples.push_back(TT);
    }
  }

  // Get OpenMP host file path if any and report if a non existent file is
  // found
  if (Arg *A = Args.getLastArg(options::OPT_omp_host_ir_file_path)) {
    Opts.OMPHostIRFile = A->getValue();
    if (!llvm::sys::fs::exists(Opts.OMPHostIRFile))
      Diags.Report(clang::diag::err_drv_omp_host_ir_file_not_found)
          << Opts.OMPHostIRFile;
  }

  // Record whether the __DEPRECATED define was requested.
  Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
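A reduced sketch of the triple check performed above; the helper name is made up. An -omptargets= value is kept only when LLVM can parse its architecture, which is exactly what err_drv_invalid_omp_target rejects.

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"

// Hypothetical helper: returns true when the string names a usable offload
// target architecture.
bool isPlausibleOffloadTriple(llvm::StringRef TripleStr) {
  llvm::Triple TT(TripleStr);
  return TT.getArch() != llvm::Triple::UnknownArch;
}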
@ -12,6 +12,7 @@ set(files
  avx512vlintrin.h
  avx512dqintrin.h
  avx512vldqintrin.h
  pkuintrin.h
  avxintrin.h
  bmi2intrin.h
  bmiintrin.h
@ -32,50 +32,26 @@
|
||||
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
|
||||
__readeflags(void)
|
||||
{
|
||||
unsigned long long __res = 0;
|
||||
__asm__ __volatile__ ("pushf\n\t"
|
||||
"popq %0\n"
|
||||
:"=r"(__res)
|
||||
:
|
||||
:
|
||||
);
|
||||
return __res;
|
||||
return __builtin_ia32_readeflags_u64();
|
||||
}
|
||||
|
||||
static __inline__ void __attribute__((__always_inline__, __nodebug__))
|
||||
__writeeflags(unsigned long long __f)
|
||||
{
|
||||
__asm__ __volatile__ ("pushq %0\n\t"
|
||||
"popf\n"
|
||||
:
|
||||
:"r"(__f)
|
||||
:"flags"
|
||||
);
|
||||
__builtin_ia32_writeeflags_u64(__f);
|
||||
}
|
||||
|
||||
#else /* !__x86_64__ */
|
||||
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
|
||||
__readeflags(void)
|
||||
{
|
||||
unsigned int __res = 0;
|
||||
__asm__ __volatile__ ("pushf\n\t"
|
||||
"popl %0\n"
|
||||
:"=r"(__res)
|
||||
:
|
||||
:
|
||||
);
|
||||
return __res;
|
||||
return __builtin_ia32_readeflags_u32();
|
||||
}
|
||||
|
||||
static __inline__ void __attribute__((__always_inline__, __nodebug__))
|
||||
__writeeflags(unsigned int __f)
|
||||
{
|
||||
__asm__ __volatile__ ("pushl %0\n\t"
|
||||
"popf\n"
|
||||
:
|
||||
:"r"(__f)
|
||||
:"flags"
|
||||
);
|
||||
__builtin_ia32_writeeflags_u32(__f);
|
||||
}
|
||||
#endif /* !__x86_64__ */
|
||||
|
||||
|
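The hunk above swaps the hand-written pushf/popf inline assembly for the __builtin_ia32_readeflags/writeeflags builtins. A usage sketch of the public intrinsics, assuming the usual x86intrin.h include path on an x86-64 target; the flag-bit constant is illustrative.

#include <x86intrin.h>
#include <cstdio>

int main() {
  unsigned long long Flags = __readeflags();      // 64-bit variant on x86_64
  std::printf("carry flag = %llu\n", Flags & 1);  // bit 0 of RFLAGS is CF
  __writeeflags(Flags);                           // write the same value back
  return 0;
}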
@ -79,6 +79,8 @@ _mm256_cvtph_ps(__m128i __a)
|
||||
|
||||
#include <avx512erintrin.h>
|
||||
|
||||
#include <pkuintrin.h>
|
||||
|
||||
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
|
||||
_rdrand16_step(unsigned short *__p)
|
||||
{
|
||||
|
lib/Headers/pkuintrin.h (new file, 48 lines)
@ -0,0 +1,48 @@
|
||||
/*===------------- pkuintrin.h - PKU intrinsics ------------------===
|
||||
*
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*
|
||||
*===-----------------------------------------------------------------------===
|
||||
*/
|
||||
#ifndef __IMMINTRIN_H
#error "Never use <pkuintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __PKUINTRIN_H
#define __PKUINTRIN_H

/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("pku")))

static __inline__ unsigned int __DEFAULT_FN_ATTRS
_rdpkru_u32(void)
{
  return __builtin_ia32_rdpkru();
}

static __inline__ void __DEFAULT_FN_ATTRS
_wrpkru(unsigned int val)
{
  return __builtin_ia32_wrpkru(val);
}

#undef __DEFAULT_FN_ATTRS

#endif
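A hypothetical usage sketch for the two new intrinsics. It assumes a CPU and OS with protection-key support and compiling with -mpku; the key index below is purely illustrative.

#include <immintrin.h>
#include <cstdio>

int main() {
  unsigned int PKRU = _rdpkru_u32();   // access-rights bits for all 16 keys
  std::printf("PKRU = %#x\n", PKRU);

  // PKRU holds two bits per key: bit 2*k is access-disable, bit 2*k+1 is
  // write-disable.  Set write-disable for (illustrative) protection key 1.
  _wrpkru(PKRU | (1u << (2 * 1 + 1)));
  return 0;
}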
@ -1334,8 +1334,23 @@ ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
|
||||
return ExprError();
|
||||
}
|
||||
|
||||
// Check to see whether Res is a function designator only. If it is and we
|
||||
// are compiling for OpenCL, we need to return an error as this implies
|
||||
// that the address of the function is being taken, which is illegal in CL.
|
||||
|
||||
// These can be followed by postfix-expr pieces.
|
||||
return ParsePostfixExpressionSuffix(Res);
|
||||
Res = ParsePostfixExpressionSuffix(Res);
|
||||
if (getLangOpts().OpenCL)
|
||||
if (Expr *PostfixExpr = Res.get()) {
|
||||
QualType Ty = PostfixExpr->getType();
|
||||
if (!Ty.isNull() && Ty->isFunctionType()) {
|
||||
Diag(PostfixExpr->getExprLoc(),
|
||||
diag::err_opencl_taking_function_address_parser);
|
||||
return ExprError();
|
||||
}
|
||||
}
|
||||
|
||||
return Res;
|
||||
}
|
||||
|
||||
/// \brief Once the leading part of a postfix-expression is parsed, this
|
||||
|
@ -241,9 +241,8 @@ ExprResult Parser::ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
|
||||
ConsumeToken(); // Consume the period.
|
||||
IdentifierInfo *Id = Tok.getIdentifierInfo();
|
||||
ConsumeToken(); // Consume the identifier.
|
||||
unsigned OffsetUnused;
|
||||
Result = Actions.LookupInlineAsmVarDeclField(
|
||||
Result.get(), Id->getName(), OffsetUnused, Info, Tok.getLocation());
|
||||
Result = Actions.LookupInlineAsmVarDeclField(Result.get(), Id->getName(),
|
||||
Info, Tok.getLocation());
|
||||
}
|
||||
|
||||
// Figure out how many tokens we are into LineToks.
|
||||
|
@ -6983,7 +6983,7 @@ void DiagnoseFloatingLiteralImpCast(Sema &S, FloatingLiteral *FL, QualType T,

  SmallString<16> PrettyTargetValue;
  if (T->isSpecificBuiltinType(BuiltinType::Bool))
    PrettyTargetValue = IntegerValue == 0 ? "false" : "true";
    PrettyTargetValue = Value.isZero() ? "false" : "true";
  else
    IntegerValue.toString(PrettyTargetValue);

@ -10911,12 +10911,8 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
|
||||
// from the translation unit and reattach to the current context.
|
||||
if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
|
||||
// Is the decl actually in the context?
|
||||
for (const auto *DI : Context.getTranslationUnitDecl()->decls()) {
|
||||
if (DI == D) {
|
||||
Context.getTranslationUnitDecl()->removeDecl(D);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (Context.getTranslationUnitDecl()->containsDecl(D))
|
||||
Context.getTranslationUnitDecl()->removeDecl(D);
|
||||
// Either way, reassign the lexical decl context to our FunctionDecl.
|
||||
D->setLexicalDeclContext(CurContext);
|
||||
}
|
||||
@ -12281,16 +12277,35 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
|
||||
if (!Invalid) {
|
||||
// If this is a use, just return the declaration we found, unless
|
||||
// we have attributes.
|
||||
|
||||
// FIXME: In the future, return a variant or some other clue
|
||||
// for the consumer of this Decl to know it doesn't own it.
|
||||
// For our current ASTs this shouldn't be a problem, but will
|
||||
// need to be changed with DeclGroups.
|
||||
if (!Attr &&
|
||||
((TUK == TUK_Reference &&
|
||||
(!PrevTagDecl->getFriendObjectKind() || getLangOpts().MicrosoftExt))
|
||||
|| TUK == TUK_Friend))
|
||||
return PrevTagDecl;
|
||||
if (TUK == TUK_Reference || TUK == TUK_Friend) {
|
||||
if (Attr) {
|
||||
// FIXME: Diagnose these attributes. For now, we create a new
|
||||
// declaration to hold them.
|
||||
} else if (TUK == TUK_Reference &&
|
||||
(PrevTagDecl->getFriendObjectKind() ==
|
||||
Decl::FOK_Undeclared ||
|
||||
getOwningModule(PrevDecl) !=
|
||||
PP.getModuleContainingLocation(KWLoc)) &&
|
||||
SS.isEmpty()) {
|
||||
// This declaration is a reference to an existing entity, but
|
||||
// has different visibility from that entity: it either makes
|
||||
// a friend visible or it makes a type visible in a new module.
|
||||
// In either case, create a new declaration. We only do this if
|
||||
// the declaration would have meant the same thing if no prior
|
||||
// declaration were found, that is, if it was found in the same
|
||||
// scope where we would have injected a declaration.
|
||||
DeclContext *InjectedDC = CurContext;
|
||||
while (!InjectedDC->isFileContext() &&
|
||||
!InjectedDC->isFunctionOrMethod())
|
||||
InjectedDC = InjectedDC->getParent();
|
||||
if (!InjectedDC->getRedeclContext()->Equals(
|
||||
PrevDecl->getDeclContext()->getRedeclContext()))
|
||||
return PrevTagDecl;
|
||||
// This is in the injected scope, create a new declaration.
|
||||
} else {
|
||||
return PrevTagDecl;
|
||||
}
|
||||
}
|
||||
|
||||
// Diagnose attempts to redefine a tag.
|
||||
if (TUK == TUK_Definition) {
|
||||
|
@ -9470,6 +9470,10 @@ static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
|
||||
if (Class->getDescribedClassTemplate())
|
||||
return;
|
||||
|
||||
CallingConv ExpectedCallingConv = S.Context.getDefaultCallingConvention(
|
||||
/*IsVariadic=*/false, /*IsCXXMethod=*/true);
|
||||
|
||||
CXXConstructorDecl *LastExportedDefaultCtor = nullptr;
|
||||
for (Decl *Member : Class->decls()) {
|
||||
auto *CD = dyn_cast<CXXConstructorDecl>(Member);
|
||||
if (!CD) {
|
||||
@ -9481,7 +9485,25 @@ static void getDefaultArgExprsForConstructors(Sema &S, CXXRecordDecl *Class) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (unsigned I = 0, E = CD->getNumParams(); I != E; ++I) {
|
||||
CallingConv ActualCallingConv =
|
||||
CD->getType()->getAs<FunctionProtoType>()->getCallConv();
|
||||
|
||||
// Skip default constructors with typical calling conventions and no default
|
||||
// arguments.
|
||||
unsigned NumParams = CD->getNumParams();
|
||||
if (ExpectedCallingConv == ActualCallingConv && NumParams == 0)
|
||||
continue;
|
||||
|
||||
if (LastExportedDefaultCtor) {
|
||||
S.Diag(LastExportedDefaultCtor->getLocation(),
|
||||
diag::err_attribute_dll_ambiguous_default_ctor) << Class;
|
||||
S.Diag(CD->getLocation(), diag::note_entity_declared_at)
|
||||
<< CD->getDeclName();
|
||||
return;
|
||||
}
|
||||
LastExportedDefaultCtor = CD;
|
||||
|
||||
for (unsigned I = 0; I != NumParams; ++I) {
|
||||
// Skip any default arguments that we've already instantiated.
|
||||
if (S.Context.getDefaultArgExprForConstructor(CD, I))
|
||||
continue;
|
||||
|
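A guess at the kind of C++ the new diagnostic above targets; this example is the editor's reconstruction, not a test from the patch. An exported class with more than one constructor usable as a default constructor leaves no unique choice for the emitted default-constructor closure.

// Plausible trigger for err_attribute_dll_ambiguous_default_ctor (MSVC ABI):
// both constructors are callable with no arguments on the exported class.
struct __declspec(dllexport) Widget {
  Widget(int Count = 0);          // default-constructible via default argument
  Widget(const char *Name = "");  // second candidate -> ambiguous default ctor
};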
@ -756,9 +756,9 @@ ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
|
||||
BaseExpr = Result.get();
|
||||
|
||||
// Build the pseudo-object expression.
|
||||
return ObjCSubscriptRefExpr::Create(Context, BaseExpr, IndexExpr,
|
||||
Context.PseudoObjectTy, getterMethod,
|
||||
setterMethod, RB);
|
||||
return new (Context) ObjCSubscriptRefExpr(
|
||||
BaseExpr, IndexExpr, Context.PseudoObjectTy, VK_LValue, OK_ObjCSubscript,
|
||||
getterMethod, setterMethod, RB);
|
||||
}
|
||||
|
||||
ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
|
||||
|
@ -1680,6 +1680,13 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
    }
    ErrorFound = true;
  }
  if (isOpenMPWorksharingDirective(DSAStack->getCurrentDirective()) &&
      isOpenMPSimdDirective(DSAStack->getCurrentDirective()) && OC &&
      OC->getNumForLoops()) {
    Diag(OC->getLocStart(), diag::err_omp_ordered_simd)
        << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
    ErrorFound = true;
  }
  if (ErrorFound) {
    ActOnCapturedRegionError();
    return StmtError();
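An illustrative directive of the shape the new Sema check above rejects; this is a sketch, not a test from the patch, and the exact set of accepted clauses depends on the OpenMP version supported.

void scale(int *a, int n) {
  // err_omp_ordered_simd: an 'ordered' clause with a parameter is rejected
  // here because the construct is both worksharing and simd.
  #pragma omp for simd ordered(1)
  for (int i = 0; i < n; ++i)
    a[i] *= 2;
}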
@ -543,6 +543,12 @@ namespace {
|
||||
struct DFIParamWithArguments : DFIArguments {
|
||||
TemplateParameter Param;
|
||||
};
|
||||
// Structure used by DeductionFailureInfo to store template argument
|
||||
// information and the index of the problematic call argument.
|
||||
struct DFIDeducedMismatchArgs : DFIArguments {
|
||||
TemplateArgumentList *TemplateArgs;
|
||||
unsigned CallArgIndex;
|
||||
};
|
||||
}
|
||||
|
||||
/// \brief Convert from Sema's representation of template deduction information
|
||||
@ -554,13 +560,14 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
|
||||
DeductionFailureInfo Result;
|
||||
Result.Result = static_cast<unsigned>(TDK);
|
||||
Result.HasDiagnostic = false;
|
||||
Result.Data = nullptr;
|
||||
switch (TDK) {
|
||||
case Sema::TDK_Success:
|
||||
case Sema::TDK_Invalid:
|
||||
case Sema::TDK_InstantiationDepth:
|
||||
case Sema::TDK_TooManyArguments:
|
||||
case Sema::TDK_TooFewArguments:
|
||||
case Sema::TDK_MiscellaneousDeductionFailure:
|
||||
Result.Data = nullptr;
|
||||
break;
|
||||
|
||||
case Sema::TDK_Incomplete:
|
||||
@ -568,6 +575,17 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
|
||||
Result.Data = Info.Param.getOpaqueValue();
|
||||
break;
|
||||
|
||||
case Sema::TDK_DeducedMismatch: {
|
||||
// FIXME: Should allocate from normal heap so that we can free this later.
|
||||
auto *Saved = new (Context) DFIDeducedMismatchArgs;
|
||||
Saved->FirstArg = Info.FirstArg;
|
||||
Saved->SecondArg = Info.SecondArg;
|
||||
Saved->TemplateArgs = Info.take();
|
||||
Saved->CallArgIndex = Info.CallArgIndex;
|
||||
Result.Data = Saved;
|
||||
break;
|
||||
}
|
||||
|
||||
case Sema::TDK_NonDeducedMismatch: {
|
||||
// FIXME: Should allocate from normal heap so that we can free this later.
|
||||
DFIArguments *Saved = new (Context) DFIArguments;
|
||||
@ -601,9 +619,6 @@ clang::MakeDeductionFailureInfo(ASTContext &Context,
|
||||
case Sema::TDK_FailedOverloadResolution:
|
||||
Result.Data = Info.Expression;
|
||||
break;
|
||||
|
||||
case Sema::TDK_MiscellaneousDeductionFailure:
|
||||
break;
|
||||
}
|
||||
|
||||
return Result;
|
||||
@ -623,6 +638,7 @@ void DeductionFailureInfo::Destroy() {
|
||||
|
||||
case Sema::TDK_Inconsistent:
|
||||
case Sema::TDK_Underqualified:
|
||||
case Sema::TDK_DeducedMismatch:
|
||||
case Sema::TDK_NonDeducedMismatch:
|
||||
// FIXME: Destroy the data?
|
||||
Data = nullptr;
|
||||
@ -657,6 +673,7 @@ TemplateParameter DeductionFailureInfo::getTemplateParameter() {
|
||||
case Sema::TDK_TooManyArguments:
|
||||
case Sema::TDK_TooFewArguments:
|
||||
case Sema::TDK_SubstitutionFailure:
|
||||
case Sema::TDK_DeducedMismatch:
|
||||
case Sema::TDK_NonDeducedMismatch:
|
||||
case Sema::TDK_FailedOverloadResolution:
|
||||
return TemplateParameter();
|
||||
@ -692,6 +709,9 @@ TemplateArgumentList *DeductionFailureInfo::getTemplateArgumentList() {
|
||||
case Sema::TDK_FailedOverloadResolution:
|
||||
return nullptr;
|
||||
|
||||
case Sema::TDK_DeducedMismatch:
|
||||
return static_cast<DFIDeducedMismatchArgs*>(Data)->TemplateArgs;
|
||||
|
||||
case Sema::TDK_SubstitutionFailure:
|
||||
return static_cast<TemplateArgumentList*>(Data);
|
||||
|
||||
@ -718,6 +738,7 @@ const TemplateArgument *DeductionFailureInfo::getFirstArg() {
|
||||
|
||||
case Sema::TDK_Inconsistent:
|
||||
case Sema::TDK_Underqualified:
|
||||
case Sema::TDK_DeducedMismatch:
|
||||
case Sema::TDK_NonDeducedMismatch:
|
||||
return &static_cast<DFIArguments*>(Data)->FirstArg;
|
||||
|
||||
@ -744,6 +765,7 @@ const TemplateArgument *DeductionFailureInfo::getSecondArg() {
|
||||
|
||||
case Sema::TDK_Inconsistent:
|
||||
case Sema::TDK_Underqualified:
|
||||
case Sema::TDK_DeducedMismatch:
|
||||
case Sema::TDK_NonDeducedMismatch:
|
||||
return &static_cast<DFIArguments*>(Data)->SecondArg;
|
||||
|
||||
@ -763,6 +785,14 @@ Expr *DeductionFailureInfo::getExpr() {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
llvm::Optional<unsigned> DeductionFailureInfo::getCallArgIndex() {
|
||||
if (static_cast<Sema::TemplateDeductionResult>(Result) ==
|
||||
Sema::TDK_DeducedMismatch)
|
||||
return static_cast<DFIDeducedMismatchArgs*>(Data)->CallArgIndex;
|
||||
|
||||
return llvm::None;
|
||||
}
|
||||
|
||||
void OverloadCandidateSet::destroyCandidates() {
|
||||
for (iterator i = begin(), e = end(); i != e; ++i) {
|
||||
for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
|
||||
@ -9397,6 +9427,23 @@ static void DiagnoseBadDeduction(Sema &S, Decl *Templated,
|
||||
return;
|
||||
}
|
||||
|
||||
case Sema::TDK_DeducedMismatch: {
|
||||
// Format the template argument list into the argument string.
|
||||
SmallString<128> TemplateArgString;
|
||||
if (TemplateArgumentList *Args =
|
||||
DeductionFailure.getTemplateArgumentList()) {
|
||||
TemplateArgString = " ";
|
||||
TemplateArgString += S.getTemplateArgumentBindingsText(
|
||||
getDescribedTemplate(Templated)->getTemplateParameters(), *Args);
|
||||
}
|
||||
|
||||
S.Diag(Templated->getLocation(), diag::note_ovl_candidate_deduced_mismatch)
|
||||
<< (*DeductionFailure.getCallArgIndex() + 1)
|
||||
<< *DeductionFailure.getFirstArg() << *DeductionFailure.getSecondArg()
|
||||
<< TemplateArgString;
|
||||
break;
|
||||
}
|
||||
|
||||
case Sema::TDK_NonDeducedMismatch: {
|
||||
// FIXME: Provide a source location to indicate what we couldn't match.
|
||||
TemplateArgument FirstTA = *DeductionFailure.getFirstArg();
|
||||
@ -9686,6 +9733,7 @@ static unsigned RankDeductionFailure(const DeductionFailureInfo &DFI) {
|
||||
return 2;
|
||||
|
||||
case Sema::TDK_SubstitutionFailure:
|
||||
case Sema::TDK_DeducedMismatch:
|
||||
case Sema::TDK_NonDeducedMismatch:
|
||||
case Sema::TDK_MiscellaneousDeductionFailure:
|
||||
return 3;
|
||||
|
@ -589,10 +589,8 @@ ExprResult Sema::LookupInlineAsmIdentifier(CXXScopeSpec &SS,
|
||||
|
||||
QualType T = Result.get()->getType();
|
||||
|
||||
// For now, reject dependent types.
|
||||
if (T->isDependentType()) {
|
||||
Diag(Id.getLocStart(), diag::err_asm_incomplete_type) << T;
|
||||
return ExprError();
|
||||
return Result;
|
||||
}
|
||||
|
||||
// Any sort of function type is fine.
|
||||
@ -674,12 +672,23 @@ bool Sema::LookupInlineAsmField(StringRef Base, StringRef Member,
|
||||
}
|
||||
|
||||
ExprResult
|
||||
Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member, unsigned &Offset,
|
||||
Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member,
|
||||
llvm::InlineAsmIdentifierInfo &Info,
|
||||
SourceLocation AsmLoc) {
|
||||
Info.clear();
|
||||
|
||||
const RecordType *RT = E->getType()->getAs<RecordType>();
|
||||
QualType T = E->getType();
|
||||
if (T->isDependentType()) {
|
||||
DeclarationNameInfo NameInfo;
|
||||
NameInfo.setLoc(AsmLoc);
|
||||
NameInfo.setName(&Context.Idents.get(Member));
|
||||
return CXXDependentScopeMemberExpr::Create(
|
||||
Context, E, T, /*IsArrow=*/false, AsmLoc, NestedNameSpecifierLoc(),
|
||||
SourceLocation(),
|
||||
/*FirstQualifierInScope=*/nullptr, NameInfo, /*TemplateArgs=*/nullptr);
|
||||
}
|
||||
|
||||
const RecordType *RT = T->getAs<RecordType>();
|
||||
// FIXME: Diagnose this as field access into a scalar type.
|
||||
if (!RT)
|
||||
return ExprResult();
|
||||
@ -697,9 +706,6 @@ Sema::LookupInlineAsmVarDeclField(Expr *E, StringRef Member, unsigned &Offset,
|
||||
if (!FD)
|
||||
return ExprResult();
|
||||
|
||||
Offset = (unsigned)Context.toCharUnitsFromBits(Context.getFieldOffset(FD))
|
||||
.getQuantity();
|
||||
|
||||
// Make an Expr to thread through OpDecl.
|
||||
ExprResult Result = BuildMemberReferenceExpr(
|
||||
E, E->getType(), AsmLoc, /*IsArrow=*/false, CXXScopeSpec(),
|
||||
|
@ -2954,8 +2954,12 @@ Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
|
||||
continue;
|
||||
|
||||
QualType DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
|
||||
if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA))
|
||||
return Sema::TDK_SubstitutionFailure;
|
||||
if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA)) {
|
||||
Info.FirstArg = TemplateArgument(DeducedA);
|
||||
Info.SecondArg = TemplateArgument(OriginalArg.OriginalArgType);
|
||||
Info.CallArgIndex = OriginalArg.ArgIdx;
|
||||
return TDK_DeducedMismatch;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -4887,19 +4891,23 @@ MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
|
||||
break;
|
||||
|
||||
case Type::DependentTemplateSpecialization: {
|
||||
// C++14 [temp.deduct.type]p5:
|
||||
// The non-deduced contexts are:
|
||||
// -- The nested-name-specifier of a type that was specified using a
|
||||
// qualified-id
|
||||
//
|
||||
// C++14 [temp.deduct.type]p6:
|
||||
// When a type name is specified in a way that includes a non-deduced
|
||||
// context, all of the types that comprise that type name are also
|
||||
// non-deduced.
|
||||
if (OnlyDeduced)
|
||||
break;
|
||||
|
||||
const DependentTemplateSpecializationType *Spec
|
||||
= cast<DependentTemplateSpecializationType>(T);
|
||||
if (!OnlyDeduced)
|
||||
MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
|
||||
OnlyDeduced, Depth, Used);
|
||||
|
||||
// C++0x [temp.deduct.type]p9:
|
||||
// If the template argument list of P contains a pack expansion that is not
|
||||
// the last template argument, the entire template argument list is a
|
||||
// non-deduced context.
|
||||
if (OnlyDeduced &&
|
||||
hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
|
||||
break;
|
||||
MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
|
||||
OnlyDeduced, Depth, Used);
|
||||
|
||||
for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
|
||||
MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
|
||||
|
@ -4699,6 +4699,13 @@ bool ASTReader::ParseLanguageOptions(const RecordData &Record,
  }
  LangOpts.CommentOpts.ParseAllComments = Record[Idx++];

  // OpenMP offloading options.
  for (unsigned N = Record[Idx++]; N; --N) {
    LangOpts.OMPTargetTriples.push_back(llvm::Triple(ReadString(Record, Idx)));
  }

  LangOpts.OMPHostIRFile = ReadString(Record, Idx);

  return Listener.ReadLanguageOptions(LangOpts, Complain,
                                      AllowCompatibleDifferences);
}
@ -987,8 +987,10 @@ void ASTStmtReader::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
|
||||
assert(NumElements == E->getNumElements() && "Wrong number of elements");
|
||||
bool HasPackExpansions = Record[Idx++];
|
||||
assert(HasPackExpansions == E->HasPackExpansions &&"Pack expansion mismatch");
|
||||
ObjCDictionaryLiteral::KeyValuePair *KeyValues = E->getKeyValues();
|
||||
ObjCDictionaryLiteral::ExpansionData *Expansions = E->getExpansionData();
|
||||
ObjCDictionaryLiteral::KeyValuePair *KeyValues =
|
||||
E->getTrailingObjects<ObjCDictionaryLiteral::KeyValuePair>();
|
||||
ObjCDictionaryLiteral::ExpansionData *Expansions =
|
||||
E->getTrailingObjects<ObjCDictionaryLiteral::ExpansionData>();
|
||||
for (unsigned I = 0; I != NumElements; ++I) {
|
||||
KeyValues[I].Key = Reader.ReadSubExpr();
|
||||
KeyValues[I].Value = Reader.ReadSubExpr();
|
||||
@ -1445,7 +1447,8 @@ void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
|
||||
unsigned NumObjects = Record[Idx++];
|
||||
assert(NumObjects == E->getNumObjects());
|
||||
for (unsigned i = 0; i != NumObjects; ++i)
|
||||
E->getObjectsBuffer()[i] = ReadDeclAs<BlockDecl>(Record, Idx);
|
||||
E->getTrailingObjects<BlockDecl *>()[i] =
|
||||
ReadDeclAs<BlockDecl>(Record, Idx);
|
||||
|
||||
E->SubExpr = Reader.ReadSubExpr();
|
||||
}
|
||||
@ -1541,7 +1544,7 @@ void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
|
||||
E->Loc = Range.getBegin();
|
||||
E->RParenLoc = Range.getEnd();
|
||||
|
||||
TypeSourceInfo **Args = E->getTypeSourceInfos();
|
||||
TypeSourceInfo **Args = E->getTrailingObjects<TypeSourceInfo *>();
|
||||
for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
|
||||
Args[I] = GetTypeSourceInfo(Record, Idx);
|
||||
}
|
||||
@ -1589,7 +1592,7 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
|
||||
E->Pack = Reader.ReadDeclAs<NamedDecl>(F, Record, Idx);
|
||||
if (E->isPartiallySubstituted()) {
|
||||
assert(E->Length == NumPartialArgs);
|
||||
for (auto *I = reinterpret_cast<TemplateArgument *>(E + 1),
|
||||
for (auto *I = E->getTrailingObjects<TemplateArgument>(),
|
||||
*E = I + NumPartialArgs;
|
||||
I != E; ++I)
|
||||
new (I) TemplateArgument(Reader.ReadTemplateArgument(F, Record, Idx));
|
||||
@ -1624,7 +1627,7 @@ void ASTStmtReader::VisitFunctionParmPackExpr(FunctionParmPackExpr *E) {
|
||||
E->NumParameters = Record[Idx++];
|
||||
E->ParamPack = ReadDeclAs<ParmVarDecl>(Record, Idx);
|
||||
E->NameLoc = ReadSourceLocation(Record, Idx);
|
||||
ParmVarDecl **Parms = reinterpret_cast<ParmVarDecl**>(E+1);
|
||||
ParmVarDecl **Parms = E->getTrailingObjects<ParmVarDecl *>();
|
||||
for (unsigned i = 0, n = E->NumParameters; i != n; ++i)
|
||||
Parms[i] = ReadDeclAs<ParmVarDecl>(Record, Idx);
|
||||
}
|
||||
|
@ -1323,6 +1323,13 @@ uint64_t ASTWriter::WriteControlBlock(Preprocessor &PP,
|
||||
}
|
||||
Record.push_back(LangOpts.CommentOpts.ParseAllComments);
|
||||
|
||||
// OpenMP offloading options.
|
||||
Record.push_back(LangOpts.OMPTargetTriples.size());
|
||||
for (auto &T : LangOpts.OMPTargetTriples)
|
||||
AddString(T.getTriple(), Record);
|
||||
|
||||
AddString(LangOpts.OMPHostIRFile, Record);
|
||||
|
||||
Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
|
||||
|
||||
// Target options.
|
||||
|
@ -34,8 +34,7 @@ class DereferenceChecker
|
||||
mutable std::unique_ptr<BuiltinBug> BT_null;
|
||||
mutable std::unique_ptr<BuiltinBug> BT_undef;
|
||||
|
||||
void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C,
|
||||
bool IsBind = false) const;
|
||||
void reportBug(ProgramStateRef State, const Stmt *S, CheckerContext &C) const;
|
||||
|
||||
public:
|
||||
void checkLocation(SVal location, bool isLoad, const Stmt* S,
|
||||
@ -89,8 +88,31 @@ DereferenceChecker::AddDerefSource(raw_ostream &os,
|
||||
}
|
||||
}
|
||||
|
||||
static const Expr *getDereferenceExpr(const Stmt *S, bool IsBind=false){
|
||||
const Expr *E = nullptr;
|
||||
|
||||
// Walk through lvalue casts to get the original expression
|
||||
// that syntactically caused the load.
|
||||
if (const Expr *expr = dyn_cast<Expr>(S))
|
||||
E = expr->IgnoreParenLValueCasts();
|
||||
|
||||
if (IsBind) {
|
||||
const VarDecl *VD;
|
||||
const Expr *Init;
|
||||
std::tie(VD, Init) = parseAssignment(S);
|
||||
if (VD && Init)
|
||||
E = Init;
|
||||
}
|
||||
return E;
|
||||
}
|
||||
|
||||
static bool suppressReport(const Expr *E) {
|
||||
// Do not report dereferences on memory in non-default address spaces.
|
||||
return E->getType().getQualifiers().hasAddressSpace();
|
||||
}
|
||||
|
||||
void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
|
||||
CheckerContext &C, bool IsBind) const {
|
||||
CheckerContext &C) const {
|
||||
// Generate an error node.
|
||||
ExplodedNode *N = C.generateErrorNode(State);
|
||||
if (!N)
|
||||
@ -106,19 +128,6 @@ void DereferenceChecker::reportBug(ProgramStateRef State, const Stmt *S,
|
||||
|
||||
SmallVector<SourceRange, 2> Ranges;
|
||||
|
||||
// Walk through lvalue casts to get the original expression
|
||||
// that syntactically caused the load.
|
||||
if (const Expr *expr = dyn_cast<Expr>(S))
|
||||
S = expr->IgnoreParenLValueCasts();
|
||||
|
||||
if (IsBind) {
|
||||
const VarDecl *VD;
|
||||
const Expr *Init;
|
||||
std::tie(VD, Init) = parseAssignment(S);
|
||||
if (VD && Init)
|
||||
S = Init;
|
||||
}
|
||||
|
||||
switch (S->getStmtClass()) {
|
||||
case Stmt::ArraySubscriptExprClass: {
|
||||
os << "Array access";
|
||||
@ -209,8 +218,11 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
|
||||
// The explicit NULL case.
|
||||
if (nullState) {
|
||||
if (!notNullState) {
|
||||
reportBug(nullState, S, C);
|
||||
return;
|
||||
const Expr *expr = getDereferenceExpr(S);
|
||||
if (!suppressReport(expr)) {
|
||||
reportBug(nullState, expr, C);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Otherwise, we have the case where the location could either be
|
||||
@ -248,8 +260,11 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
|
||||
|
||||
if (StNull) {
|
||||
if (!StNonNull) {
|
||||
reportBug(StNull, S, C, /*isBind=*/true);
|
||||
return;
|
||||
const Expr *expr = getDereferenceExpr(S, /*IsBind=*/true);
|
||||
if (!suppressReport(expr)) {
|
||||
reportBug(StNull, expr, C);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// At this point the value could be either null or non-null.
|
||||
|
@ -2508,6 +2508,16 @@ bool MallocChecker::mayFreeAnyEscapedMemoryOrIsModeledExplicitly(
|
||||
return true;
|
||||
}
|
||||
|
||||
if (FName == "postEvent" &&
|
||||
FD->getQualifiedNameAsString() == "QCoreApplication::postEvent") {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (FName == "postEvent" &&
|
||||
FD->getQualifiedNameAsString() == "QCoreApplication::postEvent") {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Handle cases where we know a buffer's /address/ can escape.
|
||||
// Note that the above checks handle some special cases where we know that
|
||||
// even though the address escapes, it's still our responsibility to free the
|
||||
|
@ -1542,6 +1542,16 @@ LikelyFalsePositiveSuppressionBRVisitor::getEndPath(BugReporterContext &BRC,
|
||||
}
|
||||
}
|
||||
|
||||
// The analyzer issues a false positive when the constructor of
|
||||
// std::__independent_bits_engine from algorithms is used.
|
||||
if (const CXXConstructorDecl *MD = dyn_cast<CXXConstructorDecl>(D)) {
|
||||
const CXXRecordDecl *CD = MD->getParent();
|
||||
if (CD->getName() == "__independent_bits_engine") {
|
||||
BR.markInvalid(getTag(), nullptr);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// The analyzer issues a false positive on
|
||||
// std::basic_string<uint8_t> v; v.push_back(1);
|
||||
// and
|
||||
|
16
test/Analysis/Inputs/qt-simulator.h
Normal file
16
test/Analysis/Inputs/qt-simulator.h
Normal file
@ -0,0 +1,16 @@
|
||||
#pragma clang system_header
|
||||
|
||||
struct QObject {
|
||||
};
|
||||
|
||||
struct QEvent {
|
||||
enum Type { None };
|
||||
QEvent(Type) {}
|
||||
};
|
||||
|
||||
struct QCoreApplication : public QObject {
|
||||
static void postEvent(QObject *receiver, QEvent *event);
|
||||
static QCoreApplication *instance();
|
||||
};
|
||||
|
||||
struct QApplication : public QCoreApplication {};
|
@ -198,6 +198,25 @@ namespace std {
|
||||
storage.assignExternal(new _CharT[4]);
|
||||
}
|
||||
};
|
||||
|
||||
template<class _Engine, class _UIntType>
|
||||
class __independent_bits_engine {
|
||||
public:
|
||||
// constructors and seeding functions
|
||||
__independent_bits_engine(_Engine& __e, size_t __w);
|
||||
};
|
||||
|
||||
template<class _Engine, class _UIntType>
|
||||
__independent_bits_engine<_Engine, _UIntType>
|
||||
::__independent_bits_engine(_Engine& __e, size_t __w)
|
||||
{
|
||||
// Fake error trigger.
|
||||
// No warning is expected as we are suppressing warning coming
|
||||
// out of std::basic_string.
|
||||
int z = 0;
|
||||
z = 5/z;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
void* operator new(std::size_t, const std::nothrow_t&) throw();
|
||||
|
@ -47,3 +47,8 @@ void testBasicStringSuppression_assign(std::basic_string<char32_t> &v,
|
||||
const std::basic_string<char32_t> &v2) {
|
||||
v = v2;
|
||||
}
|
||||
|
||||
class MyEngine;
|
||||
void testSupprerssion_independent_bits_engine(MyEngine& e) {
|
||||
std::__independent_bits_engine<MyEngine, unsigned int> x(e, 64); // no-warning
|
||||
}
|
||||
|
@ -311,3 +311,21 @@ int foo10595327(int b) {
|
||||
return *p; // no-warning
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define AS_ATTRIBUTE volatile __attribute__((address_space(256)))
|
||||
#define _get_base() ((void * AS_ATTRIBUTE *)0)
|
||||
void* test_address_space_array(unsigned long slot) {
|
||||
return _get_base()[slot]; // no-warning
|
||||
}
|
||||
void test_address_space_condition(int AS_ATTRIBUTE *cpu_data) {
|
||||
if (cpu_data == 0) {
|
||||
*cpu_data = 3; // no-warning
|
||||
}
|
||||
}
|
||||
struct X { int member; };
|
||||
int test_address_space_member() {
|
||||
struct X AS_ATTRIBUTE *data = (struct X AS_ATTRIBUTE *)0UL;
|
||||
int ret;
|
||||
ret = data->member; // no-warning
|
||||
return ret;
|
||||
}
|
||||
|
@ -126,3 +126,22 @@ decltype(nullptr) returnsNullPtrType();
|
||||
void fromReturnType() {
|
||||
((X *)returnsNullPtrType())->f(); // expected-warning{{Called C++ object pointer is null}}
|
||||
}
|
||||
|
||||
#define AS_ATTRIBUTE __attribute__((address_space(256)))
|
||||
class AS1 {
|
||||
public:
|
||||
int x;
|
||||
~AS1() {
|
||||
int AS_ATTRIBUTE *x = 0;
|
||||
*x = 3; // no-warning
|
||||
}
|
||||
};
|
||||
void test_address_space_field_access() {
|
||||
AS1 AS_ATTRIBUTE *pa = 0;
|
||||
pa->x = 0; // no-warning
|
||||
}
|
||||
void test_address_space_bind() {
|
||||
AS1 AS_ATTRIBUTE *pa = 0;
|
||||
AS1 AS_ATTRIBUTE &r = *pa;
|
||||
r.x = 0; // no-warning
|
||||
}
|
||||
|
15
test/Analysis/qt_malloc.cpp
Normal file
15
test/Analysis/qt_malloc.cpp
Normal file
@ -0,0 +1,15 @@
|
||||
// RUN: %clang_cc1 -analyze -analyzer-checker=core,alpha.deadcode.UnreachableCode,alpha.core.CastSize,unix.Malloc,cplusplus -analyzer-store=region -verify %s
|
||||
// expected-no-diagnostics
|
||||
#include "Inputs/qt-simulator.h"
|
||||
|
||||
void send(QObject *obj)
|
||||
{
|
||||
QEvent *e1 = new QEvent(QEvent::None);
|
||||
static_cast<QApplication *>(QCoreApplication::instance())->postEvent(obj, e1);
|
||||
QEvent *e2 = new QEvent(QEvent::None);
|
||||
QCoreApplication::instance()->postEvent(obj, e2);
|
||||
QEvent *e3 = new QEvent(QEvent::None);
|
||||
QCoreApplication::postEvent(obj, e3);
|
||||
QEvent *e4 = new QEvent(QEvent::None);
|
||||
QApplication::postEvent(obj, e4);
|
||||
}
|
@ -148,8 +148,7 @@ namespace dr522 { // dr522: yes
|
||||
template<typename T> void b2(volatile T * const *);
|
||||
template<typename T> void b2(volatile T * const S::*);
|
||||
template<typename T> void b2(volatile T * const S::* const *);
|
||||
// FIXME: This diagnostic isn't very good. The problem is not substitution failure.
|
||||
template<typename T> void b2a(volatile T *S::* const *); // expected-note {{substitution failure}}
|
||||
template<typename T> void b2a(volatile T *S::* const *); // expected-note {{candidate template ignored: deduced type 'volatile int *dr522::S::*const *' of 1st parameter does not match adjusted type 'int *dr522::S::**' of argument}}
|
||||
|
||||
template<typename T> struct Base {};
|
||||
struct Derived : Base<int> {};
|
||||
|
@ -139,7 +139,7 @@ namespace N {
|
||||
}
|
||||
|
||||
namespace PR9233 {
|
||||
template<typename T> void f(const T **q); // expected-note{{candidate template ignored: substitution failure [with T = int]}}
|
||||
template<typename T> void f(const T **q); // expected-note{{candidate template ignored: deduced type 'const int **' of 1st parameter does not match adjusted type 'int **' of argument [with T = int]}}
|
||||
|
||||
void g(int **p) {
|
||||
f(p); // expected-error{{no matching function for call to 'f'}}
|
||||
|
@ -55,7 +55,7 @@ namespace DeduceNonTypeTemplateArgsInArray {
|
||||
}
|
||||
|
||||
namespace DeduceWithDefaultArgs {
|
||||
template<template<typename...> class Container> void f(Container<int>); // expected-note {{substitution failure [with Container = X]}}
|
||||
template<template<typename...> class Container> void f(Container<int>); // expected-note {{deduced type 'X<[...], (default) int>' of 1st parameter does not match adjusted type 'X<[...], double>' of argument [with Container = X]}}
|
||||
template<typename, typename = int> struct X {};
|
||||
void g() {
|
||||
// OK, use default argument for the second template parameter.
|
||||
|
@ -1,4 +1,5 @@
|
||||
// RUN: %clang_cc1 -emit-llvm %s -O2 -fno-builtin -o - | FileCheck %s
|
||||
// RUN: %clang_cc1 -emit-llvm %s -O2 -fno-builtin-printf -o - | FileCheck %s
|
||||
// Check that -fno-builtin is honored.
|
||||
|
||||
extern int printf(const char*, ...);
|
||||
|
@ -1,128 +1,198 @@
|
||||
// REQUIRES: aarch64-registered-target
|
||||
|
||||
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon \
|
||||
// RUN: -target-feature +v8.1a -O3 -S -o - %s \
|
||||
// RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
|
||||
// RUN: -target-feature +v8.1a -S -emit-llvm -o - %s | FileCheck %s
|
||||
|
||||
#include <arm_neon.h>
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlah_laneq_s16
|
||||
// CHECK-LABEL: test_vqrdmlah_laneq_s16
|
||||
int16x4_t test_vqrdmlah_laneq_s16(int16x4_t a, int16x4_t b, int16x8_t v) {
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
|
||||
// CHECK: shufflevector <8 x i16> {{%.*}}, <8 x i16> {{%.*}}, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
|
||||
// CHECK: call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK: call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
return vqrdmlah_laneq_s16(a, b, v, 7);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlah_laneq_s32
|
||||
// CHECK-LABEL: test_vqrdmlah_laneq_s32
|
||||
int32x2_t test_vqrdmlah_laneq_s32(int32x2_t a, int32x2_t b, int32x4_t v) {
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
|
||||
// CHECK: shufflevector <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <2 x i32> <i32 3, i32 3>
|
||||
// CHECK: call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK: call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
return vqrdmlah_laneq_s32(a, b, v, 3);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahq_laneq_s16
|
||||
// CHECK-LABEL: test_vqrdmlahq_laneq_s16
|
||||
int16x8_t test_vqrdmlahq_laneq_s16(int16x8_t a, int16x8_t b, int16x8_t v) {
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
|
||||
// CHECK: shufflevector <8 x i16> {{%.*}}, <8 x i16> {{%.*}}, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
|
||||
// CHECK: call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK: call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
return vqrdmlahq_laneq_s16(a, b, v, 7);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahq_laneq_s32
|
||||
// CHECK-LABEL: test_vqrdmlahq_laneq_s32
|
||||
int32x4_t test_vqrdmlahq_laneq_s32(int32x4_t a, int32x4_t b, int32x4_t v) {
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
|
||||
// CHECK: shufflevector <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK: call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK: call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
return vqrdmlahq_laneq_s32(a, b, v, 3);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahh_s16
|
||||
// CHECK-LABEL: test_vqrdmlahh_s16
|
||||
int16_t test_vqrdmlahh_s16(int16_t a, int16_t b, int16_t c) {
|
||||
// CHECK-AARCH64: sqrdmlah {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}
|
||||
// CHECK: [[insb:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insc:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[mul:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[insb]], <4 x i16> [[insc]])
|
||||
// CHECK: extractelement <4 x i16> [[mul]], i64 0
|
||||
// CHECK: [[insa:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insmul:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[add:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> [[insa]], <4 x i16> [[insmul]])
|
||||
// CHECK: extractelement <4 x i16> [[add]], i64 0
|
||||
return vqrdmlahh_s16(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahs_s32
|
||||
// CHECK-LABEL: test_vqrdmlahs_s32
|
||||
int32_t test_vqrdmlahs_s32(int32_t a, int32_t b, int32_t c) {
|
||||
// CHECK-AARCH64: sqrdmlah {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqadd.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
return vqrdmlahs_s32(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahh_lane_s16
|
||||
// CHECK-LABEL: test_vqrdmlahh_lane_s16
|
||||
int16_t test_vqrdmlahh_lane_s16(int16_t a, int16_t b, int16x4_t c) {
|
||||
// CHECK-AARCH64: sqrdmlah {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[3]
|
||||
// CHECK: extractelement <4 x i16> {{%.*}}, i32 3
|
||||
// CHECK: [[insb:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insc:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[mul:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[insb]], <4 x i16> [[insc]])
|
||||
// CHECK: extractelement <4 x i16> [[mul]], i64 0
|
||||
// CHECK: [[insa:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insmul:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[add:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> [[insa]], <4 x i16> [[insmul]])
|
||||
// CHECK: extractelement <4 x i16> [[add]], i64 0
|
||||
return vqrdmlahh_lane_s16(a, b, c, 3);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahs_lane_s32
|
||||
// CHECK-LABEL: test_vqrdmlahs_lane_s32
|
||||
int32_t test_vqrdmlahs_lane_s32(int32_t a, int32_t b, int32x2_t c) {
|
||||
// CHECK-AARCH64: sqrdmlah {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
|
||||
// CHECK: extractelement <2 x i32> {{%.*}}, i32 1
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqadd.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
return vqrdmlahs_lane_s32(a, b, c, 1);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahh_laneq_s16
|
||||
// CHECK-LABEL: test_vqrdmlahh_laneq_s16
|
||||
int16_t test_vqrdmlahh_laneq_s16(int16_t a, int16_t b, int16x8_t c) {
|
||||
// CHECK-AARCH64: sqrdmlah {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[7]
|
||||
// CHECK: extractelement <8 x i16> {{%.*}}, i32 7
|
||||
// CHECK: [[insb:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insc:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[mul:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[insb]], <4 x i16> [[insc]])
|
||||
// CHECK: extractelement <4 x i16> [[mul]], i64 0
|
||||
// CHECK: [[insa:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insmul:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[add:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> [[insa]], <4 x i16> [[insmul]])
|
||||
// CHECK: extractelement <4 x i16> [[add]], i64 0
|
||||
return vqrdmlahh_laneq_s16(a, b, c, 7);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlahs_laneq_s32
|
||||
// CHECK-LABEL: test_vqrdmlahs_laneq_s32
|
||||
int32_t test_vqrdmlahs_laneq_s32(int32_t a, int32_t b, int32x4_t c) {
|
||||
// CHECK-AARCH64: sqrdmlah {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
|
||||
// CHECK: extractelement <4 x i32> {{%.*}}, i32 3
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqadd.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
return vqrdmlahs_laneq_s32(a, b, c, 3);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlsh_laneq_s16
|
||||
// CHECK-LABEL: test_vqrdmlsh_laneq_s16
|
||||
int16x4_t test_vqrdmlsh_laneq_s16(int16x4_t a, int16x4_t b, int16x8_t v) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[7]
|
||||
// CHECK: shufflevector <8 x i16> {{%.*}}, <8 x i16> {{%.*}}, <4 x i32> <i32 7, i32 7, i32 7, i32 7>
|
||||
// CHECK: call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK: call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
return vqrdmlsh_laneq_s16(a, b, v, 7);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlsh_laneq_s32
|
||||
// CHECK-LABEL: test_vqrdmlsh_laneq_s32
|
||||
int32x2_t test_vqrdmlsh_laneq_s32(int32x2_t a, int32x2_t b, int32x4_t v) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[3]
|
||||
// CHECK: shufflevector <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <2 x i32> <i32 3, i32 3>
|
||||
// CHECK: call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK: call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
return vqrdmlsh_laneq_s32(a, b, v, 3);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshq_laneq_s16
|
||||
// CHECK-LABEL: test_vqrdmlshq_laneq_s16
|
||||
int16x8_t test_vqrdmlshq_laneq_s16(int16x8_t a, int16x8_t b, int16x8_t v) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[7]
|
||||
// CHECK: shufflevector <8 x i16> {{%.*}}, <8 x i16> {{%.*}}, <8 x i32> <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
|
||||
// CHECK: call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK: call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
return vqrdmlshq_laneq_s16(a, b, v, 7);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshq_laneq_s32
|
||||
// CHECK-LABEL: test_vqrdmlshq_laneq_s32
|
||||
int32x4_t test_vqrdmlshq_laneq_s32(int32x4_t a, int32x4_t b, int32x4_t v) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[3]
|
||||
// CHECK: shufflevector <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK: call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK: call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
return vqrdmlshq_laneq_s32(a, b, v, 3);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshh_s16
|
||||
// CHECK-LABEL: test_vqrdmlshh_s16
|
||||
int16_t test_vqrdmlshh_s16(int16_t a, int16_t b, int16_t c) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}
|
||||
// CHECK: [[insb:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insc:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[mul:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[insb]], <4 x i16> [[insc]])
|
||||
// CHECK: extractelement <4 x i16> [[mul]], i64 0
|
||||
// CHECK: [[insa:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insmul:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[sub:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> [[insa]], <4 x i16> [[insmul]])
|
||||
// CHECK: extractelement <4 x i16> [[sub]], i64 0
|
||||
return vqrdmlshh_s16(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshs_s32
|
||||
// CHECK-LABEL: test_vqrdmlshs_s32
|
||||
int32_t test_vqrdmlshs_s32(int32_t a, int32_t b, int32_t c) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{s[0-9]+}}, {{s[0-9]+}}, {{s[0-9]+}}
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqsub.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
return vqrdmlshs_s32(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshh_lane_s16
|
||||
// CHECK-LABEL: test_vqrdmlshh_lane_s16
|
||||
int16_t test_vqrdmlshh_lane_s16(int16_t a, int16_t b, int16x4_t c) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[3]
|
||||
// CHECK: extractelement <4 x i16> {{%.*}}, i32 3
|
||||
// CHECK: [[insb:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insc:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[mul:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[insb]], <4 x i16> [[insc]])
|
||||
// CHECK: extractelement <4 x i16> [[mul]], i64 0
|
||||
// CHECK: [[insa:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insmul:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[sub:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> [[insa]], <4 x i16> [[insmul]])
|
||||
// CHECK: extractelement <4 x i16> [[sub]], i64 0
|
||||
return vqrdmlshh_lane_s16(a, b, c, 3);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshs_lane_s32
|
||||
// CHECK-LABEL: test_vqrdmlshs_lane_s32
|
||||
int32_t test_vqrdmlshs_lane_s32(int32_t a, int32_t b, int32x2_t c) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[1]
|
||||
// CHECK: extractelement <2 x i32> {{%.*}}, i32 1
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqsub.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
return vqrdmlshs_lane_s32(a, b, c, 1);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshh_laneq_s16
|
||||
// CHECK-LABEL: test_vqrdmlshh_laneq_s16
|
||||
int16_t test_vqrdmlshh_laneq_s16(int16_t a, int16_t b, int16x8_t c) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{h[0-9]+|v[0-9]+.4h}}, {{h[0-9]+|v[0-9]+.4h}}, {{v[0-9]+}}.h[7]
|
||||
// CHECK: extractelement <8 x i16> {{%.*}}, i32 7
|
||||
// CHECK: [[insb:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insc:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[mul:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> [[insb]], <4 x i16> [[insc]])
|
||||
// CHECK: extractelement <4 x i16> [[mul]], i64 0
|
||||
// CHECK: [[insa:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[insmul:%.*]] = insertelement <4 x i16> undef, i16 {{%.*}}, i64 0
|
||||
// CHECK: [[sub:%.*]] = call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> [[insa]], <4 x i16> [[insmul]])
|
||||
// CHECK: extractelement <4 x i16> [[sub]], i64 0
|
||||
return vqrdmlshh_laneq_s16(a, b, c, 7);
|
||||
}
|
||||
|
||||
// CHECK-AARCH64-LABEL: test_vqrdmlshs_laneq_s32
|
||||
// CHECK-LABEL: test_vqrdmlshs_laneq_s32
|
||||
int32_t test_vqrdmlshs_laneq_s32(int32_t a, int32_t b, int32x4_t c) {
|
||||
// CHECK-AARCH64: sqrdmlsh {{s[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}.s[3]
|
||||
// CHECK: extractelement <4 x i32> {{%.*}}, i32 3
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqrdmulh.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
// CHECK: call i32 @llvm.aarch64.neon.sqsub.i32(i32 {{%.*}}, i32 {{%.*}})
|
||||
return vqrdmlshs_laneq_s32(a, b, c, 3);
|
||||
}
|
||||
|
||||
|
@ -26,6 +26,7 @@
|
||||
// RUN: %clang_cc1 -triple armv8-linux-gnueabi -target-cpu cortex-a53 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-BASIC-V8
|
||||
// RUN: %clang_cc1 -triple thumbv8-linux-gnueabihf -target-cpu cortex-a57 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-BASIC-V8
|
||||
// RUN: %clang_cc1 -triple thumbv8-linux-gnueabihf -target-cpu cortex-a72 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-BASIC-V8
|
||||
// RUN: %clang_cc1 -triple thumbv8-linux-gnueabihf -target-cpu exynos-m1 -emit-llvm -o - %s | FileCheck %s --check-prefix=CHECK-BASIC-V8
|
||||
// CHECK-BASIC-V8: "target-features"="+crc,+crypto,+dsp,+fp-armv8,+hwdiv,+hwdiv-arm,+neon"
|
||||
|
||||
|
||||
|
@ -1,122 +1,187 @@
|
||||
// RUN: %clang_cc1 -triple armv8.1a-linux-gnu -target-feature +neon \
|
||||
// RUN: -O3 -S -o - %s \
|
||||
// RUN: -S -emit-llvm -o - %s \
|
||||
// RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-ARM
|
||||
|
||||
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon \
|
||||
// RUN: -target-feature +v8.1a -O3 -S -o - %s \
|
||||
// RUN: -target-feature +v8.1a -S -emit-llvm -o - %s \
|
||||
// RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-AARCH64
|
||||
|
||||
// REQUIRES: arm-registered-target,aarch64-registered-target
|
||||
|
||||
#include <arm_neon.h>
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlah_s16
|
||||
int16x4_t test_vqrdmlah_s16(int16x4_t a, int16x4_t b, int16x4_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s16 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
return vqrdmlah_s16(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlah_s32
|
||||
int32x2_t test_vqrdmlah_s32(int32x2_t a, int32x2_t b, int32x2_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
return vqrdmlah_s32(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlahq_s16
|
||||
int16x8_t test_vqrdmlahq_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s16 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
return vqrdmlahq_s16(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlahq_s32
|
||||
int32x4_t test_vqrdmlahq_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
return vqrdmlahq_s32(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlah_lane_s16
|
||||
int16x4_t test_vqrdmlah_lane_s16(int16x4_t a, int16x4_t b, int16x4_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s16 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}[3]
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
|
||||
// CHECK-ARM: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqadds.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqadd.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
return vqrdmlah_lane_s16(a, b, c, 3);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlah_lane_s32
|
||||
int32x2_t test_vqrdmlah_lane_s32(int32x2_t a, int32x2_t b, int32x2_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}[1]
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
|
||||
// CHECK-ARM: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <2 x i32> <i32 1, i32 1>
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqadds.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <2 x i32> <i32 1, i32 1>
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqadd.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
return vqrdmlah_lane_s32(a, b, c, 1);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlahq_lane_s16
|
||||
int16x8_t test_vqrdmlahq_lane_s16(int16x8_t a, int16x8_t b, int16x4_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s16 q{{[0-9]+}}, q{{[0-9]+}}, d{{[0-9]+}}[3]
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
|
||||
// CHECK-ARM: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqadds.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqadd.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
return vqrdmlahq_lane_s16(a, b, c, 3);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlahq_lane_s32
|
||||
int32x4_t test_vqrdmlahq_lane_s32(int32x4_t a, int32x4_t b, int32x2_t c) {
|
||||
// CHECK-ARM: vqrdmlah.s32 q{{[0-9]+}}, q{{[0-9]+}}, d{{[0-9]+}}[1]
|
||||
// CHECK-AARCH64: sqrdmlah {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
|
||||
// CHECK-ARM: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqadds.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
return vqrdmlahq_lane_s32(a, b, c, 1);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlsh_s16
|
||||
int16x4_t test_vqrdmlsh_s16(int16x4_t a, int16x4_t b, int16x4_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s16 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
return vqrdmlsh_s16(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlsh_s32
|
||||
int32x2_t test_vqrdmlsh_s32(int32x2_t a, int32x2_t b, int32x2_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
return vqrdmlsh_s32(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlshq_s16
|
||||
int16x8_t test_vqrdmlshq_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s16 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
return vqrdmlshq_s16(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlshq_s32
|
||||
int32x4_t test_vqrdmlshq_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s32 q{{[0-9]+}}, q{{[0-9]+}}, q{{[0-9]+}}
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
return vqrdmlshq_s32(a, b, c);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlsh_lane_s16
|
||||
int16x4_t test_vqrdmlsh_lane_s16(int16x4_t a, int16x4_t b, int16x4_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s16 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}[3]
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.h[3]
|
||||
// CHECK-ARM: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i16> @llvm.arm.neon.vqsubs.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <4 x i32> <i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqrdmulh.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i16> @llvm.aarch64.neon.sqsub.v4i16(<4 x i16> {{%.*}}, <4 x i16> {{%.*}})
|
||||
return vqrdmlsh_lane_s16(a, b, c, 3);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlsh_lane_s32
|
||||
int32x2_t test_vqrdmlsh_lane_s32(int32x2_t a, int32x2_t b, int32x2_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s32 d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}[1]
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.s[1]
|
||||
// CHECK-ARM: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <2 x i32> <i32 1, i32 1>
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <2 x i32> @llvm.arm.neon.vqsubs.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <2 x i32> <i32 1, i32 1>
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqrdmulh.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <2 x i32> @llvm.aarch64.neon.sqsub.v2i32(<2 x i32> {{%.*}}, <2 x i32> {{%.*}})
|
||||
return vqrdmlsh_lane_s32(a, b, c, 1);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlshq_lane_s16
|
||||
int16x8_t test_vqrdmlshq_lane_s16(int16x8_t a, int16x8_t b, int16x4_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s16 q{{[0-9]+}}, q{{[0-9]+}}, d{{[0-9]+}}[3]
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.h[3]
|
||||
// CHECK-ARM: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-ARM: call <8 x i16> @llvm.arm.neon.vqsubs.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <4 x i16> {{%.*}}, <4 x i16> {{%.*}}, <8 x i32> <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqrdmulh.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
// CHECK-AARCH64: call <8 x i16> @llvm.aarch64.neon.sqsub.v8i16(<8 x i16> {{%.*}}, <8 x i16> {{%.*}})
|
||||
return vqrdmlshq_lane_s16(a, b, c, 3);
|
||||
}
|
||||
|
||||
// CHECK-LABEL: test_vqrdmlshq_lane_s32
|
||||
int32x4_t test_vqrdmlshq_lane_s32(int32x4_t a, int32x4_t b, int32x2_t c) {
|
||||
// CHECK-ARM: vqrdmlsh.s32 q{{[0-9]+}}, q{{[0-9]+}}, d{{[0-9]+}}[1]
|
||||
// CHECK-AARCH64: sqrdmlsh {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.s[1]
|
||||
// CHECK-ARM: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-ARM: call <4 x i32> @llvm.arm.neon.vqsubs.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
|
||||
// CHECK-AARCH64: shufflevector <2 x i32> {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqrdmulh.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
// CHECK-AARCH64: call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> {{%.*}}, <4 x i32> {{%.*}})
|
||||
return vqrdmlshq_lane_s32(a, b, c, 1);
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,7 @@
|
||||
// RUN: %clang_cc1 -fno-builtin -emit-llvm -o - %s -triple i386-unknown-unknown | FileCheck -check-prefix CHECK-YES %s
|
||||
// RUN: %clang_cc1 -fno-builtin-crealf -fno-builtin-creal -fno-builtin-creall \
|
||||
// RUN: -fno-builtin-cimagf -fno-builtin-cimag -fno-builtin-cimagl -emit-llvm \
|
||||
// RUN: -o - %s -triple i386-unknown-unknown | FileCheck -check-prefix CHECK-YES %s
|
||||
// RUN: %clang_cc1 -emit-llvm -o - %s -triple i386-unknown-unknown | FileCheck -check-prefix CHECK-NO %s
|
||||
|
||||
extern float crealf(float _Complex);
|
||||
|
@ -1,4 +1,11 @@
|
||||
// RUN: %clang_cc1 -S -O3 -fno-builtin -o - %s | FileCheck %s
|
||||
// RUN: %clang_cc1 -S -O3 -fno-builtin-ceil -fno-builtin-copysign -fno-builtin-cos \
|
||||
// RUN: -fno-builtin-fabs -fno-builtin-floor -fno-builtin-strcat -fno-builtin-strncat \
|
||||
// RUN: -fno-builtin-strchr -fno-builtin-strrchr -fno-builtin-strcmp -fno-builtin-strncmp \
|
||||
// RUN: -fno-builtin-strcpy -fno-builtin-stpcpy -fno-builtin-strncpy -fno-builtin-strlen \
|
||||
// RUN: -fno-builtin-strpbrk -fno-builtin-strspn -fno-builtin-strtod -fno-builtin-strtof \
|
||||
// RUN: -fno-builtin-strtold -fno-builtin-strtol -fno-builtin-strtoll -fno-builtin-strtoul \
|
||||
// RUN: -fno-builtin-strtoull -o - %s | FileCheck %s
|
||||
// rdar://10551066
|
||||
|
||||
typedef __SIZE_TYPE__ size_t;
|
||||
|
@ -1,4 +1,5 @@
|
||||
// RUN: %clang_cc1 -fno-builtin -O1 -S -o - %s | FileCheck %s
|
||||
// RUN: %clang_cc1 -fno-builtin-memset -O1 -S -o - %s | FileCheck -check-prefix=MEMSET %s
|
||||
|
||||
void PR13497() {
|
||||
char content[2];
|
||||
@ -6,3 +7,11 @@ void PR13497() {
|
||||
// CHECK: __strcpy_chk
|
||||
__builtin___strcpy_chk(content, "", 1);
|
||||
}
|
||||
|
||||
void PR4941(char *s) {
|
||||
// Make sure we don't optimize this loop to a memset().
|
||||
// MEMSET-LABEL: PR4941:
|
||||
// MEMSET-NOT: memset
|
||||
for (unsigned i = 0; i < 8192; ++i)
|
||||
s[i] = 0;
|
||||
}
|
||||
|
18
test/CodeGen/pku.c
Normal file
18
test/CodeGen/pku.c
Normal file
@ -0,0 +1,18 @@
|
||||
// RUN: %clang_cc1 %s -triple=x86_64-apple-darwin -target-feature +pku -emit-llvm -o - -Werror | FileCheck %s
|
||||
|
||||
// Don't include mm_malloc.h, it's system specific.
|
||||
#define __MM_MALLOC_H
|
||||
|
||||
#include <immintrin.h>
|
||||
|
||||
unsigned int test_rdpkru_u32() {
|
||||
// CHECK-LABEL: @test_rdpkru_u32
|
||||
// CHECK: @llvm.x86.rdpkru
|
||||
return _rdpkru_u32();
|
||||
}
|
||||
void test_wrpkru(unsigned int __A) {
|
||||
// CHECK-LABEL: @test_wrpkru
|
||||
// CHECK: @llvm.x86.wrpkru
|
||||
_wrpkru(__A);
|
||||
return ;
|
||||
}
|
@ -29,3 +29,28 @@ extern "C" int test_namespace_global() {
|
||||
__asm mov eax, asdf::a_global.a3.b2
|
||||
}
|
||||
|
||||
template <bool Signed>
|
||||
struct make_storage_type {
|
||||
struct type {
|
||||
struct B {
|
||||
int a;
|
||||
int x;
|
||||
} b;
|
||||
};
|
||||
};
|
||||
|
||||
template <bool Signed>
|
||||
struct msvc_dcas_x86 {
|
||||
typedef typename make_storage_type<Signed>::type storage_type;
|
||||
void store() __asm("PR26001") {
|
||||
storage_type p;
|
||||
__asm mov edx, p.b.x;
|
||||
}
|
||||
};
|
||||
|
||||
template void msvc_dcas_x86<false>::store();
|
||||
// CHECK: define weak_odr void @"\01PR26001"(
|
||||
// CHECK: %[[P:.*]] = alloca %"struct.make_storage_type<false>::type", align 4
|
||||
// CHECK: %[[B:.*]] = getelementptr inbounds %"struct.make_storage_type<false>::type", %"struct.make_storage_type<false>::type"* %[[P]], i32 0, i32 0
|
||||
// CHECK: %[[X:.*]] = getelementptr inbounds %"struct.make_storage_type<false>::type::B", %"struct.make_storage_type<false>::type::B"* %[[B]], i32 0, i32 1
|
||||
// CHECK: call void asm sideeffect inteldialect "mov edx, dword ptr $0", "*m,~{edx},~{dirflag},~{fpsr},~{flags}"(i32* %[[X]])
|
||||
|
82
test/CodeGenCXX/optnone-and-attributes.cpp
Normal file
82
test/CodeGenCXX/optnone-and-attributes.cpp
Normal file
@ -0,0 +1,82 @@
|
||||
// RUN: %clang_cc1 < %s -triple i386-mingw32 -fms-extensions -emit-llvm -x c++ | FileCheck %s
|
||||
|
||||
// optnone wins over inlinehint.
|
||||
// Test that both func1 and func2 are marked optnone and noinline.
|
||||
|
||||
// Definition with both optnone and inlinehint.
|
||||
__attribute__((optnone))
|
||||
inline int func1(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// CHECK: @_Z5func1i({{.*}}) [[OPTNONE:#[0-9]+]]
|
||||
|
||||
// optnone declaration, inlinehint definition.
|
||||
__attribute__((optnone))
|
||||
int func2(int a);
|
||||
|
||||
inline int func2(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// CHECK: @_Z5func2i({{.*}}) [[OPTNONE]]
|
||||
|
||||
// Keep alive the definitions of func1 and func2.
|
||||
int foo() {
|
||||
int val = func1(1);
|
||||
return val + func2(2);
|
||||
}
|
||||
|
||||
// optnone wins over minsize.
|
||||
__attribute__((optnone))
|
||||
int func3(int a);
|
||||
|
||||
__attribute__((minsize))
|
||||
int func3(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// Same attribute set as everything else, therefore no 'minsize'.
|
||||
// CHECK: @_Z5func3i({{.*}}) [[OPTNONE]]
|
||||
|
||||
|
||||
// Verify that noreturn is compatible with optnone.
|
||||
__attribute__((noreturn))
|
||||
extern void exit_from_function();
|
||||
|
||||
__attribute__((noreturn)) __attribute((optnone))
|
||||
extern void noreturn_function(int a) { exit_from_function(); }
|
||||
// CHECK: @_Z17noreturn_functioni({{.*}}) [[NORETURN:#[0-9]+]]
|
||||
|
||||
|
||||
// Verify that __declspec(noinline) is compatible with optnone.
|
||||
__declspec(noinline) __attribute__((optnone))
|
||||
void func4() { return; }
|
||||
// CHECK: @_Z5func4v() [[OPTNONE]]
|
||||
|
||||
__declspec(noinline)
|
||||
extern void func5();
|
||||
|
||||
__attribute__((optnone))
|
||||
void func5() { return; }
|
||||
// CHECK: @_Z5func5v() [[OPTNONE]]
|
||||
|
||||
|
||||
// Verify also that optnone can be used on dllexport functions.
|
||||
// Adding attribute optnone on a dllimport function has no effect.
|
||||
|
||||
__attribute__((dllimport))
|
||||
__attribute__((optnone))
|
||||
int imported_optnone_func(int a);
|
||||
|
||||
__attribute__((dllexport))
|
||||
__attribute__((optnone))
|
||||
int exported_optnone_func(int a) {
|
||||
return imported_optnone_func(a); // use of imported func
|
||||
}
|
||||
// CHECK: @_Z21exported_optnone_funci({{.*}}) [[OPTNONE]]
|
||||
// CHECK: declare dllimport {{.*}} @_Z21imported_optnone_funci({{.*}}) [[DLLIMPORT:#[0-9]+]]
|
||||
|
||||
|
||||
// CHECK: attributes [[OPTNONE]] = { noinline {{.*}} optnone
|
||||
// CHECK: attributes [[NORETURN]] = { noinline noreturn {{.*}} optnone
|
||||
|
||||
// CHECK: attributes [[DLLIMPORT]] =
|
||||
// CHECK-SAME-NOT: optnone
|
164
test/CodeGenCXX/optnone-class-members.cpp
Normal file
164
test/CodeGenCXX/optnone-class-members.cpp
Normal file
@ -0,0 +1,164 @@
|
||||
// RUN: %clang_cc1 < %s -triple %itanium_abi_triple -fms-extensions -emit-llvm -x c++ | FileCheck %s
|
||||
|
||||
// Test attribute 'optnone' on methods:
|
||||
// -- member functions;
|
||||
// -- static member functions.
|
||||
|
||||
// Verify that all methods of struct A are associated to the same attribute set.
|
||||
// The attribute set shall contain attributes 'noinline' and 'optnone'.
|
||||
|
||||
struct A {
|
||||
// Definition of an optnone static method.
|
||||
__attribute__((optnone))
|
||||
static int static_optnone_method(int a) {
|
||||
return a + a;
|
||||
}
|
||||
// CHECK: @_ZN1A21static_optnone_methodEi({{.*}}) [[OPTNONE:#[0-9]+]]
|
||||
|
||||
// Definition of an optnone normal method.
|
||||
__attribute__((optnone))
|
||||
int optnone_method(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// CHECK: @_ZN1A14optnone_methodEi({{.*}}) [[OPTNONE]]
|
||||
|
||||
// Declaration of an optnone method with out-of-line definition
|
||||
// that doesn't say optnone.
|
||||
__attribute__((optnone))
|
||||
int optnone_decl_method(int a);
|
||||
|
||||
// Methods declared without attribute optnone; the definitions will
|
||||
// have attribute optnone, and we verify optnone wins.
|
||||
__forceinline static int static_forceinline_method(int a);
|
||||
__attribute__((always_inline)) int alwaysinline_method(int a);
|
||||
__attribute__((noinline)) int noinline_method(int a);
|
||||
__attribute__((minsize)) int minsize_method(int a);
|
||||
};
|
||||
|
||||
void foo() {
|
||||
A a;
|
||||
A::static_optnone_method(4);
|
||||
a.optnone_method(14);
|
||||
a.optnone_decl_method(12);
|
||||
A::static_forceinline_method(5);
|
||||
a.alwaysinline_method(5);
|
||||
a.noinline_method(6);
|
||||
a.minsize_method(7);
|
||||
}
|
||||
|
||||
// No attribute here, should still be on the definition.
|
||||
int A::optnone_decl_method(int a) {
|
||||
return a;
|
||||
}
|
||||
// CHECK: @_ZN1A19optnone_decl_methodEi({{.*}}) [[OPTNONE]]
|
||||
|
||||
// optnone implies noinline; therefore attribute noinline is added to
|
||||
// the set of function attributes.
|
||||
// forceinline is instead translated as 'always_inline'.
|
||||
// However 'noinline' wins over 'always_inline' and therefore
|
||||
// the resulting attributes for this method are: noinline + optnone
|
||||
__attribute__((optnone))
|
||||
int A::static_forceinline_method(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// CHECK: @_ZN1A25static_forceinline_methodEi({{.*}}) [[OPTNONE]]
|
||||
|
||||
__attribute__((optnone))
|
||||
int A::alwaysinline_method(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// CHECK: @_ZN1A19alwaysinline_methodEi({{.*}}) [[OPTNONE]]
|
||||
|
||||
// 'noinline' + 'noinline and optnone' = 'noinline and optnone'
|
||||
__attribute__((optnone))
|
||||
int A::noinline_method(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// CHECK: @_ZN1A15noinline_methodEi({{.*}}) [[OPTNONE]]
|
||||
|
||||
// 'optnone' wins over 'minsize'
|
||||
__attribute__((optnone))
|
||||
int A::minsize_method(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
// CHECK: @_ZN1A14minsize_methodEi({{.*}}) [[OPTNONE]]
|
||||
|
||||
|
||||
// Test attribute 'optnone' on methods:
|
||||
// -- pure virtual functions
|
||||
// -- base virtual and derived virtual
|
||||
// -- base virtual but not derived virtual
|
||||
// -- optnone methods redefined in override
|
||||
|
||||
// A method defined in override doesn't inherit the function attributes of the
|
||||
// superclass method.
|
||||
|
||||
struct B {
|
||||
virtual int pure_virtual(int a) = 0;
|
||||
__attribute__((optnone))
|
||||
virtual int pure_virtual_with_optnone(int a) = 0;
|
||||
|
||||
virtual int base(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
|
||||
__attribute__((optnone))
|
||||
virtual int optnone_base(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
|
||||
__attribute__((optnone))
|
||||
virtual int only_base_virtual(int a) {
|
||||
return a + a;
|
||||
}
|
||||
};
|
||||
|
||||
struct C : public B {
|
||||
__attribute__((optnone))
|
||||
virtual int pure_virtual(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
|
||||
virtual int pure_virtual_with_optnone(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
|
||||
__attribute__((optnone))
|
||||
virtual int base(int a) {
|
||||
return a + a;
|
||||
}
|
||||
|
||||
virtual int optnone_base(int a) {
|
||||
return a + a;
|
||||
}
|
||||
|
||||
int only_base_virtual(int a) {
|
||||
return a + a + a + a;
|
||||
}
|
||||
};
|
||||
|
||||
int bar() {
|
||||
C c;
|
||||
int result;
|
||||
result = c.pure_virtual(3);
|
||||
result += c.pure_virtual_with_optnone(2);
|
||||
result += c.base(5);
|
||||
result += c.optnone_base(7);
|
||||
result += c.only_base_virtual(9);
|
||||
return result;
|
||||
}
|
||||
|
||||
// CHECK: @_ZN1C12pure_virtualEi({{.*}}) {{.*}} [[OPTNONE]]
|
||||
// CHECK: @_ZN1C25pure_virtual_with_optnoneEi({{.*}}) {{.*}} [[NORMAL:#[0-9]+]]
|
||||
// CHECK: @_ZN1C4baseEi({{.*}}) {{.*}} [[OPTNONE]]
|
||||
// CHECK: @_ZN1C12optnone_baseEi({{.*}}) {{.*}} [[NORMAL]]
|
||||
// CHECK: @_ZN1C17only_base_virtualEi({{.*}}) {{.*}} [[NORMAL]]
|
||||
// CHECK: @_ZN1B4baseEi({{.*}}) {{.*}} [[NORMAL]]
|
||||
// CHECK: @_ZN1B12optnone_baseEi({{.*}}) {{.*}} [[OPTNONE]]
|
||||
// CHECK: @_ZN1B17only_base_virtualEi({{.*}}) {{.*}} [[OPTNONE]]
|
||||
|
||||
|
||||
// CHECK: attributes [[NORMAL]] =
|
||||
// CHECK-SAME-NOT: noinline
|
||||
// CHECK-SAME-NOT: optnone
|
||||
// CHECK: attributes [[OPTNONE]] = {{.*}} noinline {{.*}} optnone
|
@ -90,5 +90,6 @@ int user_of_forceinline_optnone_function() {
|
||||
// CHECK: @_Z28forceinline_optnone_functionii({{.*}}) [[OPTNONE]]
|
||||
|
||||
// CHECK: attributes [[OPTNONE]] = { noinline nounwind optnone {{.*}} }
|
||||
// CHECK: attributes [[NORMAL]] = { nounwind {{.*}} }
|
||||
|
||||
// CHECK: attributes [[NORMAL]] =
|
||||
// CHECK-SAME-NOT: noinline
|
||||
// CHECK-SAME-NOT: optnone
|
||||
|
104
test/CodeGenCXX/optnone-templates.cpp
Normal file
104
test/CodeGenCXX/optnone-templates.cpp
Normal file
@ -0,0 +1,104 @@
// RUN: %clang_cc1 %s -triple %itanium_abi_triple -std=c++11 -emit-llvm -o - | FileCheck %s

// Test optnone on template instantiations.

//-- Effect of optnone on generic add template function.

template <typename T> T template_normal(T a)
{
  return a + a;
}

template <typename T> __attribute__((optnone)) T template_optnone(T a)
{
  return a + a + a;
}

// This function should cause instantiations of each template, one marked
// with the 'optnone' attribute.
int container(int i)
{
  return template_normal<int>(i) + template_optnone<int>(i);
}

// CHECK: @_Z15template_normalIiET_S0_({{.*}}) [[NORMAL:#[0-9]+]]
// CHECK: @_Z16template_optnoneIiET_S0_({{.*}}) [[OPTNONE:#[0-9]+]]


//-- Effect of optnone on a partial specialization.
// FIRST TEST: a method becomes marked with optnone in the specialization.

template <typename T, typename U> class template_normal_base {
public:
  T method(T t, U u)
  {
    return t + static_cast<T>(u);
  }
};

template <typename U> class template_normal_base<int, U>
{
public:
  __attribute__((optnone)) int method (int t, U u)
  {
    return t - static_cast<int>(u);
  }
};

// This function should cause an instantiation of the full template (whose
// method is not marked optnone) and an instantiation of the partially
// specialized template (whose method is marked optnone).
void container2()
{
  int y = 2;
  float z = 3.0;
  template_normal_base<float, int> class_normal;
  template_normal_base<int, float> class_optnone;
  float r1 = class_normal.method(z, y);
  float r2 = class_optnone.method(y, z);
}

// CHECK: @_ZN20template_normal_baseIfiE6methodEfi({{.*}}) [[NORMAL]]
// CHECK: @_ZN20template_normal_baseIifE6methodEif({{.*}}) [[OPTNONE]]


//-- Effect of optnone on a partial specialization.
// SECOND TEST: a method loses optnone in the specialization.

template <typename T, typename U> class template_optnone_base {
public:
  __attribute__((optnone)) T method(T t, U u)
  {
    return t + static_cast<T>(u);
  }
};

template <typename U> class template_optnone_base<int, U>
{
public:
  int method (int t, U u)
  {
    return t - static_cast<int>(u);
  }
};

// This function should cause an instantiation of the full template (whose
// method is marked optnone) and an instantiation of the partially
// specialized template (whose method is not marked optnone).
void container3()
{
  int y = 2;
  float z = 3.0;
  template_optnone_base<float, int> class_optnone;
  template_optnone_base<int, float> class_normal;
  float r1 = class_optnone.method(z, y);
  float r2 = class_normal.method(y, z);
}

// CHECK: @_ZN21template_optnone_baseIfiE6methodEfi({{.*}}) [[OPTNONE]]
// CHECK: @_ZN21template_optnone_baseIifE6methodEif({{.*}}) [[NORMAL]]


// CHECK: attributes [[NORMAL]] =
// CHECK-SAME-NOT: optnone
// CHECK: attributes [[OPTNONE]] = {{.*}} optnone
@ -9,4 +9,4 @@ int main(void) {
  return 0;
}

// CHECK: @__llvm_coverage_mapping = internal constant { i32, i32, i32, i32, [2 x <{ i8*, i32, i32, i64 }>], [{{[0-9]+}} x i8] } { i32 2, i32 {{[0-9]+}}, i32 {{[0-9]+}}, i32 0, [2 x <{ i8*, i32, i32, i64 }>] [<{ i8*, i32, i32, i64 }> <{ i8* getelementptr inbounds ([3 x i8], [3 x i8]* @__profn_foo, i32 0, i32 0), i32 3, i32 9, i64 {{[0-9]+}} }>, <{ i8*, i32, i32, i64 }> <{ i8* getelementptr inbounds ([4 x i8], [4 x i8]* @__profn_main, i32 0, i32 0), i32 4, i32 9, i64 {{[0-9]+}} }>]
// CHECK: @__llvm_coverage_mapping = internal constant { { i32, i32, i32, i32 }, [2 x <{ i8*, i32, i32, i64 }>], [{{[0-9]+}} x i8] } { { i32, i32, i32, i32 } { i32 2, i32 {{[0-9]+}}, i32 {{[0-9]+}}, i32 0 }, [2 x <{ i8*, i32, i32, i64 }>] [<{ i8*, i32, i32, i64 }> <{ i8* getelementptr inbounds ([3 x i8], [3 x i8]* @__profn_foo, i32 0, i32 0), i32 3, i32 9, i64 {{[0-9]+}} }>, <{ i8*, i32, i32, i64 }> <{ i8* getelementptr inbounds ([4 x i8], [4 x i8]* @__profn_main, i32 0, i32 0), i32 4, i32 9, i64 {{[0-9]+}} }>]
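The updated CHECK line reflects a layout change in the coverage-mapping global: the four leading i32 header fields are now wrapped in their own { i32, i32, i32, i32 } struct instead of sitting directly in the outer constant, while the function-record array and the encoded data blob are unchanged. As a rough orientation only, the sketch below mirrors that header; the field names are descriptive guesses for illustration, not the identifiers used by LLVM.

// Descriptive sketch only; names are guesses, layout mirrors the CHECK line.
#include <cstdint>

struct CoverageMappingHeaderSketch {
  uint32_t NumFunctionRecords;  // "i32 2" above: two records (foo and main)
  uint32_t FilenamesSize;       // size of the encoded filename data
  uint32_t CoverageMappingSize; // size of the encoded mapping regions
  uint32_t Version;             // "i32 0" above: format version field
};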
@ -74,6 +74,20 @@
// RUN: %clang -target arm64 -mlittle-endian -mtune=cortex-a72 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-CA72 %s
// ARM64-CA72: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "cortex-a72"

// RUN: %clang -target aarch64 -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1 %s
// RUN: %clang -target aarch64 -mlittle-endian -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1 %s
// RUN: %clang -target aarch64_be -mlittle-endian -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1 %s
// RUN: %clang -target aarch64 -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1 %s
// RUN: %clang -target aarch64 -mlittle-endian -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1 %s
// RUN: %clang -target aarch64_be -mlittle-endian -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1 %s
// M1: "-cc1"{{.*}} "-triple" "aarch64{{.*}}" "-target-cpu" "exynos-m1"

// RUN: %clang -target arm64 -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-M1 %s
// RUN: %clang -target arm64 -mlittle-endian -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-M1 %s
// RUN: %clang -target arm64 -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-M1 %s
// RUN: %clang -target arm64 -mlittle-endian -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=ARM64-M1 %s
// ARM64-M1: "-cc1"{{.*}} "-triple" "arm64{{.*}}" "-target-cpu" "exynos-m1"

// RUN: %clang -target aarch64_be -### -c %s 2>&1 | FileCheck -check-prefix=GENERIC-BE %s
// RUN: %clang -target aarch64 -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=GENERIC-BE %s
// RUN: %clang -target aarch64_be -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=GENERIC-BE %s
@ -111,6 +125,14 @@
// RUN: %clang -target aarch64_be -mbig-endian -mtune=cortex-a72 -### -c %s 2>&1 | FileCheck -check-prefix=CA72-BE %s
// CA72-BE: "-cc1"{{.*}} "-triple" "aarch64_be{{.*}}" "-target-cpu" "cortex-a72"

// RUN: %clang -target aarch64_be -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1-BE %s
// RUN: %clang -target aarch64 -mbig-endian -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1-BE %s
// RUN: %clang -target aarch64_be -mbig-endian -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1-BE %s
// RUN: %clang -target aarch64_be -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1-BE %s
// RUN: %clang -target aarch64 -mbig-endian -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1-BE %s
// RUN: %clang -target aarch64_be -mbig-endian -mtune=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=M1-BE %s
// M1-BE: "-cc1"{{.*}} "-triple" "aarch64_be{{.*}}" "-target-cpu" "exynos-m1"

// RUN: %clang -target aarch64 -mcpu=cortex-a57 -mtune=cortex-a53 -### -c %s 2>&1 | FileCheck -check-prefix=MCPU-MTUNE %s
// RUN: %clang -target aarch64 -mtune=cortex-a53 -mcpu=cortex-a57 -### -c %s 2>&1 | FileCheck -check-prefix=MCPU-MTUNE %s
// RUN: %clang -target aarch64 -mcpu=cortex-a72 -mtune=cortex-a53 -### -c %s 2>&1 | FileCheck -check-prefix=MCPU-MTUNE %s
@ -398,40 +398,48 @@
// RUN: %clang -target arm -mcpu=cortex-a53 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a57 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a72 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a35 -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a53 -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a57 -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a72 -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// RUN: %clang -target arm -mcpu=exynos-m1 -mlittle-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A %s
// CHECK-CPUV8A: "-cc1"{{.*}} "-triple" "armv8-{{.*}}

// RUN: %clang -target armeb -mcpu=cortex-a35 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target armeb -mcpu=cortex-a53 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target armeb -mcpu=cortex-a57 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target armeb -mcpu=cortex-a72 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target armeb -mcpu=exynos-m1 -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a35 -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a53 -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a57 -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target arm -mcpu=cortex-a72 -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// RUN: %clang -target arm -mcpu=exynos-m1 -mbig-endian -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A %s
// CHECK-BE-CPUV8A: "-cc1"{{.*}} "-triple" "armebv8-{{.*}}

// RUN: %clang -target arm -mcpu=cortex-a35 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a53 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a57 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a72 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=exynos-m1 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a35 -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a53 -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a57 -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a72 -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=exynos-m1 -mlittle-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-CPUV8A-THUMB %s
// CHECK-CPUV8A-THUMB: "-cc1"{{.*}} "-triple" "thumbv8-{{.*}}

// RUN: %clang -target armeb -mcpu=cortex-a35 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target armeb -mcpu=cortex-a53 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target armeb -mcpu=cortex-a57 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target armeb -mcpu=cortex-a72 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target armeb -mcpu=exynos-m1 -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a35 -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a53 -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a57 -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=cortex-a72 -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// RUN: %clang -target arm -mcpu=exynos-m1 -mbig-endian -mthumb -### -c %s 2>&1 | FileCheck -check-prefix=CHECK-BE-CPUV8A-THUMB %s
// CHECK-BE-CPUV8A-THUMB: "-cc1"{{.*}} "-triple" "thumbebv8-{{.*}}

// ================== Check whether -mcpu accepts mixed-case values.
@ -4,6 +4,11 @@
// RUN: %clang -target armv7-apple-darwin -mkernel -### %s 2>&1 \
// RUN:   | FileCheck %s -check-prefix CHECK-KERNEL

// RUN: %clang -target armv7-none-gnueabi -mno-movt -### %s 2>&1 \
// RUN:   | FileCheck %s -check-prefix CHECK-NO-MOVT

// CHECK-DEFAULT-NOT: "-target-feature" "+no-movt"

// CHECK-KERNEL: "-target-feature" "+no-movt"

// CHECK-NO-MOVT: "-target-feature" "+no-movt"
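For context, the +no-movt target feature asks the ARM backend to avoid the movw/movt instruction pair when materialising 32-bit constants and addresses, falling back to literal-pool loads; as the CHECK-KERNEL line shows, -mkernel on Darwin implies it in addition to the explicit -mno-movt flag. The snippet below is a minimal, illustrative sketch (hypothetical file name, not part of the test) of source where that difference becomes visible in the generated assembly.

// no_movt_sketch.cpp -- illustrative only.
// Compare: clang -target armv7-none-gnueabi -O2 -S no_movt_sketch.cpp -o -
// against: clang -target armv7-none-gnueabi -O2 -mno-movt -S no_movt_sketch.cpp -o -
extern int counter;

int *address_of_counter()
{
  // With movw/movt available, the address of 'counter' is built from two
  // immediate moves; with +no-movt it is expected to come from a literal pool.
  return &counter;
}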