Merge llvm-project main llvmorg-14-init-11187-g222442ec2d71

This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvmorg-14-init-11187-g222442ec2d71.

PR:		261742
MFC after:	2 weeks
Dimitry Andric 2021-12-02 22:49:08 +01:00
commit 4824e7fd18
751 changed files with 23675 additions and 13614 deletions


@ -41,6 +41,7 @@ clang/lib/AST/CMakeLists.txt
clang/lib/ASTMatchers/CMakeLists.txt
clang/lib/ASTMatchers/Dynamic/CMakeLists.txt
clang/lib/Analysis/CMakeLists.txt
clang/lib/Analysis/FlowSensitive/CMakeLists.txt
clang/lib/Analysis/plugins/CMakeLists.txt
clang/lib/Analysis/plugins/CheckerDependencyHandling/CMakeLists.txt
clang/lib/Analysis/plugins/CheckerOptionHandling/CMakeLists.txt
@ -398,6 +399,7 @@ lldb/source/Plugins/Platform/MacOSX/
lldb/source/Plugins/Platform/NetBSD/CMakeLists.txt
lldb/source/Plugins/Platform/OpenBSD/CMakeLists.txt
lldb/source/Plugins/Platform/POSIX/CMakeLists.txt
lldb/source/Plugins/Platform/QemuUser/CMakeLists.txt
lldb/source/Plugins/Platform/Windows/
lldb/source/Plugins/Platform/gdb-server/CMakeLists.txt
lldb/source/Plugins/Process/CMakeLists.txt


@ -1837,7 +1837,8 @@ enum class MultiVersionKind {
None,
Target,
CPUSpecific,
CPUDispatch
CPUDispatch,
TargetClones
};
/// Represents a function declaration or definition.
@ -2456,6 +2457,10 @@ class FunctionDecl : public DeclaratorDecl,
/// the target functionality.
bool isTargetMultiVersion() const;
/// True if this function is a multiversioned dispatch function as a part of
/// the target-clones functionality.
bool isTargetClonesMultiVersion() const;
/// \brief Get the associated-constraints of this function declaration.
/// Currently, this will either be a vector of size 1 containing the
/// trailing-requires-clause or an empty vector.


@ -352,7 +352,7 @@ class alignas(8) Decl {
DeclContext *Parent, std::size_t Extra = 0);
private:
bool AccessDeclContextSanity() const;
bool AccessDeclContextCheck() const;
/// Get the module ownership kind to use for a local lexical child of \p DC,
/// which may be either a local or (rarely) an imported declaration.
@ -472,11 +472,11 @@ class alignas(8) Decl {
void setAccess(AccessSpecifier AS) {
Access = AS;
assert(AccessDeclContextSanity());
assert(AccessDeclContextCheck());
}
AccessSpecifier getAccess() const {
assert(AccessDeclContextSanity());
assert(AccessDeclContextCheck());
return AccessSpecifier(Access);
}


@ -6304,8 +6304,10 @@ class AtomicExpr : public Expr {
bool isCmpXChg() const {
return getOp() == AO__c11_atomic_compare_exchange_strong ||
getOp() == AO__c11_atomic_compare_exchange_weak ||
getOp() == AO__hip_atomic_compare_exchange_strong ||
getOp() == AO__opencl_atomic_compare_exchange_strong ||
getOp() == AO__opencl_atomic_compare_exchange_weak ||
getOp() == AO__hip_atomic_compare_exchange_weak ||
getOp() == AO__atomic_compare_exchange ||
getOp() == AO__atomic_compare_exchange_n;
}
@ -6340,6 +6342,8 @@ class AtomicExpr : public Expr {
auto Kind =
(Op >= AO__opencl_atomic_load && Op <= AO__opencl_atomic_fetch_max)
? AtomicScopeModelKind::OpenCL
: (Op >= AO__hip_atomic_load && Op <= AO__hip_atomic_fetch_max)
? AtomicScopeModelKind::HIP
: AtomicScopeModelKind::None;
return AtomicScopeModel::create(Kind);
}


@ -18,6 +18,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/Basic/ABI.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/DenseMapInfo.h"
@ -129,8 +130,12 @@ class GlobalDecl {
}
KernelReferenceKind getKernelReferenceKind() const {
assert(isa<FunctionDecl>(getDecl()) &&
cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>() &&
assert(((isa<FunctionDecl>(getDecl()) &&
cast<FunctionDecl>(getDecl())->hasAttr<CUDAGlobalAttr>()) ||
(isa<FunctionTemplateDecl>(getDecl()) &&
cast<FunctionTemplateDecl>(getDecl())
->getTemplatedDecl()
->hasAttr<CUDAGlobalAttr>())) &&
"Decl is not a GPU kernel!");
return static_cast<KernelReferenceKind>(Value.getInt());
}


@ -4227,8 +4227,8 @@ AST_MATCHER(VarDecl, isInitCapture) { return Node.isInitCapture(); }
/// lambdaExpr(forEachLambdaCapture(
/// lambdaCapture(capturesVar(varDecl(hasType(isInteger()))))))
/// will trigger two matches, binding for 'x' and 'y' respectively.
AST_MATCHER_P(LambdaExpr, forEachLambdaCapture, LambdaCaptureMatcher,
InnerMatcher) {
AST_MATCHER_P(LambdaExpr, forEachLambdaCapture,
internal::Matcher<LambdaCapture>, InnerMatcher) {
BoundNodesTreeBuilder Result;
bool Matched = false;
for (const auto &Capture : Node.captures()) {
@ -4655,7 +4655,8 @@ extern const internal::VariadicAllOfMatcher<LambdaCapture> lambdaCapture;
/// lambdaExpr(hasAnyCapture(lambdaCapture())) and
/// lambdaExpr(hasAnyCapture(lambdaCapture(refersToVarDecl(hasName("t")))))
/// both match `[=](){ return t; }`.
AST_MATCHER_P(LambdaExpr, hasAnyCapture, LambdaCaptureMatcher, InnerMatcher) {
AST_MATCHER_P(LambdaExpr, hasAnyCapture, internal::Matcher<LambdaCapture>,
InnerMatcher) {
for (const LambdaCapture &Capture : Node.captures()) {
clang::ast_matchers::internal::BoundNodesTreeBuilder Result(*Builder);
if (InnerMatcher.matches(Capture, Finder, &Result)) {


@ -515,7 +515,7 @@ class CFGTerminator {
/// of the most derived class while we're in the base class.
VirtualBaseBranch,
/// Number of different kinds, for validity checks. We subtract 1 so that
/// Number of different kinds, for assertions. We subtract 1 so that
/// to keep receiving compiler warnings when we don't cover all enum values
/// in a switch.
NumKindsMinusOne = VirtualBaseBranch


@ -0,0 +1,134 @@
//===- DataflowAnalysis.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines base types and functions for building dataflow analyses
// that run over Control-Flow Graphs (CFGs).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSIS_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSIS_H
#include <iterator>
#include <utility>
#include <vector>
#include "clang/AST/ASTContext.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
#include "llvm/ADT/Any.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace dataflow {
/// Base class template for dataflow analyses built on a single lattice type.
///
/// Requirements:
///
/// `Derived` must be derived from a specialization of this class template and
/// must provide the following public members:
/// * `LatticeT initialElement()` - returns a lattice element that models the
/// initial state of a basic block;
/// * `LatticeT transfer(const Stmt *, const LatticeT &, Environment &)` -
/// applies the analysis transfer function for a given statement and lattice
/// element.
///
/// `LatticeT` is a bounded join-semilattice that is used by `Derived` and must
/// provide the following public members:
/// * `LatticeJoinEffect join(const LatticeT &)` - joins the object and the
/// argument by computing their least upper bound, modifies the object if
/// necessary, and returns an effect indicating whether any changes were
/// made to it;
/// * `bool operator==(const LatticeT &) const` - returns true if and only if
/// the object is equal to the argument.
template <typename Derived, typename LatticeT>
class DataflowAnalysis : public TypeErasedDataflowAnalysis {
public:
/// Bounded join-semilattice that is used in the analysis.
using Lattice = LatticeT;
explicit DataflowAnalysis(ASTContext &Context) : Context(Context) {}
ASTContext &getASTContext() final { return Context; }
TypeErasedLattice typeErasedInitialElement() final {
return {static_cast<Derived *>(this)->initialElement()};
}
LatticeJoinEffect joinTypeErased(TypeErasedLattice &E1,
const TypeErasedLattice &E2) final {
Lattice &L1 = llvm::any_cast<Lattice &>(E1.Value);
const Lattice &L2 = llvm::any_cast<const Lattice &>(E2.Value);
return L1.join(L2);
}
bool isEqualTypeErased(const TypeErasedLattice &E1,
const TypeErasedLattice &E2) final {
const Lattice &L1 = llvm::any_cast<const Lattice &>(E1.Value);
const Lattice &L2 = llvm::any_cast<const Lattice &>(E2.Value);
return L1 == L2;
}
TypeErasedLattice transferTypeErased(const Stmt *Stmt,
const TypeErasedLattice &E,
Environment &Env) final {
const Lattice &L = llvm::any_cast<const Lattice &>(E.Value);
return {static_cast<Derived *>(this)->transfer(Stmt, L, Env)};
}
private:
ASTContext &Context;
};
// Model of the program at a given program point.
template <typename LatticeT> struct DataflowAnalysisState {
// Model of a program property.
LatticeT Lattice;
// Model of the state of the program (store and heap).
Environment Env;
};
/// Performs dataflow analysis and returns a mapping from basic block IDs to
/// dataflow analysis states that model the respective basic blocks. Indices
/// of the returned vector correspond to basic block IDs.
///
/// Requirements:
///
/// `Cfg` must have been built with `CFG::BuildOptions::setAllAlwaysAdd()` to
/// ensure that all sub-expressions in a basic block are evaluated.
template <typename AnalysisT>
std::vector<llvm::Optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>
runDataflowAnalysis(const CFG &Cfg, AnalysisT &Analysis,
const Environment &InitEnv) {
auto TypeErasedBlockStates =
runTypeErasedDataflowAnalysis(Cfg, Analysis, InitEnv);
std::vector<
llvm::Optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>
BlockStates;
BlockStates.reserve(TypeErasedBlockStates.size());
llvm::transform(std::move(TypeErasedBlockStates),
std::back_inserter(BlockStates), [](auto &OptState) {
return std::move(OptState).map([](auto &&State) {
return DataflowAnalysisState<typename AnalysisT::Lattice>{
llvm::any_cast<typename AnalysisT::Lattice>(
std::move(State.Lattice.Value)),
std::move(State.Env)};
});
});
return BlockStates;
}
} // namespace dataflow
} // namespace clang
#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWANALYSIS_H
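To make the interface documented in this new header concrete, here is a minimal sketch (not part of this commit) of an analysis built on the DataflowAnalysis template. The names ReachabilityLattice and ReachabilityAnalysis are invented for illustration; the lattice only records whether any statement has been seen on some path. Note that at this point in the series the type-erased engine below still returns an empty result, so this is interface-only.

#include "clang/Analysis/FlowSensitive/DataflowAnalysis.h"

namespace clang {
namespace dataflow {

// Bounded join-semilattice with two elements: "no statement seen" and
// "some statement seen". join() computes the least upper bound.
struct ReachabilityLattice {
  bool SeenStmt = false;

  LatticeJoinEffect join(const ReachabilityLattice &Other) {
    if (!SeenStmt && Other.SeenStmt) {
      SeenStmt = true;
      return LatticeJoinEffect::Changed;
    }
    return LatticeJoinEffect::Unchanged;
  }

  bool operator==(const ReachabilityLattice &Other) const {
    return SeenStmt == Other.SeenStmt;
  }
};

// Derived analysis providing the two members required by the base class:
// initialElement() and transfer().
class ReachabilityAnalysis
    : public DataflowAnalysis<ReachabilityAnalysis, ReachabilityLattice> {
public:
  explicit ReachabilityAnalysis(ASTContext &Ctx)
      : DataflowAnalysis<ReachabilityAnalysis, ReachabilityLattice>(Ctx) {}

  // Models the state of a basic block before any statement is visited.
  ReachabilityLattice initialElement() { return ReachabilityLattice(); }

  // Transfer function: visiting any statement sets the flag.
  ReachabilityLattice transfer(const Stmt *, const ReachabilityLattice &In,
                               Environment &) {
    ReachabilityLattice Out = In;
    Out.SeenStmt = true;
    return Out;
  }
};

} // namespace dataflow
} // namespace clang

// Usage sketch, given a CFG built with CFG::BuildOptions::setAllAlwaysAdd():
//   ReachabilityAnalysis Analysis(ASTCtx);
//   Environment InitEnv;
//   auto BlockStates = runDataflowAnalysis(*Cfg, Analysis, InitEnv);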


@ -0,0 +1,27 @@
//===-- DataflowEnvironment.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines an Environment class that is used by dataflow analyses
// that run over Control-Flow Graphs (CFGs) to keep track of the state of the
// program at given program points.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H
namespace clang {
namespace dataflow {
/// Holds the state of the program (store and heap) at a given program point.
class Environment {};
} // namespace dataflow
} // namespace clang
#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWENVIRONMENT_H


@ -0,0 +1,29 @@
//===- DataflowLattice.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines base types for building lattices to be used in dataflow
// analyses that run over Control-Flow Graphs (CFGs).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWLATTICE_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWLATTICE_H
namespace clang {
namespace dataflow {
/// Effect indicating whether a lattice join operation resulted in a new value.
enum class LatticeJoinEffect {
Unchanged,
Changed,
};
} // namespace dataflow
} // namespace clang
#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_DATAFLOWLATTICE_H


@ -0,0 +1,95 @@
//===- TypeErasedDataflowAnalysis.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines type-erased base types and functions for building dataflow
// analyses that run over Control-Flow Graphs (CFGs).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H
#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H
#include <vector>
#include "clang/AST/ASTContext.h"
#include "clang/AST/Stmt.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "llvm/ADT/Any.h"
#include "llvm/ADT/Optional.h"
namespace clang {
namespace dataflow {
/// Type-erased lattice element container.
///
/// Requirements:
///
/// The type of the object stored in the container must be a bounded
/// join-semilattice.
struct TypeErasedLattice {
llvm::Any Value;
};
/// Type-erased base class for dataflow analyses built on a single lattice type.
class TypeErasedDataflowAnalysis {
public:
virtual ~TypeErasedDataflowAnalysis() {}
/// Returns the `ASTContext` that is used by the analysis.
virtual ASTContext &getASTContext() = 0;
/// Returns a type-erased lattice element that models the initial state of a
/// basic block.
virtual TypeErasedLattice typeErasedInitialElement() = 0;
/// Joins two type-erased lattice elements by computing their least upper
/// bound. Places the join result in the left element and returns an effect
/// indicating whether any changes were made to it.
virtual LatticeJoinEffect joinTypeErased(TypeErasedLattice &,
const TypeErasedLattice &) = 0;
/// Returns true if and only if the two given type-erased lattice elements are
/// equal.
virtual bool isEqualTypeErased(const TypeErasedLattice &,
const TypeErasedLattice &) = 0;
/// Applies the analysis transfer function for a given statement and
/// type-erased lattice element.
virtual TypeErasedLattice transferTypeErased(const Stmt *,
const TypeErasedLattice &,
Environment &) = 0;
};
/// Type-erased model of the program at a given program point.
struct TypeErasedDataflowAnalysisState {
/// Type-erased model of a program property.
TypeErasedLattice Lattice;
/// Model of the state of the program (store and heap).
Environment Env;
};
/// Performs dataflow analysis and returns a mapping from basic block IDs to
/// dataflow analysis states that model the respective basic blocks. Indices
/// of the returned vector correspond to basic block IDs.
///
/// Requirements:
///
/// `Cfg` must have been built with `CFG::BuildOptions::setAllAlwaysAdd()` to
/// ensure that all sub-expressions in a basic block are evaluated.
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>
runTypeErasedDataflowAnalysis(const CFG &Cfg,
TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv);
} // namespace dataflow
} // namespace clang
#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_TYPEERASEDDATAFLOWANALYSIS_H


@ -2677,6 +2677,40 @@ def Target : InheritableAttr {
}];
}
def TargetClones : InheritableAttr {
let Spellings = [GCC<"target_clones">];
let Args = [VariadicStringArgument<"featuresStrs">];
let Documentation = [TargetClonesDocs];
let Subjects = SubjectList<[Function], ErrorDiag>;
let AdditionalMembers = [{
StringRef getFeatureStr(unsigned Index) const {
return *(featuresStrs_begin() + Index);
}
// 'default' is always moved to the end, so it isn't considered
// when mangling the index.
unsigned getMangledIndex(unsigned Index) const {
if (getFeatureStr(Index) == "default")
return std::count_if(featuresStrs_begin(), featuresStrs_end(),
[](StringRef S) { return S != "default"; });
return std::count_if(featuresStrs_begin(), featuresStrs_begin() + Index,
[](StringRef S) { return S != "default"; });
}
// True if this is the first of this version to appear in the config string.
// This is used to make sure we don't try to emit this function multiple
// times.
bool isFirstOfVersion(unsigned Index) const {
StringRef FeatureStr(getFeatureStr(Index));
return 0 == std::count_if(
featuresStrs_begin(), featuresStrs_begin() + Index,
[FeatureStr](StringRef S) { return S == FeatureStr; });
}
}];
}
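As a standalone illustration of the getMangledIndex() comment above (the 'default' version is always mangled last, regardless of its position in the attribute), the following hypothetical snippet reproduces the counting logic for an invented feature list:

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main() {
  // Invented feature list; 'default' appears in the middle on purpose.
  std::vector<std::string> Features = {"sse4.2", "default", "arch=atom"};

  auto MangledIndex = [&](unsigned Index) -> unsigned {
    if (Features[Index] == "default")
      return std::count_if(Features.begin(), Features.end(),
                           [](const std::string &S) { return S != "default"; });
    return std::count_if(Features.begin(), Features.begin() + Index,
                         [](const std::string &S) { return S != "default"; });
  };

  for (unsigned I = 0; I < Features.size(); ++I)
    std::cout << Features[I] << " -> " << MangledIndex(I) << "\n";
  // Prints:
  //   sse4.2 -> 0
  //   default -> 2
  //   arch=atom -> 1
}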
def : MutualExclusions<[TargetClones, Target, CPUDispatch, CPUSpecific]>;
def MinVectorWidth : InheritableAttr {
let Spellings = [Clang<"min_vector_width">];
let Args = [UnsignedArgument<"VectorWidth">];


@ -2233,6 +2233,40 @@ Additionally, a function may not become multiversioned after its first use.
}];
}
def TargetClonesDocs : Documentation {
let Category = DocCatFunction;
let Content = [{
Clang supports the ``target_clones("OPTIONS")`` attribute. This attribute may be
attached to a function declaration and causes function multiversioning, where
multiple versions of the function will be emitted with different code
generation options. Additionally, these versions will be resolved at runtime
based on the priority of their attribute options. All ``target_clone`` functions
are considered multiversioned functions.
All multiversioned functions must contain a ``default`` (fallback)
implementation, otherwise usages of the function are considered invalid.
Additionally, a function may not become multiversioned after its first use.
The options to ``target_clones`` can either be a target-specific architecture
(specified as ``arch=CPU``), or one of a list of subtarget features.
Example "subtarget features" from the x86 backend include: "mmx", "sse", "sse4.2",
"avx", "xop" and largely correspond to the machine specific options handled by
the front end.
The versions can either be listed as a comma-separated sequence of string
literals or as a single string literal containing a comma-separated list of
versions. For compatibility with GCC, the two formats can be mixed. For
example, the following will emit 4 versions of the function:
.. code-block:: c++
__attribute__((target_clones("arch=atom,avx2","arch=ivybridge","default")))
void foo() {}
}];
}
def MinVectorWidthDocs : Documentation {
let Category = DocCatFunction;
let Content = [{


@ -854,6 +854,19 @@ ATOMIC_BUILTIN(__opencl_atomic_fetch_max, "v.", "t")
ATOMIC_BUILTIN(__atomic_fetch_min, "v.", "t")
ATOMIC_BUILTIN(__atomic_fetch_max, "v.", "t")
// HIP atomic builtins.
ATOMIC_BUILTIN(__hip_atomic_load, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_store, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_compare_exchange_weak, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_compare_exchange_strong, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_exchange, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_add, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_and, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_or, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_xor, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_min, "v.", "t")
ATOMIC_BUILTIN(__hip_atomic_fetch_max, "v.", "t")
#undef ATOMIC_BUILTIN
// Non-overloaded atomic builtins.


@ -404,6 +404,7 @@ BUILTIN(__builtin_altivec_vbpermd, "V2ULLiV2ULLiV16Uc", "")
// P8 Crypto built-ins.
BUILTIN(__builtin_altivec_crypto_vsbox, "V2ULLiV2ULLi", "")
BUILTIN(__builtin_altivec_crypto_vpermxor, "V16UcV16UcV16UcV16Uc", "")
BUILTIN(__builtin_altivec_crypto_vpermxor_be, "V16UcV16UcV16UcV16Uc", "")
BUILTIN(__builtin_altivec_crypto_vshasigmaw, "V4UiV4UiIiIi", "")
BUILTIN(__builtin_altivec_crypto_vshasigmad, "V2ULLiV2ULLiIiIi", "")
BUILTIN(__builtin_altivec_crypto_vcipher, "V2ULLiV2ULLiV2ULLi", "")
@ -424,6 +425,12 @@ BUILTIN(__builtin_altivec_vctzh, "V8UsV8Us", "")
BUILTIN(__builtin_altivec_vctzw, "V4UiV4Ui", "")
BUILTIN(__builtin_altivec_vctzd, "V2ULLiV2ULLi", "")
// P8 BCD builtins.
BUILTIN(__builtin_ppc_bcdadd, "V16UcV16UcV16UcIi", "")
BUILTIN(__builtin_ppc_bcdsub, "V16UcV16UcV16UcIi", "")
BUILTIN(__builtin_ppc_bcdadd_p, "iiV16UcV16Uc", "")
BUILTIN(__builtin_ppc_bcdsub_p, "iiV16UcV16Uc", "")
BUILTIN(__builtin_altivec_vclzlsbb, "SiV16Uc", "")
BUILTIN(__builtin_altivec_vctzlsbb, "SiV16Uc", "")
BUILTIN(__builtin_altivec_vprtybw, "V4UiV4Ui", "")


@ -189,6 +189,12 @@ def err_drv_invalid_mtp : Error<
"invalid thread pointer reading mode '%0'">;
def err_drv_missing_arg_mtp : Error<
"missing argument to '%0'">;
def warn_drv_missing_plugin_name : Warning<
"missing plugin name in %0">,
InGroup<InvalidCommandLineArgument>;
def warn_drv_missing_plugin_arg : Warning<
"missing plugin argument for plugin %0 in %1">,
InGroup<InvalidCommandLineArgument>;
def err_drv_invalid_libcxx_deployment : Error<
"invalid deployment target for -stdlib=libc++ (requires %0 or later)">;
def err_drv_invalid_argument_to_option : Error<
@ -394,6 +400,8 @@ def warn_ignoring_verify_debuginfo_preserve_export : Warning<
InGroup<UnusedCommandLineArgument>;
def err_invalid_branch_protection: Error <
"invalid branch protection option '%0' in '%1'">;
def warn_unsupported_branch_protection: Warning <
"invalid branch protection option '%0' in '%1'">, InGroup<BranchProtection>;
def err_invalid_sls_hardening : Error<
"invalid sls hardening option '%0' in '%1'">;
def err_sls_hardening_arm_not_supported : Error<


@ -56,7 +56,9 @@ def CoroutineMissingUnhandledException :
DiagGroup<"coroutine-missing-unhandled-exception">;
def DeprecatedExperimentalCoroutine :
DiagGroup<"deprecated-experimental-coroutine">;
def Coroutine : DiagGroup<"coroutine", [CoroutineMissingUnhandledException, DeprecatedExperimentalCoroutine]>;
def DeprecatedCoroutine :
DiagGroup<"deprecated-coroutine", [DeprecatedExperimentalCoroutine]>;
def Coroutine : DiagGroup<"coroutine", [CoroutineMissingUnhandledException, DeprecatedCoroutine]>;
def ObjCBoolConstantConversion : DiagGroup<"objc-bool-constant-conversion">;
def ConstantConversion : DiagGroup<"constant-conversion",
[BitFieldConstantConversion,
@ -1273,9 +1275,14 @@ def : DiagGroup<"spirv-compat", [SpirCompat]>; // Alias.
// Warning for the GlobalISel options.
def GlobalISel : DiagGroup<"global-isel">;
// A warning group for the GNU extension to allow mixed specifier types for
// target-clones multiversioning.
def TargetClonesMixedSpecifiers : DiagGroup<"target-clones-mixed-specifiers">;
// A warning group specifically for warnings related to function
// multiversioning.
def FunctionMultiVersioning : DiagGroup<"function-multiversion">;
def FunctionMultiVersioning
: DiagGroup<"function-multiversion", [TargetClonesMixedSpecifiers]>;
def NoDeref : DiagGroup<"noderef">;
@ -1331,3 +1338,6 @@ def PedanticMacros : DiagGroup<"pedantic-macros",
BuiltinMacroRedefined,
RestrictExpansionMacro,
FinalMacro]>;
def BranchProtection : DiagGroup<"branch-protection">;


@ -30,7 +30,7 @@ namespace clang {
// Size of each of the diagnostic categories.
enum {
DIAG_SIZE_COMMON = 300,
DIAG_SIZE_DRIVER = 250,
DIAG_SIZE_DRIVER = 300,
DIAG_SIZE_FRONTEND = 150,
DIAG_SIZE_SERIALIZATION = 120,
DIAG_SIZE_LEX = 400,


@ -1548,6 +1548,9 @@ def note_meant_to_use_typename : Note<
let CategoryName = "Coroutines Issue" in {
def err_for_co_await_not_range_for : Error<
"'co_await' modifier can only be applied to range-based for loop">;
def warn_deprecated_for_co_await : Warning<
"'for co_await' belongs to CoroutineTS instead of C++20, which is deprecated">,
InGroup<DeprecatedCoroutine>;
}
let CategoryName = "Concepts Issue" in {


@ -2979,9 +2979,13 @@ def err_attribute_requires_opencl_version : Error<
"attribute %0 is supported in the OpenCL version %1%select{| onwards}2">;
def err_invalid_branch_protection_spec : Error<
"invalid or misplaced branch protection specification '%0'">;
def warn_unsupported_branch_protection_spec : Warning<
"unsupported branch protection specification '%0'">, InGroup<BranchProtection>;
def warn_unsupported_target_attribute
: Warning<"%select{unsupported|duplicate|unknown}0%select{| architecture|"
" tune CPU}1 '%2' in the 'target' attribute string; 'target' "
" tune CPU}1 '%2' in the '%select{target|target_clones}3' "
"attribute string; '%select{target|target_clones}3' "
"attribute ignored">,
InGroup<IgnoredAttributes>;
def err_attribute_unsupported
@ -9864,6 +9868,8 @@ def warn_duplicate_attribute_exact : Warning<
def warn_duplicate_attribute : Warning<
"attribute %0 is already applied with different arguments">,
InGroup<IgnoredAttributes>;
def err_disallowed_duplicate_attribute : Error<
"attribute %0 cannot appear more than once on a declaration">;
def warn_sync_fetch_and_nand_semantics_change : Warning<
"the semantics of this intrinsic changed with GCC "
@ -11254,9 +11260,11 @@ def err_multiversion_duplicate : Error<
"multiversioned function redeclarations require identical target attributes">;
def err_multiversion_noproto : Error<
"multiversioned function must have a prototype">;
def err_multiversion_disallowed_other_attr : Error<
"attribute '%select{target|cpu_specific|cpu_dispatch}0' multiversioning cannot be combined"
" with attribute %1">;
def err_multiversion_disallowed_other_attr
: Error<"attribute "
"'%select{|target|cpu_specific|cpu_dispatch|target_clones}0' "
"multiversioning cannot be combined"
" with attribute %1">;
def err_multiversion_mismatched_attrs
: Error<"attributes on multiversioned functions must all match, attribute "
"%0 %select{is missing|has different arguments}1">;
@ -11264,11 +11272,14 @@ def err_multiversion_diff : Error<
"multiversioned function declaration has a different %select{calling convention"
"|return type|constexpr specification|inline specification|linkage|"
"language linkage}0">;
def err_multiversion_doesnt_support : Error<
"attribute '%select{target|cpu_specific|cpu_dispatch}0' multiversioned functions do not "
"yet support %select{function templates|virtual functions|"
"deduced return types|constructors|destructors|deleted functions|"
"defaulted functions|constexpr functions|consteval function}1">;
def err_multiversion_doesnt_support
: Error<"attribute "
"'%select{|target|cpu_specific|cpu_dispatch|target_clones}0' "
"multiversioned functions do not "
"yet support %select{function templates|virtual functions|"
"deduced return types|constructors|destructors|deleted functions|"
"defaulted functions|constexpr functions|consteval "
"function|lambdas}1">;
def err_multiversion_not_allowed_on_main : Error<
"'main' cannot be a multiversioned function">;
def err_multiversion_not_supported : Error<
@ -11285,6 +11296,19 @@ def warn_multiversion_duplicate_entries : Warning<
def warn_dispatch_body_ignored : Warning<
"body of cpu_dispatch function will be ignored">,
InGroup<FunctionMultiVersioning>;
def err_target_clone_must_have_default
: Error<"'target_clones' multiversioning requires a default target">;
def err_target_clone_doesnt_match
: Error<"'target_clones' attribute does not match previous declaration">;
def warn_target_clone_mixed_values
: ExtWarn<
"mixing 'target_clones' specifier mechanisms is permitted for GCC "
"compatibility; use a comma separated sequence of string literals, "
"or a string literal containing a comma-separated list of versions">,
InGroup<TargetClonesMixedSpecifiers>;
def warn_target_clone_duplicate_options
: Warning<"version list contains duplicate entries">,
InGroup<FunctionMultiVersioning>;
// three-way comparison operator diagnostics
def err_implied_comparison_category_type_not_found : Error<


@ -40,6 +40,11 @@ namespace clang {
/// Update getAsString.
///
enum class SyncScope {
HIPSingleThread,
HIPWavefront,
HIPWorkgroup,
HIPAgent,
HIPSystem,
OpenCLWorkGroup,
OpenCLDevice,
OpenCLAllSVMDevices,
@ -49,6 +54,16 @@ enum class SyncScope {
inline llvm::StringRef getAsString(SyncScope S) {
switch (S) {
case SyncScope::HIPSingleThread:
return "hip_singlethread";
case SyncScope::HIPWavefront:
return "hip_wavefront";
case SyncScope::HIPWorkgroup:
return "hip_workgroup";
case SyncScope::HIPAgent:
return "hip_agent";
case SyncScope::HIPSystem:
return "hip_system";
case SyncScope::OpenCLWorkGroup:
return "opencl_workgroup";
case SyncScope::OpenCLDevice:
@ -62,7 +77,7 @@ inline llvm::StringRef getAsString(SyncScope S) {
}
/// Defines the kind of atomic scope models.
enum class AtomicScopeModelKind { None, OpenCL };
enum class AtomicScopeModelKind { None, OpenCL, HIP };
/// Defines the interface for synch scope model.
class AtomicScopeModel {
@ -138,6 +153,58 @@ class AtomicScopeOpenCLModel : public AtomicScopeModel {
}
};
/// Defines the synch scope model for HIP.
class AtomicScopeHIPModel : public AtomicScopeModel {
public:
/// The enum values match the pre-defined macros
/// __HIP_MEMORY_SCOPE_*, which are used to define memory_scope_*
/// enums in hip-c.h.
enum ID {
SingleThread = 1,
Wavefront = 2,
Workgroup = 3,
Agent = 4,
System = 5,
Last = System
};
AtomicScopeHIPModel() {}
SyncScope map(unsigned S) const override {
switch (static_cast<ID>(S)) {
case SingleThread:
return SyncScope::HIPSingleThread;
case Wavefront:
return SyncScope::HIPWavefront;
case Workgroup:
return SyncScope::HIPWorkgroup;
case Agent:
return SyncScope::HIPAgent;
case System:
return SyncScope::HIPSystem;
}
llvm_unreachable("Invalid language synch scope value");
}
bool isValid(unsigned S) const override {
return S >= static_cast<unsigned>(SingleThread) &&
S <= static_cast<unsigned>(Last);
}
ArrayRef<unsigned> getRuntimeValues() const override {
static_assert(Last == System, "Does not include all synch scopes");
static const unsigned Scopes[] = {
static_cast<unsigned>(SingleThread), static_cast<unsigned>(Wavefront),
static_cast<unsigned>(Workgroup), static_cast<unsigned>(Agent),
static_cast<unsigned>(System)};
return llvm::makeArrayRef(Scopes);
}
unsigned getFallBackValue() const override {
return static_cast<unsigned>(System);
}
};
inline std::unique_ptr<AtomicScopeModel>
AtomicScopeModel::create(AtomicScopeModelKind K) {
switch (K) {
@ -145,9 +212,11 @@ AtomicScopeModel::create(AtomicScopeModelKind K) {
return std::unique_ptr<AtomicScopeModel>{};
case AtomicScopeModelKind::OpenCL:
return std::make_unique<AtomicScopeOpenCLModel>();
case AtomicScopeModelKind::HIP:
return std::make_unique<AtomicScopeHIPModel>();
}
llvm_unreachable("Invalid atomic scope model kind");
}
}
} // namespace clang
#endif
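To connect the new __hip_atomic_* builtins with the scope model above, here is a hedged usage sketch (not from this change). HIP device code is expected to pass one of the __HIP_MEMORY_SCOPE_* values, which AtomicScopeHIPModel::map translates into the SyncScope values defined earlier; the argument order is assumed to mirror the existing __opencl_atomic_* overloads, with the synchronization scope as the trailing argument.

// Hypothetical HIP device-code sketch.
__device__ int increment_counter(int *Counter) {
  // Agent scope: visible to all threads on the same GPU device.
  return __hip_atomic_fetch_add(Counter, 1, __ATOMIC_RELAXED,
                                __HIP_MEMORY_SCOPE_AGENT);
}

__device__ int read_flag(int *Flag) {
  // Workgroup scope: only ordered with respect to the same work-group.
  return __hip_atomic_load(Flag, __ATOMIC_ACQUIRE,
                           __HIP_MEMORY_SCOPE_WORKGROUP);
}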


@ -2504,6 +2504,9 @@ defm rwpi : BoolFOption<"rwpi",
NegFlag<SetFalse>>;
def fplugin_EQ : Joined<["-"], "fplugin=">, Group<f_Group>, Flags<[NoXarchOption]>, MetaVarName<"<dsopath>">,
HelpText<"Load the named plugin (dynamic shared object)">;
def fplugin_arg : Joined<["-"], "fplugin-arg-">,
MetaVarName<"<name>-<arg>">,
HelpText<"Pass <arg> to plugin <name>">;
def fpass_plugin_EQ : Joined<["-"], "fpass-plugin=">,
Group<f_Group>, Flags<[CC1Option]>, MetaVarName<"<dsopath>">,
HelpText<"Load pass plugin from a dynamic shared object file (only with new pass manager).">,
@ -2786,10 +2789,11 @@ def fvisibility_ms_compat : Flag<["-"], "fvisibility-ms-compat">, Group<f_Group>
def fvisibility_global_new_delete_hidden : Flag<["-"], "fvisibility-global-new-delete-hidden">, Group<f_Group>,
HelpText<"Give global C++ operator new and delete declarations hidden visibility">, Flags<[CC1Option]>,
MarshallingInfoFlag<LangOpts<"GlobalAllocationFunctionVisibilityHidden">>;
def fnew_infallible : Flag<["-"], "fnew-infallible">, Group<f_Group>,
HelpText<"Treats throwing global C++ operator new as always returning valid memory "
"(annotates with __attribute__((returns_nonnull)) and throw()). This is detectable in source.">,
Flags<[CC1Option]>, MarshallingInfoFlag<LangOpts<"NewInfallible">>;
defm new_infallible : BoolFOption<"new-infallible",
LangOpts<"NewInfallible">, DefaultFalse,
PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
BothFlags<[CC1Option], " treating throwing global C++ operator new as always returning valid memory "
"(annotates with __attribute__((returns_nonnull)) and throw()). This is detectable in source.">>;
defm whole_program_vtables : BoolFOption<"whole-program-vtables",
CodeGenOpts<"WholeProgramVTables">, DefaultFalse,
PosFlag<SetTrue, [CC1Option], "Enables whole-program vtable optimization. Requires -flto">,
@ -4519,7 +4523,7 @@ def frecord_marker_EQ : Joined<["-"], "frecord-marker=">, Group<gfortran_Group>;
defm aggressive_function_elimination : BooleanFFlag<"aggressive-function-elimination">, Group<gfortran_Group>;
defm align_commons : BooleanFFlag<"align-commons">, Group<gfortran_Group>;
defm all_intrinsics : BooleanFFlag<"all-intrinsics">, Group<gfortran_Group>;
defm automatic : BooleanFFlag<"automatic">, Group<gfortran_Group>;
def fautomatic : Flag<["-"], "fautomatic">; // -fno-automatic is significant
defm backtrace : BooleanFFlag<"backtrace">, Group<gfortran_Group>;
defm bounds_check : BooleanFFlag<"bounds-check">, Group<gfortran_Group>;
defm check_array_temporaries : BooleanFFlag<"check-array-temporaries">, Group<gfortran_Group>;
@ -4616,6 +4620,9 @@ defm backslash : OptInFC1FFlag<"backslash", "Specify that backslash in string in
defm xor_operator : OptInFC1FFlag<"xor-operator", "Enable .XOR. as a synonym of .NEQV.">;
defm logical_abbreviations : OptInFC1FFlag<"logical-abbreviations", "Enable logical abbreviations">;
defm implicit_none : OptInFC1FFlag<"implicit-none", "No implicit typing allowed unless overridden by IMPLICIT statements">;
def fno_automatic : Flag<["-"], "fno-automatic">, Group<f_Group>,
HelpText<"Implies the SAVE attribute for non-automatic local objects in subprograms unless RECURSIVE">;
}
def J : JoinedOrSeparate<["-"], "J">,
@ -5059,9 +5066,10 @@ def msmall_data_limit : Separate<["-"], "msmall-data-limit">,
def funwind_tables_EQ : Joined<["-"], "funwind-tables=">,
HelpText<"Generate unwinding tables for all functions">,
MarshallingInfoInt<CodeGenOpts<"UnwindTables">>;
def mconstructor_aliases : Flag<["-"], "mconstructor-aliases">,
HelpText<"Emit complete constructors and destructors as aliases when possible">,
MarshallingInfoFlag<CodeGenOpts<"CXXCtorDtorAliases">>;
defm constructor_aliases : BoolOption<"m", "constructor-aliases",
CodeGenOpts<"CXXCtorDtorAliases">, DefaultFalse,
PosFlag<SetTrue, [], "Enable">, NegFlag<SetFalse, [], "Disable">,
BothFlags<[CC1Option], " emitting complete constructors and destructors as aliases when possible">>;
def mlink_bitcode_file : Separate<["-"], "mlink-bitcode-file">,
HelpText<"Link the given bitcode file before performing optimizations.">;
def mlink_builtin_bitcode : Separate<["-"], "mlink-builtin-bitcode">,
@ -5174,10 +5182,6 @@ defm debug_pass_manager : BoolOption<"f", "debug-pass-manager",
CodeGenOpts<"DebugPassManager">, DefaultFalse,
PosFlag<SetTrue, [], "Prints debug information for the new pass manager">,
NegFlag<SetFalse, [], "Disables debug printing for the new pass manager">>;
def fexperimental_debug_variable_locations : Flag<["-"],
"fexperimental-debug-variable-locations">,
HelpText<"Use experimental new value-tracking variable locations">,
MarshallingInfoFlag<CodeGenOpts<"ValueTrackingVariableLocations">>;
def fverify_debuginfo_preserve
: Flag<["-"], "fverify-debuginfo-preserve">,
HelpText<"Enable Debug Info Metadata preservation testing in "


@ -274,7 +274,7 @@ class PreambleCallbacks {
public:
virtual ~PreambleCallbacks() = default;
/// Called before FrontendAction::BeginSourceFile.
/// Called before FrontendAction::Execute.
/// Can be used to store references to various CompilerInstance fields
/// (e.g. SourceManager) that may be interesting to the consumers of other
/// callbacks.
@ -291,7 +291,7 @@ class PreambleCallbacks {
/// used instead, but having only this method allows a simpler API.
virtual void HandleTopLevelDecl(DeclGroupRef DG);
/// Creates wrapper class for PPCallbacks so we can also process information
/// about includes that are inside of a preamble
/// about includes that are inside of a preamble. Called after BeforeExecute.
virtual std::unique_ptr<PPCallbacks> createPPCallbacks();
/// The returned CommentHandler will be added to the preprocessor if not null.
virtual CommentHandler *getCommentHandler();


@ -1296,6 +1296,11 @@ class Sema final {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
// A context can be nested in both a discarded statement context and
// an immediate function context, so they need to be tracked independently.
bool InDiscardedStatement;
bool InImmediateFunctionContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
@ -1303,7 +1308,8 @@ class Sema final {
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}
ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext),
InDiscardedStatement(false), InImmediateFunctionContext(false) {}
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
@ -1317,7 +1323,13 @@ class Sema final {
}
bool isImmediateFunctionContext() const {
return Context == ExpressionEvaluationContext::ImmediateFunctionContext;
return Context == ExpressionEvaluationContext::ImmediateFunctionContext ||
InImmediateFunctionContext;
}
bool isDiscardedStatementContext() const {
return Context == ExpressionEvaluationContext::DiscardedStatement ||
InDiscardedStatement;
}
};
@ -4351,6 +4363,10 @@ class Sema final {
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
const StringLiteral *Literal,
bool &HasDefault, bool &HasCommas,
SmallVectorImpl<StringRef> &Strings);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
@ -9150,14 +9166,7 @@ class Sema final {
bool isImmediateFunctionContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
for (const ExpressionEvaluationContextRecord &context :
llvm::reverse(ExprEvalContexts)) {
if (context.isImmediateFunctionContext())
return true;
if (context.isUnevaluated())
return false;
}
return false;
return ExprEvalContexts.back().isImmediateFunctionContext();
}
/// RAII class used to determine whether SFINAE has


@ -195,15 +195,23 @@ class DependencyScanningWorkerFilesystem : public llvm::vfs::ProxyFileSystem {
llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
openFileForRead(const Twine &Path) override;
void clearIgnoredFiles() { IgnoredFiles.clear(); }
void ignoreFile(StringRef Filename);
/// Disable minimization of the given file.
void disableMinimization(StringRef Filename);
/// Enable minimization of all files.
void enableMinimizationOfAllFiles() { NotToBeMinimized.clear(); }
private:
bool shouldIgnoreFile(StringRef Filename);
/// Check whether the file should be minimized.
bool shouldMinimize(StringRef Filename);
llvm::ErrorOr<const CachedFileSystemEntry *>
getOrCreateFileSystemEntry(const StringRef Filename);
/// Create a cached file system entry based on the initial status result.
CachedFileSystemEntry
createFileSystemEntry(llvm::ErrorOr<llvm::vfs::Status> &&MaybeStatus,
StringRef Filename, bool ShouldMinimize);
/// The global cache shared between worker threads.
DependencyScanningFilesystemSharedCache &SharedCache;
/// The local cache is used by the worker thread to cache file system queries
@ -214,7 +222,7 @@ class DependencyScanningWorkerFilesystem : public llvm::vfs::ProxyFileSystem {
/// currently active preprocessor.
ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings;
/// The set of files that should not be minimized.
llvm::StringSet<> IgnoredFiles;
llvm::StringSet<> NotToBeMinimized;
};
} // end namespace dependencies


@ -11798,6 +11798,15 @@ void ASTContext::getFunctionFeatureMap(llvm::StringMap<bool> &FeatureMap,
Target->getTargetOpts().FeaturesAsWritten.begin(),
Target->getTargetOpts().FeaturesAsWritten.end());
Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else if (const auto *TC = FD->getAttr<TargetClonesAttr>()) {
std::vector<std::string> Features;
StringRef VersionStr = TC->getFeatureStr(GD.getMultiVersionIndex());
if (VersionStr.startswith("arch="))
TargetCPU = VersionStr.drop_front(sizeof("arch=") - 1);
else if (VersionStr != "default")
Features.push_back((StringRef{"+"} + VersionStr).str());
Target->initFeatureMap(FeatureMap, getDiagnostics(), TargetCPU, Features);
} else {
FeatureMap = Target->getTargetOpts().FeatureMap;
}
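For illustration only (the attribute and indices below are hypothetical), the new TargetClonesAttr branch above resolves each clone of a target_clones function roughly as follows:

//   __attribute__((target_clones("avx2", "arch=atom", "default"))) void f();
//
//   multiversion index 0 ("avx2")      -> Features = {"+avx2"}, CPU unchanged
//   multiversion index 1 ("arch=atom") -> TargetCPU = "atom", no extra features
//   multiversion index 2 ("default")   -> neither; the base feature map is used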


@ -1347,6 +1347,42 @@ IsStructurallyEquivalentLambdas(StructuralEquivalenceContext &Context,
return true;
}
/// Determine if context of a class is equivalent.
static bool IsRecordContextStructurallyEquivalent(RecordDecl *D1,
RecordDecl *D2) {
// The context should be completely equal, including anonymous and inline
// namespaces.
// We compare objects as part of full translation units, not subtrees of
// translation units.
DeclContext *DC1 = D1->getDeclContext()->getNonTransparentContext();
DeclContext *DC2 = D2->getDeclContext()->getNonTransparentContext();
while (true) {
// Special case: We allow a struct defined in a function to be equivalent
// with a similar struct defined outside of a function.
if ((DC1->isFunctionOrMethod() && DC2->isTranslationUnit()) ||
(DC2->isFunctionOrMethod() && DC1->isTranslationUnit()))
return true;
if (DC1->getDeclKind() != DC2->getDeclKind())
return false;
if (DC1->isTranslationUnit())
break;
if (DC1->isInlineNamespace() != DC2->isInlineNamespace())
return false;
if (const auto *ND1 = dyn_cast<NamedDecl>(DC1)) {
const auto *ND2 = cast<NamedDecl>(DC2);
if (!DC1->isInlineNamespace() &&
!IsStructurallyEquivalent(ND1->getIdentifier(), ND2->getIdentifier()))
return false;
}
DC1 = DC1->getParent()->getNonTransparentContext();
DC2 = DC2->getParent()->getNonTransparentContext();
}
return true;
}
/// Determine structural equivalence of two records.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
RecordDecl *D1, RecordDecl *D2) {
@ -1386,6 +1422,12 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
}
// If the records occur in different context (namespace), these should be
// different. This is specially important if the definition of one or both
// records is missing.
if (!IsRecordContextStructurallyEquivalent(D1, D2))
return false;
// If both declarations are class template specializations, we know
// the ODR applies, so check the template and template arguments.
const auto *Spec1 = dyn_cast<ClassTemplateSpecializationDecl>(D1);
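A hypothetical example of what the new context check rejects, and what the function-scope special case still allows:

// With IsRecordContextStructurallyEquivalent, these two definitions are no
// longer considered structurally equivalent, because their enclosing
// namespaces differ even though the bodies match:
//
//   namespace A { struct S { int x; }; }   // first translation unit
//   namespace B { struct S { int x; }; }   // second translation unit
//
// The special case at the top of the loop still lets a struct defined inside
// a function body match a similar struct at translation-unit scope, so
// importing such local types keeps working.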


@ -3256,6 +3256,8 @@ MultiVersionKind FunctionDecl::getMultiVersionKind() const {
return MultiVersionKind::CPUDispatch;
if (hasAttr<CPUSpecificAttr>())
return MultiVersionKind::CPUSpecific;
if (hasAttr<TargetClonesAttr>())
return MultiVersionKind::TargetClones;
return MultiVersionKind::None;
}
@ -3271,6 +3273,10 @@ bool FunctionDecl::isTargetMultiVersion() const {
return isMultiVersion() && hasAttr<TargetAttr>();
}
bool FunctionDecl::isTargetClonesMultiVersion() const {
return isMultiVersion() && hasAttr<TargetClonesAttr>();
}
void
FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
redeclarable_base::setPreviousDecl(PrevDecl);


@ -964,7 +964,7 @@ SourceLocation Decl::getBodyRBrace() const {
return {};
}
bool Decl::AccessDeclContextSanity() const {
bool Decl::AccessDeclContextCheck() const {
#ifndef NDEBUG
// Suppress this check if any of the following hold:
// 1. this is the translation unit (and thus has no parent)
@ -1212,7 +1212,7 @@ bool DeclContext::Encloses(const DeclContext *DC) const {
return getPrimaryContext()->Encloses(DC);
for (; DC; DC = DC->getParent())
if (DC->getPrimaryContext() == this)
if (!isa<LinkageSpecDecl>(DC) && DC->getPrimaryContext() == this)
return true;
return false;
}


@ -4681,6 +4681,7 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
return 2;
case AO__opencl_atomic_load:
case AO__hip_atomic_load:
case AO__c11_atomic_store:
case AO__c11_atomic_exchange:
case AO__atomic_load:
@ -4713,7 +4714,15 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__atomic_fetch_max:
return 3;
case AO__hip_atomic_exchange:
case AO__hip_atomic_fetch_add:
case AO__hip_atomic_fetch_and:
case AO__hip_atomic_fetch_or:
case AO__hip_atomic_fetch_xor:
case AO__hip_atomic_fetch_min:
case AO__hip_atomic_fetch_max:
case AO__opencl_atomic_store:
case AO__hip_atomic_store:
case AO__opencl_atomic_exchange:
case AO__opencl_atomic_fetch_add:
case AO__opencl_atomic_fetch_sub:
@ -4728,9 +4737,10 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
case AO__c11_atomic_compare_exchange_strong:
case AO__c11_atomic_compare_exchange_weak:
return 5;
case AO__hip_atomic_compare_exchange_strong:
case AO__opencl_atomic_compare_exchange_strong:
case AO__opencl_atomic_compare_exchange_weak:
case AO__hip_atomic_compare_exchange_weak:
case AO__atomic_compare_exchange:
case AO__atomic_compare_exchange_n:
return 6;


@ -7486,7 +7486,7 @@ class ExprEvaluatorBase
const Expr *Source = E->getSourceExpr();
if (!Source)
return Error(E);
if (Source == E) { // sanity checking.
if (Source == E) {
assert(0 && "OpaqueValueExpr recursively refers to itself");
return Error(E);
}


@ -21,6 +21,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Mangle.h"
#include "clang/AST/VTableBuilder.h"
#include "clang/Basic/ABI.h"
@ -39,6 +40,18 @@ using namespace clang;
namespace {
// Get GlobalDecl of DeclContext of local entities.
static GlobalDecl getGlobalDeclAsDeclContext(const DeclContext *DC) {
GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(DC))
GD = GlobalDecl(CD, Ctor_Complete);
else if (auto *DD = dyn_cast<CXXDestructorDecl>(DC))
GD = GlobalDecl(DD, Dtor_Complete);
else
GD = GlobalDecl(cast<FunctionDecl>(DC));
return GD;
}
struct msvc_hashing_ostream : public llvm::raw_svector_ostream {
raw_ostream &OS;
llvm::SmallString<64> Buffer;
@ -345,9 +358,9 @@ class MicrosoftCXXNameMangler {
raw_ostream &getStream() const { return Out; }
void mangle(const NamedDecl *D, StringRef Prefix = "?");
void mangleName(const NamedDecl *ND);
void mangleFunctionEncoding(const FunctionDecl *FD, bool ShouldMangle);
void mangle(GlobalDecl GD, StringRef Prefix = "?");
void mangleName(GlobalDecl GD);
void mangleFunctionEncoding(GlobalDecl GD, bool ShouldMangle);
void mangleVariableEncoding(const VarDecl *VD);
void mangleMemberDataPointer(const CXXRecordDecl *RD, const ValueDecl *VD,
StringRef Prefix = "$");
@ -370,7 +383,7 @@ class MicrosoftCXXNameMangler {
const FunctionDecl *D = nullptr,
bool ForceThisQuals = false,
bool MangleExceptionSpec = true);
void mangleNestedName(const NamedDecl *ND);
void mangleNestedName(GlobalDecl GD);
private:
bool isStructorDecl(const NamedDecl *ND) const {
@ -384,10 +397,10 @@ class MicrosoftCXXNameMangler {
AddrSpace == LangAS::ptr32_uptr));
}
void mangleUnqualifiedName(const NamedDecl *ND) {
mangleUnqualifiedName(ND, ND->getDeclName());
void mangleUnqualifiedName(GlobalDecl GD) {
mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName());
}
void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name);
void mangleSourceName(StringRef Name);
void mangleOperatorName(OverloadedOperatorKind OO, SourceLocation Loc);
void mangleCXXDtorType(CXXDtorType T);
@ -396,9 +409,9 @@ class MicrosoftCXXNameMangler {
void manglePointerCVQualifiers(Qualifiers Quals);
void manglePointerExtQualifiers(Qualifiers Quals, QualType PointeeType);
void mangleUnscopedTemplateName(const TemplateDecl *ND);
void mangleUnscopedTemplateName(GlobalDecl GD);
void
mangleTemplateInstantiationName(const TemplateDecl *TD,
mangleTemplateInstantiationName(GlobalDecl GD,
const TemplateArgumentList &TemplateArgs);
void mangleObjCMethodName(const ObjCMethodDecl *MD);
@ -533,7 +546,8 @@ MicrosoftMangleContextImpl::shouldMangleStringLiteral(const StringLiteral *SL) {
return true;
}
void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
void MicrosoftCXXNameMangler::mangle(GlobalDecl GD, StringRef Prefix) {
const NamedDecl *D = cast<NamedDecl>(GD.getDecl());
// MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
// Therefore it's really important that we don't decorate the
// name with leading underscores or leading/trailing at signs. So, by
@ -542,9 +556,9 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
// <mangled-name> ::= ? <name> <type-encoding>
Out << Prefix;
mangleName(D);
mangleName(GD);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
mangleFunctionEncoding(FD, Context.shouldMangleDeclName(FD));
mangleFunctionEncoding(GD, Context.shouldMangleDeclName(FD));
else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
mangleVariableEncoding(VD);
else if (isa<MSGuidDecl>(D))
@ -558,8 +572,9 @@ void MicrosoftCXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
llvm_unreachable("Tried to mangle unexpected NamedDecl!");
}
void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD,
void MicrosoftCXXNameMangler::mangleFunctionEncoding(GlobalDecl GD,
bool ShouldMangle) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
// <type-encoding> ::= <function-class> <function-type>
// Since MSVC operates on the type as written and not the canonical type, it
@ -770,13 +785,13 @@ void MicrosoftCXXNameMangler::mangleVirtualMemPtrThunk(
mangleCallingConvention(MD->getType()->castAs<FunctionProtoType>());
}
void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
void MicrosoftCXXNameMangler::mangleName(GlobalDecl GD) {
// <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
// Always start with the unqualified name.
mangleUnqualifiedName(ND);
mangleUnqualifiedName(GD);
mangleNestedName(ND);
mangleNestedName(GD);
// Terminate the whole name with an '@'.
Out << '@';
@ -844,13 +859,14 @@ void MicrosoftCXXNameMangler::mangleBits(llvm::APInt Value) {
}
}
static const TemplateDecl *
isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
static GlobalDecl isTemplate(GlobalDecl GD,
const TemplateArgumentList *&TemplateArgs) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// Check if we have a function template.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
TemplateArgs = FD->getTemplateSpecializationArgs();
return TD;
return GD.getWithDecl(TD);
}
}
@ -858,21 +874,22 @@ isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
if (const ClassTemplateSpecializationDecl *Spec =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
return Spec->getSpecializedTemplate();
return GD.getWithDecl(Spec->getSpecializedTemplate());
}
// Check if we have a variable template.
if (const VarTemplateSpecializationDecl *Spec =
dyn_cast<VarTemplateSpecializationDecl>(ND)) {
TemplateArgs = &Spec->getTemplateArgs();
return Spec->getSpecializedTemplate();
return GD.getWithDecl(Spec->getSpecializedTemplate());
}
return nullptr;
return GlobalDecl();
}
void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
void MicrosoftCXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
DeclarationName Name) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <unqualified-name> ::= <operator-name>
// ::= <ctor-dtor-name>
// ::= <source-name>
@ -880,11 +897,11 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
// Function templates aren't considered for name back referencing. This
// makes sense since function templates aren't likely to occur multiple
// times in a symbol.
if (isa<FunctionTemplateDecl>(TD)) {
if (isa<FunctionTemplateDecl>(TD.getDecl())) {
mangleTemplateInstantiationName(TD, *TemplateArgs);
Out << '@';
return;
@ -945,7 +962,19 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
switch (Name.getNameKind()) {
case DeclarationName::Identifier: {
if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
mangleSourceName(II->getName());
bool IsDeviceStub =
ND &&
((isa<FunctionDecl>(ND) && ND->hasAttr<CUDAGlobalAttr>()) ||
(isa<FunctionTemplateDecl>(ND) &&
cast<FunctionTemplateDecl>(ND)
->getTemplatedDecl()
->hasAttr<CUDAGlobalAttr>())) &&
GD.getKernelReferenceKind() == KernelReferenceKind::Stub;
if (IsDeviceStub)
mangleSourceName(
(llvm::Twine("__device_stub__") + II->getName()).str());
else
mangleSourceName(II->getName());
break;
}
@ -1146,7 +1175,8 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
// <postfix> ::= <unqualified-name> [<postfix>]
// ::= <substitution> [<postfix>]
void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
void MicrosoftCXXNameMangler::mangleNestedName(GlobalDecl GD) {
const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
const DeclContext *DC = getEffectiveDeclContext(ND);
while (!DC->isTranslationUnit()) {
if (isa<TagDecl>(ND) || isa<VarDecl>(ND)) {
@ -1229,7 +1259,7 @@ void MicrosoftCXXNameMangler::mangleNestedName(const NamedDecl *ND) {
} else if (isa<NamedDecl>(DC)) {
ND = cast<NamedDecl>(DC);
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
mangle(FD, "?");
mangle(getGlobalDeclAsDeclContext(FD), "?");
break;
} else {
mangleUnqualifiedName(ND);
@ -1418,7 +1448,7 @@ void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
}
void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
const TemplateDecl *TD, const TemplateArgumentList &TemplateArgs) {
GlobalDecl GD, const TemplateArgumentList &TemplateArgs) {
// <template-name> ::= <unscoped-template-name> <template-args>
// ::= <substitution>
// Always start with the unqualified name.
@ -1433,8 +1463,8 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
TemplateArgBackReferences.swap(OuterTemplateArgsContext);
PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
mangleUnscopedTemplateName(TD);
mangleTemplateArgs(TD, TemplateArgs);
mangleUnscopedTemplateName(GD);
mangleTemplateArgs(cast<TemplateDecl>(GD.getDecl()), TemplateArgs);
// Restore the previous back reference contexts.
NameBackReferences.swap(OuterTemplateContext);
@ -1443,11 +1473,10 @@ void MicrosoftCXXNameMangler::mangleTemplateInstantiationName(
PassObjectSizeArgs.swap(OuterPassObjectSizeArgs);
}
void
MicrosoftCXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *TD) {
void MicrosoftCXXNameMangler::mangleUnscopedTemplateName(GlobalDecl GD) {
// <unscoped-template-name> ::= ?$ <unqualified-name>
Out << "?$";
mangleUnqualifiedName(TD);
mangleUnqualifiedName(GD);
}
void MicrosoftCXXNameMangler::mangleIntegerLiteral(
@ -3323,17 +3352,17 @@ void MicrosoftMangleContextImpl::mangleCXXName(GlobalDecl GD,
if (auto *CD = dyn_cast<CXXConstructorDecl>(D)) {
auto Type = GD.getCtorType();
MicrosoftCXXNameMangler mangler(*this, MHO, CD, Type);
return mangler.mangle(D);
return mangler.mangle(GD);
}
if (auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
auto Type = GD.getDtorType();
MicrosoftCXXNameMangler mangler(*this, MHO, DD, Type);
return mangler.mangle(D);
return mangler.mangle(GD);
}
MicrosoftCXXNameMangler Mangler(*this, MHO);
return Mangler.mangle(D);
return Mangler.mangle(GD);
}
void MicrosoftCXXNameMangler::mangleType(const ExtIntType *T, Qualifiers,

View File

@ -1691,7 +1691,8 @@ void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
PrintExpr(Node->getPtr());
if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
Node->getOp() != AtomicExpr::AO__atomic_load_n &&
Node->getOp() != AtomicExpr::AO__opencl_atomic_load) {
Node->getOp() != AtomicExpr::AO__opencl_atomic_load &&
Node->getOp() != AtomicExpr::AO__hip_atomic_load) {
OS << ", ";
PrintExpr(Node->getVal1());
}


@ -0,0 +1,35 @@
//===- TypeErasedDataflowAnalysis.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines type-erased base types and functions for building dataflow
// analyses that run over Control-Flow Graphs (CFGs).
//
//===----------------------------------------------------------------------===//
#include <vector>
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
#include "llvm/ADT/Optional.h"
using namespace clang;
using namespace dataflow;
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>
runTypeErasedDataflowAnalysis(const CFG &Cfg,
TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv) {
// FIXME: Consider enforcing that `Cfg` meets the requirements that
// are specified in the header. This could be done by remembering
// what options were used to build `Cfg` and asserting on them here.
// FIXME: Implement work list-based algorithm to compute the fixed
// point of `Analysis::transform` for every basic block in `Cfg`.
return {};
}

View File

@ -140,8 +140,8 @@ bool AArch64TargetInfo::setABI(const std::string &Name) {
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
BranchProtectionInfo &BPI,
StringRef &Err) const {
llvm::AArch64::ParsedBranchProtection PBP;
if (!llvm::AArch64::parseBranchProtection(Spec, PBP, Err))
llvm::ARM::ParsedBranchProtection PBP;
if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
return false;
BPI.SignReturnAddr =

View File

@ -367,6 +367,28 @@ bool ARMTargetInfo::setABI(const std::string &Name) {
return false;
}
bool ARMTargetInfo::validateBranchProtection(StringRef Spec,
BranchProtectionInfo &BPI,
StringRef &Err) const {
llvm::ARM::ParsedBranchProtection PBP;
if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
return false;
BPI.SignReturnAddr =
llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
.Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)
.Case("all", LangOptions::SignReturnAddressScopeKind::All)
.Default(LangOptions::SignReturnAddressScopeKind::None);
// Don't care for the sign key, beyond issuing a warning.
if (PBP.Key == "b_key")
Err = "b-key";
BPI.SignKey = LangOptions::SignReturnAddressKeyKind::AKey;
BPI.BranchTargetEnforcement = PBP.BranchTargetEnforcement;
return true;
}
// FIXME: This should be based on Arch attributes, not CPU names.
bool ARMTargetInfo::initFeatureMap(
llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags, StringRef CPU,
@ -874,6 +896,16 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__ARM_BF16_FORMAT_ALTERNATIVE", "1");
}
if (Opts.BranchTargetEnforcement)
Builder.defineMacro("__ARM_FEATURE_BTI_DEFAULT", "1");
if (Opts.hasSignReturnAddress()) {
unsigned Value = Opts.isSignReturnAddressWithAKey() ? 1 : 2;
if (Opts.isSignReturnAddressScopeAll())
Value |= 1 << 2;
Builder.defineMacro("__ARM_FEATURE_PAC_DEFAULT", Twine(Value));
}
switch (ArchKind) {
default:
break;
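As a rough illustration (not part of the change itself), user code might consume the new ACLE macros like this; per the encoding above, bit 0 marks the A key, bit 1 the B key, and bit 2 that all functions, not only non-leaf ones, sign their return address:

#if defined(__ARM_FEATURE_BTI_DEFAULT)
// Branch target identification is enabled by default for this translation unit.
#endif
#if defined(__ARM_FEATURE_PAC_DEFAULT)
#  if __ARM_FEATURE_PAC_DEFAULT & (1 << 2)
//   Every function signs its return address.
#  else
//   Only non-leaf functions sign their return address.
#  endif
#endif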

View File

@ -123,6 +123,9 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
StringRef getABI() const override;
bool setABI(const std::string &Name) override;
bool validateBranchProtection(StringRef, BranchProtectionInfo &,
StringRef &) const override;
// FIXME: This should be based on Arch attributes, not CPU names.
bool
initFeatureMap(llvm::StringMap<bool> &Features, DiagnosticsEngine &Diags,

View File

@ -181,8 +181,10 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", Twine(1));
if (Opts.isCompatibleWithMSVC(LangOptions::MSVC2015)) {
if (Opts.CPlusPlus20)
Builder.defineMacro("_MSVC_LANG", "201705L");
if (Opts.CPlusPlus2b)
Builder.defineMacro("_MSVC_LANG", "202004L");
else if (Opts.CPlusPlus20)
Builder.defineMacro("_MSVC_LANG", "202002L");
else if (Opts.CPlusPlus17)
Builder.defineMacro("_MSVC_LANG", "201703L");
else if (Opts.CPlusPlus14)
@ -201,6 +203,14 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
}
Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
// Starting with VS 2022 17.1, MSVC predefines the below macro to inform
// users of the execution character set defined at compile time.
// The value given is the Windows Code Page Identifier:
// https://docs.microsoft.com/en-us/windows/win32/intl/code-page-identifiers
//
// Clang currently only supports UTF-8, so we'll use 65001
Builder.defineMacro("_MSVC_EXECUTION_CHARACTER_SET", "65001");
}
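A hedged sketch of how the updated macros can be observed from user code (illustrative only): when MSVC 2015+ compatibility is in effect, _MSVC_LANG now reports 202002L for C++20 and 202004L under C++2b, and _MSVC_EXECUTION_CHARACTER_SET is always 65001 because UTF-8 is the only execution character set Clang supports:

#if defined(_MSVC_LANG) && _MSVC_LANG > 202002L
// Post-C++20 (C++2b) in MSVC-compatible mode.
#elif defined(_MSVC_LANG) && _MSVC_LANG >= 202002L
// C++20 in MSVC-compatible mode.
#endif
#if defined(_MSVC_EXECUTION_CHARACTER_SET)
static_assert(_MSVC_EXECUTION_CHARACTER_SET == 65001, "Clang emits UTF-8");
#endif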
void addWindowsDefines(const llvm::Triple &Triple, const LangOptions &Opts,

View File

@ -56,9 +56,14 @@ static const unsigned SPIRDefIsGenMap[] = {
0, // opencl_generic
0, // opencl_global_device
0, // opencl_global_host
0, // cuda_device
0, // cuda_constant
0, // cuda_shared
// cuda_* address space mapping is intended for HIPSPV (HIP to SPIR-V
// translation). This mapping is enabled when the language mode is HIP.
1, // cuda_device
// A cuda_constant pointer can be cast to a default/"flat" pointer, but in
// SPIR-V casts between constant and generic pointers are not allowed. For
// this reason cuda_constant is mapped to SPIR-V CrossWorkgroup.
1, // cuda_constant
3, // cuda_shared
1, // sycl_global
5, // sycl_global_device
6, // sycl_global_host
@ -74,6 +79,8 @@ class LLVM_LIBRARY_VISIBILITY BaseSPIRTargetInfo : public TargetInfo {
protected:
BaseSPIRTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple) {
assert((Triple.isSPIR() || Triple.isSPIRV()) &&
"Invalid architecture for SPIR or SPIR-V.");
assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
"SPIR(-V) target must use unknown OS");
assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
@ -137,11 +144,16 @@ class LLVM_LIBRARY_VISIBILITY BaseSPIRTargetInfo : public TargetInfo {
// FIXME: SYCL specification considers unannotated pointers and references
// to be pointing to the generic address space. See section 5.9.3 of
// SYCL 2020 specification.
// Currently, there is no way of representing SYCL's default address space
// language semantic along with the semantics of embedded C's default
// address space in the same address space map. Hence the map needs to be
// reset to allow mapping to the desired value of 'Default' entry for SYCL.
setAddressSpaceMap(/*DefaultIsGeneric=*/Opts.SYCLIsDevice);
// Currently, there is no way of representing SYCL's and HIP's default
// address space language semantic along with the semantics of embedded C's
// default address space in the same address space map. Hence the map needs
// to be reset to allow mapping to the desired value of 'Default' entry for
// SYCL and HIP.
setAddressSpaceMap(
/*DefaultIsGeneric=*/Opts.SYCLIsDevice ||
// The address mapping from HIP language for device code is only defined
// for SPIR-V.
(getTriple().isSPIRV() && Opts.HIP && Opts.CUDAIsDevice));
}
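A minimal sketch of what the remapped HIP address spaces mean in practice, assuming HIP device code compiled for a SPIR-V triple (hypothetical example): __constant__ data lands in CrossWorkgroup (1) rather than a dedicated constant space, and __shared__ data in Workgroup (3):

#include <hip/hip_runtime.h>
__constant__ float kScale = 2.0f;              // lowered to SPIR-V CrossWorkgroup (1)
__global__ void scale(float *out, const float *in) {
  __shared__ float tile[64];                   // lowered to SPIR-V Workgroup (3)
  tile[threadIdx.x] = in[threadIdx.x] * kScale;
  __syncthreads();
  out[threadIdx.x] = tile[threadIdx.x];
}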
void setSupportedOpenCLOpts() override {
@ -159,6 +171,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRTargetInfo : public BaseSPIRTargetInfo {
public:
SPIRTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: BaseSPIRTargetInfo(Triple, Opts) {
assert(Triple.isSPIR() && "Invalid architecture for SPIR.");
assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
"SPIR target must use unknown OS");
assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
@ -177,6 +190,8 @@ class LLVM_LIBRARY_VISIBILITY SPIR32TargetInfo : public SPIRTargetInfo {
public:
SPIR32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: SPIRTargetInfo(Triple, Opts) {
assert(Triple.getArch() == llvm::Triple::spir &&
"Invalid architecture for 32-bit SPIR.");
PointerWidth = PointerAlign = 32;
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
@ -192,6 +207,8 @@ class LLVM_LIBRARY_VISIBILITY SPIR64TargetInfo : public SPIRTargetInfo {
public:
SPIR64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: SPIRTargetInfo(Triple, Opts) {
assert(Triple.getArch() == llvm::Triple::spir64 &&
"Invalid architecture for 64-bit SPIR.");
PointerWidth = PointerAlign = 64;
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;
@ -207,6 +224,7 @@ class LLVM_LIBRARY_VISIBILITY SPIRVTargetInfo : public BaseSPIRTargetInfo {
public:
SPIRVTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: BaseSPIRTargetInfo(Triple, Opts) {
assert(Triple.isSPIRV() && "Invalid architecture for SPIR-V.");
assert(getTriple().getOS() == llvm::Triple::UnknownOS &&
"SPIR-V target must use unknown OS");
assert(getTriple().getEnvironment() == llvm::Triple::UnknownEnvironment &&
@ -225,6 +243,8 @@ class LLVM_LIBRARY_VISIBILITY SPIRV32TargetInfo : public SPIRVTargetInfo {
public:
SPIRV32TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: SPIRVTargetInfo(Triple, Opts) {
assert(Triple.getArch() == llvm::Triple::spirv32 &&
"Invalid architecture for 32-bit SPIR-V.");
PointerWidth = PointerAlign = 32;
SizeType = TargetInfo::UnsignedInt;
PtrDiffType = IntPtrType = TargetInfo::SignedInt;
@ -240,6 +260,8 @@ class LLVM_LIBRARY_VISIBILITY SPIRV64TargetInfo : public SPIRVTargetInfo {
public:
SPIRV64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: SPIRVTargetInfo(Triple, Opts) {
assert(Triple.getArch() == llvm::Triple::spirv64 &&
"Invalid architecture for 64-bit SPIR-V.");
PointerWidth = PointerAlign = 64;
SizeType = TargetInfo::UnsignedLong;
PtrDiffType = IntPtrType = TargetInfo::SignedLong;

View File

@ -239,9 +239,9 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512ER = true;
} else if (Feature == "+avx512fp16") {
HasAVX512FP16 = true;
HasFloat16 = true;
} else if (Feature == "+avx512pf") {
HasAVX512PF = true;
HasLegalHalfType = true;
} else if (Feature == "+avx512dq") {
HasAVX512DQ = true;
} else if (Feature == "+avx512bitalg") {
@ -369,8 +369,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
.Default(NoXOP);
XOPLevel = std::max(XOPLevel, XLevel);
}
// Turn on _float16 for x86 (feature sse2)
HasFloat16 = SSELevel >= SSE2;
// LLVM doesn't have a separate switch for fpmath, so only accept it if it
// matches the selected sse level.
@ -384,12 +382,10 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
SimdDefaultAlign =
hasFeature("avx512f") ? 512 : hasFeature("avx") ? 256 : 128;
if (!HasX87) {
if (LongDoubleFormat == &llvm::APFloat::x87DoubleExtended())
HasLongDouble = false;
if (getTriple().getArch() == llvm::Triple::x86)
HasFPReturn = false;
}
// FIXME: We should allow long double type on 32-bits to match with GCC.
// This requires backend to be able to lower f80 without x87 first.
if (!HasX87 && LongDoubleFormat == &llvm::APFloat::x87DoubleExtended())
HasLongDouble = false;
return true;
}

View File

@ -1034,8 +1034,9 @@ void EmitAssemblyHelper::EmitAssemblyWithLegacyPassManager(
if (!ThinLinkOS)
return;
}
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
CodeGenOpts.EnableSplitLTOUnit);
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
CodeGenOpts.EnableSplitLTOUnit);
PerModulePasses.add(createWriteThinLTOBitcodePass(
*OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
} else {
@ -1049,8 +1050,9 @@ void EmitAssemblyHelper::EmitAssemblyWithLegacyPassManager(
if (EmitLTOSummary) {
if (!TheModule->getModuleFlag("ThinLTO"))
TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
uint32_t(1));
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
uint32_t(1));
}
PerModulePasses.add(createBitcodeWriterPass(
@ -1451,8 +1453,9 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (!ThinLinkOS)
return;
}
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
CodeGenOpts.EnableSplitLTOUnit);
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
CodeGenOpts.EnableSplitLTOUnit);
MPM.addPass(ThinLTOBitcodeWriterPass(*OS, ThinLinkOS ? &ThinLinkOS->os()
: nullptr));
} else {
@ -1465,8 +1468,9 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
if (EmitLTOSummary) {
if (!TheModule->getModuleFlag("ThinLTO"))
TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
uint32_t(1));
if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
uint32_t(1));
}
MPM.addPass(
BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists, EmitLTOSummary));

View File

@ -524,12 +524,14 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
llvm_unreachable("Already handled!");
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
return;
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
FailureOrder, Size, Order, Scope);
return;
@ -565,6 +567,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
}
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load: {
llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
@ -576,6 +579,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n: {
llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@ -586,6 +590,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
}
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
@ -597,6 +602,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
: llvm::Instruction::Add;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
@ -618,6 +624,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOpMinMax = true;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_min:
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
@ -628,6 +635,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOpMinMax = true;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_max:
Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
@ -638,6 +646,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOp = llvm::Instruction::And;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
Op = llvm::AtomicRMWInst::And;
@ -647,6 +656,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOp = llvm::Instruction::Or;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
Op = llvm::AtomicRMWInst::Or;
@ -656,6 +666,7 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
PostOp = llvm::Instruction::Xor;
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
Op = llvm::AtomicRMWInst::Xor;
@ -838,6 +849,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
break;
@ -857,7 +869,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
case AtomicExpr::AO__atomic_compare_exchange_n:
case AtomicExpr::AO__atomic_compare_exchange:
Val1 = EmitPointerWithAlignment(E->getVal1());
@ -873,6 +887,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_sub:
if (MemTy->isPointerType()) {
@ -901,7 +916,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__atomic_store_n:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__c11_atomic_fetch_and:
@ -916,8 +933,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_nand:
case AtomicExpr::AO__atomic_and_fetch:
case AtomicExpr::AO__atomic_or_fetch:
@ -926,7 +946,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__atomic_min_fetch:
case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
Val1 = EmitValToTemp(*this, E->getVal1());
break;
}
@ -968,11 +990,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__atomic_fetch_nand:
@ -984,6 +1009,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__opencl_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__atomic_add_fetch:
@ -993,7 +1019,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__atomic_sub_fetch:
case AtomicExpr::AO__atomic_xor_fetch:
case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__atomic_max_fetch:
case AtomicExpr::AO__atomic_min_fetch:
// For these, only library calls for certain sizes exist.
@ -1014,10 +1042,15 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_store_n:
@ -1079,7 +1112,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
LibCallName = "__atomic_compare_exchange";
@ -1101,6 +1136,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
MemTy, E->getExprLoc(), TInfo.Width);
@ -1109,6 +1145,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// void __atomic_store_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
LibCallName = "__atomic_store";
@ -1121,6 +1158,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
// T __atomic_load_N(T *mem, int order)
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_load_n:
LibCallName = "__atomic_load";
@ -1133,6 +1171,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__opencl_atomic_fetch_add:
case AtomicExpr::AO__atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
LoweredMemTy, E->getExprLoc(), TInfo.Width);
@ -1144,6 +1183,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@ -1156,6 +1196,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__opencl_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@ -1180,6 +1221,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__opencl_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
@ -1190,6 +1232,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_min:
case AtomicExpr::AO__atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__opencl_atomic_fetch_min:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_min"
@ -1202,6 +1245,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
LLVM_FALLTHROUGH;
case AtomicExpr::AO__c11_atomic_fetch_max:
case AtomicExpr::AO__atomic_fetch_max:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__opencl_atomic_fetch_max:
LibCallName = E->getValueType()->isSignedIntegerType()
? "__atomic_fetch_max"
@ -1291,10 +1335,12 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
E->getOp() == AtomicExpr::AO__hip_atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store ||
E->getOp() == AtomicExpr::AO__atomic_store_n;
bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
E->getOp() == AtomicExpr::AO__hip_atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load_n;

View File

@ -170,8 +170,9 @@ static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
// Convert the type of the pointer to a pointer to the stored type.
Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
unsigned SrcAddrSpace = Address->getType()->getPointerAddressSpace();
Value *BC = CGF.Builder.CreateBitCast(
Address, llvm::PointerType::getUnqual(Val->getType()), "cast");
Address, llvm::PointerType::get(Val->getType(), SrcAddrSpace), "cast");
LValue LV = CGF.MakeNaturalAlignAddrLValue(BC, E->getArg(0)->getType());
LV.setNontemporal(true);
CGF.EmitStoreOfScalar(Val, LV, false);

View File

@ -4510,6 +4510,9 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Address Replacement(CGF.Builder.CreateLoad(Pair.second),
CGF.getContext().getDeclAlign(Pair.first));
Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
if (auto *DI = CGF.getDebugInfo())
DI->EmitDeclareOfAutoVariable(Pair.first, Pair.second.getPointer(),
CGF.Builder, /*UsePointerValue*/ true);
}
// Adjust mapping for internal locals by mapping actual memory instead of
// a pointer to this memory.

View File

@ -710,10 +710,25 @@ void CodeGenModule::Release() {
1);
}
if (Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
// Add module metadata for return address signing (ignoring
// non-leaf/all) and stack tagging. These are actually turned on by function
// attributes, but we use module metadata to emit build attributes. This is
// needed for LTO, where the function attributes are inside bitcode
// serialised into a global variable by the time build attributes are
// emitted, so we can't access them.
if (Context.getTargetInfo().hasFeature("ptrauth") &&
LangOpts.getSignReturnAddressScope() !=
LangOptions::SignReturnAddressScopeKind::None)
getModule().addModuleFlag(llvm::Module::Override,
"sign-return-address-buildattr", 1);
if (LangOpts.Sanitize.has(SanitizerKind::MemTag))
getModule().addModuleFlag(llvm::Module::Override,
"tag-stack-memory-buildattr", 1);
if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be) {
getModule().addModuleFlag(llvm::Module::Error,
"branch-target-enforcement",
getModule().addModuleFlag(llvm::Module::Error, "branch-target-enforcement",
LangOpts.BranchTargetEnforcement);
getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
@ -722,9 +737,11 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
LangOpts.isSignReturnAddressScopeAll());
getModule().addModuleFlag(llvm::Module::Error,
"sign-return-address-with-bkey",
!LangOpts.isSignReturnAddressWithAKey());
if (Arch != llvm::Triple::thumb && Arch != llvm::Triple::thumbeb) {
getModule().addModuleFlag(llvm::Module::Error,
"sign-return-address-with-bkey",
!LangOpts.isSignReturnAddressWithAKey());
}
}
if (!CodeGenOpts.MemoryProfileOutput.empty()) {
@ -1266,6 +1283,20 @@ static bool isUniqueInternalLinkageDecl(GlobalDecl GD,
(CGM.getFunctionLinkage(GD) == llvm::GlobalValue::InternalLinkage);
}
static void AppendTargetClonesMangling(const CodeGenModule &CGM,
const TargetClonesAttr *Attr,
unsigned VersionIndex,
raw_ostream &Out) {
Out << '.';
StringRef FeatureStr = Attr->getFeatureStr(VersionIndex);
if (FeatureStr.startswith("arch="))
Out << "arch_" << FeatureStr.substr(sizeof("arch=") - 1);
else
Out << FeatureStr;
Out << '.' << Attr->getMangledIndex(VersionIndex);
}
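To make the suffix scheme concrete, a hedged sketch of the GCC-style attribute this mangling serves; the numeric index comes from getMangledIndex(), so the exact names below are only indicative:

// Hypothetical use of the attribute handled above.
__attribute__((target_clones("avx2", "arch=atom", "default")))
int foo(int x) { return x + 1; }
// Each clone is emitted under a suffixed symbol such as foo.avx2.0,
// foo.arch_atom.1 and foo.default.2; a resolver then dispatches among them
// at load time (through an ifunc where the target supports it).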
static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
const NamedDecl *ND,
bool OmitMultiVersionMangling = false) {
@ -1319,6 +1350,10 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
case MultiVersionKind::Target:
AppendTargetMangling(CGM, FD->getAttr<TargetAttr>(), Out);
break;
case MultiVersionKind::TargetClones:
AppendTargetClonesMangling(CGM, FD->getAttr<TargetClonesAttr>(),
GD.getMultiVersionIndex(), Out);
break;
case MultiVersionKind::None:
llvm_unreachable("None multiversion type isn't valid here");
}
@ -1983,8 +2018,9 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
FD = FD ? FD->getMostRecentDecl() : FD;
const auto *TD = FD ? FD->getAttr<TargetAttr>() : nullptr;
const auto *SD = FD ? FD->getAttr<CPUSpecificAttr>() : nullptr;
const auto *TC = FD ? FD->getAttr<TargetClonesAttr>() : nullptr;
bool AddedAttr = false;
if (TD || SD) {
if (TD || SD || TC) {
llvm::StringMap<bool> FeatureMap;
getContext().getFunctionFeatureMap(FeatureMap, GD);
@ -3226,6 +3262,12 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
for (unsigned I = 0; I < Spec->cpus_size(); ++I)
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
// Requires multiple emits.
} else if (FD->isTargetClonesMultiVersion()) {
auto *Clone = FD->getAttr<TargetClonesAttr>();
for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
if (Clone->isFirstOfVersion(I))
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
EmitTargetClonesResolver(GD);
} else
EmitGlobalFunctionDefinition(GD, GV);
}
@ -3307,6 +3349,63 @@ llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::WeakODRLinkage;
}
void CodeGenModule::EmitTargetClonesResolver(GlobalDecl GD) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
assert(FD && "Not a FunctionDecl?");
const auto *TC = FD->getAttr<TargetClonesAttr>();
assert(TC && "Not a target_clones Function?");
QualType CanonTy = Context.getCanonicalType(FD->getType());
llvm::Type *DeclTy = getTypes().ConvertType(CanonTy);
if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
DeclTy = getTypes().GetFunctionType(FInfo);
}
llvm::Function *ResolverFunc;
if (getTarget().supportsIFunc()) {
auto *IFunc = cast<llvm::GlobalIFunc>(
GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
ResolverFunc = cast<llvm::Function>(IFunc->getResolver());
} else
ResolverFunc =
cast<llvm::Function>(GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
++VersionIndex) {
if (!TC->isFirstOfVersion(VersionIndex))
continue;
StringRef Version = TC->getFeatureStr(VersionIndex);
StringRef MangledName =
getMangledName(GD.getWithMultiVersionIndex(VersionIndex));
llvm::Constant *Func = GetGlobalValue(MangledName);
assert(Func &&
"Should have already been created before calling resolver emit");
StringRef Architecture;
llvm::SmallVector<StringRef, 1> Feature;
if (Version.startswith("arch="))
Architecture = Version.drop_front(sizeof("arch=") - 1);
else if (Version != "default")
Feature.push_back(Version);
Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
}
const TargetInfo &TI = getTarget();
std::stable_sort(
Options.begin(), Options.end(),
[&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
const CodeGenFunction::MultiVersionResolverOption &RHS) {
return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
});
CodeGenFunction CGF(*this);
CGF.EmitMultiVersionResolver(ResolverFunc, Options);
}
void CodeGenModule::emitMultiVersionFunctions() {
std::vector<GlobalDecl> MVFuncsToEmit;
MultiVersionFuncs.swap(MVFuncsToEmit);
@ -3511,8 +3610,25 @@ llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
// Since this is the first time we've created this IFunc, make sure
// that we put this multiversioned function into the list to be
// replaced later if necessary (target multiversioning only).
if (!FD->isCPUDispatchMultiVersion() && !FD->isCPUSpecificMultiVersion())
if (FD->isTargetMultiVersion())
MultiVersionFuncs.push_back(GD);
else if (FD->isTargetClonesMultiVersion()) {
// In target_clones multiversioning, make sure we emit this if used.
auto DDI =
DeferredDecls.find(getMangledName(GD.getWithMultiVersionIndex(0)));
if (DDI != DeferredDecls.end()) {
addDeferredDeclToEmit(GD);
DeferredDecls.erase(DDI);
} else {
// Emit the symbol of the 1st variant, so that the deferred decls know we
// need it; otherwise the only global value will be the resolver/ifunc,
// which ends up getting broken if we search for it with GetGlobalValue.
GetOrCreateLLVMFunction(
getMangledName(GD.getWithMultiVersionIndex(0)), DeclTy, FD,
/*ForVTable=*/false, /*DontDefer=*/true,
/*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
}
}
if (getTarget().supportsIFunc()) {
llvm::Type *ResolverType = llvm::FunctionType::get(

View File

@ -1500,6 +1500,7 @@ class CodeGenModule : public CodeGenTypeCache {
void EmitAliasDefinition(GlobalDecl GD);
void emitIFuncDefinition(GlobalDecl GD);
void emitCPUDispatchDefinition(GlobalDecl GD);
void EmitTargetClonesResolver(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
void EmitObjCIvarInitializations(ObjCImplementationDecl *D);

View File

@ -6364,6 +6364,26 @@ class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD)
return;
auto *Fn = cast<llvm::Function>(GV);
if (const auto *TA = FD->getAttr<TargetAttr>()) {
ParsedTargetAttr Attr = TA->parse();
if (!Attr.BranchProtection.empty()) {
TargetInfo::BranchProtectionInfo BPI;
StringRef DiagMsg;
(void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
BPI, DiagMsg);
static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
"Unexpected SignReturnAddressScopeKind");
Fn->addFnAttr("sign-return-address",
SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
Fn->addFnAttr("branch-target-enforcement",
BPI.BranchTargetEnforcement ? "true" : "false");
}
}
const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
if (!Attr)
@ -6379,8 +6399,6 @@ class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
}
llvm::Function *Fn = cast<llvm::Function>(GV);
Fn->addFnAttr("interrupt", Kind);
ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
@ -9339,17 +9357,25 @@ AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
llvm::LLVMContext &Ctx) const {
std::string Name;
switch (Scope) {
case SyncScope::HIPSingleThread:
Name = "singlethread";
break;
case SyncScope::HIPWavefront:
case SyncScope::OpenCLSubGroup:
Name = "wavefront";
break;
case SyncScope::HIPWorkgroup:
case SyncScope::OpenCLWorkGroup:
Name = "workgroup";
break;
case SyncScope::HIPAgent:
case SyncScope::OpenCLDevice:
Name = "agent";
break;
case SyncScope::HIPSystem:
case SyncScope::OpenCLAllSVMDevices:
Name = "";
break;
case SyncScope::OpenCLSubGroup:
Name = "wavefront";
}
if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
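For orientation, a sketch of how these scopes surface in source code; the __hip_atomic_* builtins and __HIP_MEMORY_SCOPE_* constants are introduced elsewhere in this merge, so treat their exact spelling here as an assumption:

// Hypothetical HIP device code: the workgroup scope is lowered to the
// "workgroup" syncscope on AMDGPU via the switch above.
__device__ void bump(int *counter) {
  __hip_atomic_fetch_add(counter, 1, __ATOMIC_RELAXED,
                         __HIP_MEMORY_SCOPE_WORKGROUP);
}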

View File

@ -38,6 +38,7 @@
#include "ToolChains/NaCl.h"
#include "ToolChains/NetBSD.h"
#include "ToolChains/OpenBSD.h"
#include "ToolChains/PPCFreeBSD.h"
#include "ToolChains/PPCLinux.h"
#include "ToolChains/PS4CPU.h"
#include "ToolChains/RISCVToolchain.h"
@ -5302,7 +5303,11 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
TC = std::make_unique<toolchains::NetBSD>(*this, Target, Args);
break;
case llvm::Triple::FreeBSD:
TC = std::make_unique<toolchains::FreeBSD>(*this, Target, Args);
if (Target.isPPC())
TC = std::make_unique<toolchains::PPCFreeBSDToolChain>(*this, Target,
Args);
else
TC = std::make_unique<toolchains::FreeBSD>(*this, Target, Args);
break;
case llvm::Triple::Minix:
TC = std::make_unique<toolchains::Minix>(*this, Target, Args);

View File

@ -403,7 +403,7 @@ shouldUseExceptionTablesForObjCExceptions(const ObjCRuntime &runtime,
}
/// Adds exception related arguments to the driver command arguments. There's a
/// master flag, -fexceptions and also language specific flags to enable/disable
/// main flag, -fexceptions and also language specific flags to enable/disable
/// C++ and Objective-C exceptions. This makes it possible to for example
/// disable C++ exceptions but enable Objective-C exceptions.
static bool addExceptionArgs(const ArgList &Args, types::ID InputType,
@ -1603,6 +1603,49 @@ void RenderARMABI(const Driver &D, const llvm::Triple &Triple,
}
}
static void CollectARMPACBTIOptions(const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs, bool isAArch64) {
const Arg *A = isAArch64
? Args.getLastArg(options::OPT_msign_return_address_EQ,
options::OPT_mbranch_protection_EQ)
: Args.getLastArg(options::OPT_mbranch_protection_EQ);
if (!A)
return;
StringRef Scope, Key;
bool IndirectBranches;
if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
Scope = A->getValue();
if (!Scope.equals("none") && !Scope.equals("non-leaf") &&
!Scope.equals("all"))
D.Diag(diag::err_invalid_branch_protection)
<< Scope << A->getAsString(Args);
Key = "a_key";
IndirectBranches = false;
} else {
StringRef DiagMsg;
llvm::ARM::ParsedBranchProtection PBP;
if (!llvm::ARM::parseBranchProtection(A->getValue(), PBP, DiagMsg))
D.Diag(diag::err_invalid_branch_protection)
<< DiagMsg << A->getAsString(Args);
if (!isAArch64 && PBP.Key == "b_key")
D.Diag(diag::warn_unsupported_branch_protection)
<< "b-key" << A->getAsString(Args);
Scope = PBP.Scope;
Key = PBP.Key;
IndirectBranches = PBP.BranchTargetEnforcement;
}
CmdArgs.push_back(
Args.MakeArgString(Twine("-msign-return-address=") + Scope));
if (!Scope.equals("none"))
CmdArgs.push_back(
Args.MakeArgString(Twine("-msign-return-address-key=") + Key));
if (IndirectBranches)
CmdArgs.push_back("-mbranch-target-enforce");
}
void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
ArgStringList &CmdArgs, bool KernelOrKext) const {
RenderARMABI(getToolChain().getDriver(), Triple, Args, CmdArgs);
@ -1644,6 +1687,10 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
CmdArgs.push_back("-mcmse");
AddAAPCSVolatileBitfieldArgs(Args, CmdArgs);
// Enable/disable return address signing and indirect branch targets.
CollectARMPACBTIOptions(getToolChain().getDriver(), Args, CmdArgs,
false /*isAArch64*/);
}
void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
@ -1783,40 +1830,8 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
}
// Enable/disable return address signing and indirect branch targets.
if (Arg *A = Args.getLastArg(options::OPT_msign_return_address_EQ,
options::OPT_mbranch_protection_EQ)) {
const Driver &D = getToolChain().getDriver();
StringRef Scope, Key;
bool IndirectBranches;
if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
Scope = A->getValue();
if (!Scope.equals("none") && !Scope.equals("non-leaf") &&
!Scope.equals("all"))
D.Diag(diag::err_invalid_branch_protection)
<< Scope << A->getAsString(Args);
Key = "a_key";
IndirectBranches = false;
} else {
StringRef Err;
llvm::AArch64::ParsedBranchProtection PBP;
if (!llvm::AArch64::parseBranchProtection(A->getValue(), PBP, Err))
D.Diag(diag::err_invalid_branch_protection)
<< Err << A->getAsString(Args);
Scope = PBP.Scope;
Key = PBP.Key;
IndirectBranches = PBP.BranchTargetEnforcement;
}
CmdArgs.push_back(
Args.MakeArgString(Twine("-msign-return-address=") + Scope));
CmdArgs.push_back(
Args.MakeArgString(Twine("-msign-return-address-key=") + Key));
if (IndirectBranches)
CmdArgs.push_back("-mbranch-target-enforce");
}
CollectARMPACBTIOptions(getToolChain().getDriver(), Args, CmdArgs,
true /*isAArch64*/);
// Handle -msve_vector_bits=<bits>
if (Arg *A = Args.getLastArg(options::OPT_msve_vector_bits_EQ)) {
@ -5821,9 +5836,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden_static_local_var,
options::OPT_fno_visibility_inlines_hidden_static_local_var);
Args.AddLastArg(CmdArgs, options::OPT_fvisibility_global_new_delete_hidden);
Args.AddLastArg(CmdArgs, options::OPT_fnew_infallible);
Args.AddLastArg(CmdArgs, options::OPT_ftlsmodel_EQ);
if (Args.hasFlag(options::OPT_fnew_infallible,
options::OPT_fno_new_infallible, false))
CmdArgs.push_back("-fnew-infallible");
if (Args.hasFlag(options::OPT_fno_operator_names,
options::OPT_foperator_names, false))
CmdArgs.push_back("-fno-operator-names");
@ -5886,7 +5904,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// runtime.
if (Args.hasFlag(options::OPT_fopenmp_target_new_runtime,
options::OPT_fno_openmp_target_new_runtime,
/*Default=*/false))
/*Default=*/!getToolChain().getTriple().isAMDGCN()))
CmdArgs.push_back("-fopenmp-target-new-runtime");
// When in OpenMP offloading mode, enable debugging on the device.
@ -6659,6 +6677,35 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
// Turn -fplugin-arg-pluginname-key=value into
// -plugin-arg-pluginname key=value
// GCC has an actual plugin_argument struct with key/value pairs that it
// passes to its plugins, but we don't, so just pass it on as-is.
//
// The syntax for -fplugin-arg- is ambiguous if both plugin name and
// argument key are allowed to contain dashes. GCC therefore only
// allows dashes in the key. We do the same.
for (const Arg *A : Args.filtered(options::OPT_fplugin_arg)) {
auto ArgValue = StringRef(A->getValue());
auto FirstDashIndex = ArgValue.find('-');
StringRef PluginName = ArgValue.substr(0, FirstDashIndex);
StringRef Arg = ArgValue.substr(FirstDashIndex + 1);
A->claim();
if (FirstDashIndex == StringRef::npos || Arg.empty()) {
if (PluginName.empty()) {
D.Diag(diag::warn_drv_missing_plugin_name) << A->getAsString(Args);
} else {
D.Diag(diag::warn_drv_missing_plugin_arg)
<< PluginName << A->getAsString(Args);
}
continue;
}
CmdArgs.push_back(Args.MakeArgString(Twine("-plugin-arg-") + PluginName));
CmdArgs.push_back(Args.MakeArgString(Arg));
}
// Forward -fpass-plugin=name.so to -cc1.
for (const Arg *A : Args.filtered(options::OPT_fpass_plugin_EQ)) {
CmdArgs.push_back(

View File

@ -745,7 +745,7 @@ void CudaToolChain::addClangTargetOptions(
std::string BitcodeSuffix;
if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
options::OPT_fno_openmp_target_new_runtime, false))
options::OPT_fno_openmp_target_new_runtime, true))
BitcodeSuffix = "new-nvptx-" + GpuArch.str();
else
BitcodeSuffix = "nvptx-" + GpuArch.str();

View File

@ -32,7 +32,8 @@ void Flang::AddFortranDialectOptions(const ArgList &Args,
options::OPT_fxor_operator, options::OPT_fno_xor_operator,
options::OPT_falternative_parameter_statement,
options::OPT_fdefault_real_8, options::OPT_fdefault_integer_8,
options::OPT_fdefault_double_8, options::OPT_flarge_sizes});
options::OPT_fdefault_double_8, options::OPT_flarge_sizes,
options::OPT_fno_automatic});
}
void Flang::AddPreprocessingOptions(const ArgList &Args,

View File

@ -391,7 +391,8 @@ FreeBSD::FreeBSD(const Driver &D, const llvm::Triple &Triple,
}
ToolChain::CXXStdlibType FreeBSD::GetDefaultCXXStdlibType() const {
if (getTriple().getOSMajorVersion() >= 10)
unsigned Major = getTriple().getOSMajorVersion();
if (Major >= 10 || Major == 0)
return ToolChain::CST_Libcxx;
return ToolChain::CST_Libstdcxx;
}

View File

@ -421,6 +421,9 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
(Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
tools::arm::getARMFloatABI(*this, Args) == tools::arm::FloatABI::Hard))
ArchName += "hf";
if (Arch == llvm::Triple::ppc &&
Triple.getSubArch() == llvm::Triple::PPCSubArch_spe)
ArchName = "powerpc-sf";
return "/lib/ld-musl-" + ArchName + ".so.1";
}

View File

@ -0,0 +1,28 @@
//===-- PPCFreeBSD.cpp - PowerPC ToolChain Implementations ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "PPCFreeBSD.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/Options.h"
#include "llvm/Support/Path.h"
using namespace clang::driver::toolchains;
using namespace llvm::opt;
void PPCFreeBSDToolChain::AddClangSystemIncludeArgs(
const ArgList &DriverArgs, ArgStringList &CC1Args) const {
if (!DriverArgs.hasArg(clang::driver::options::OPT_nostdinc) &&
!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
const Driver &D = getDriver();
SmallString<128> P(D.ResourceDir);
llvm::sys::path::append(P, "include", "ppc_wrappers");
addSystemInclude(DriverArgs, CC1Args, P);
}
FreeBSD::AddClangSystemIncludeArgs(DriverArgs, CC1Args);
}

View File

@ -0,0 +1,33 @@
//===--- PPCFreeBSD.h - PowerPC ToolChain Implementations -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_FREEBSD_H
#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_FREEBSD_H
#include "FreeBSD.h"
namespace clang {
namespace driver {
namespace toolchains {
class LLVM_LIBRARY_VISIBILITY PPCFreeBSDToolChain : public FreeBSD {
public:
PPCFreeBSDToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args)
: FreeBSD(D, Triple, Args) {}
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
};
} // end namespace toolchains
} // end namespace driver
} // end namespace clang
#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_PPC_FREEBSD_H

View File

@ -80,6 +80,7 @@ class LLVM_LIBRARY_VISIBILITY PS4CPU : public Generic_ELF {
return LangOptions::SSPStrong;
}
unsigned GetDefaultDwarfVersion() const override { return 4; }
llvm::DebuggerKind getDefaultDebuggerTuning() const override {
return llvm::DebuggerKind::SCE;
}

View File

@ -1984,9 +1984,17 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
} else if (Current.is(TT_LineComment) &&
(Current.Previous == nullptr ||
Current.Previous->isNot(TT_ImplicitStringLiteral))) {
bool RegularComments = [&]() {
for (const FormatToken *T = &Current; T && T->is(TT_LineComment);
T = T->Next) {
if (!(T->TokenText.startswith("//") || T->TokenText.startswith("#")))
return false;
}
return true;
}();
if (!Style.ReflowComments ||
CommentPragmasRegex.match(Current.TokenText.substr(2)) ||
switchesFormatting(Current))
switchesFormatting(Current) || !RegularComments)
return nullptr;
return std::make_unique<BreakableLineCommentSection>(
Current, StartColumn, /*InPPDirective=*/false, Encoding, Style);
@ -2195,11 +2203,10 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// When breaking before a tab character, it may be moved by a few columns,
// but will still be expanded to the next tab stop, so we don't save any
// columns.
if (NewRemainingTokenColumns == RemainingTokenColumns) {
if (NewRemainingTokenColumns >= RemainingTokenColumns) {
// FIXME: Do we need to adjust the penalty?
break;
}
assert(NewRemainingTokenColumns < RemainingTokenColumns);
LLVM_DEBUG(llvm::dbgs() << " Breaking at: " << TailOffset + Split.first
<< ", " << Split.second << "\n");

View File

@ -2988,9 +2988,8 @@ reformat(const FormatStyle &Style, StringRef Code,
// JSON only needs the formatting passing.
if (Style.isJson()) {
std::vector<tooling::Range> Ranges(1, tooling::Range(0, Code.size()));
auto Env =
Environment::make(Code, FileName, Ranges, FirstStartColumn,
NextStartColumn, LastStartColumn);
auto Env = Environment::make(Code, FileName, Ranges, FirstStartColumn,
NextStartColumn, LastStartColumn);
if (!Env)
return {};
// Perform the actual formatting pass.
@ -3118,9 +3117,7 @@ tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style,
auto Env = Environment::make(Code, FileName, Ranges);
if (!Env)
return {};
return NamespaceEndCommentsFixer(*Env, Style)
.process()
.first;
return NamespaceEndCommentsFixer(*Env, Style).process().first;
}
tooling::Replacements sortUsingDeclarations(const FormatStyle &Style,
@ -3130,9 +3127,7 @@ tooling::Replacements sortUsingDeclarations(const FormatStyle &Style,
auto Env = Environment::make(Code, FileName, Ranges);
if (!Env)
return {};
return UsingDeclarationsSorter(*Env, Style)
.process()
.first;
return UsingDeclarationsSorter(*Env, Style).process().first;
}
LangOptions getFormattingLangOpts(const FormatStyle &Style) {

View File

@ -76,6 +76,7 @@ namespace format {
TYPE(LineComment) \
TYPE(MacroBlockBegin) \
TYPE(MacroBlockEnd) \
TYPE(ModulePartitionColon) \
TYPE(NamespaceMacro) \
TYPE(NonNullAssertion) \
TYPE(NullCoalescingEqual) \

View File

@ -37,27 +37,40 @@ FormatTokenLexer::FormatTokenLexer(
getFormattingLangOpts(Style)));
Lex->SetKeepWhitespaceMode(true);
for (const std::string &ForEachMacro : Style.ForEachMacros)
Macros.insert({&IdentTable.get(ForEachMacro), TT_ForEachMacro});
for (const std::string &IfMacro : Style.IfMacros)
Macros.insert({&IdentTable.get(IfMacro), TT_IfMacro});
for (const std::string &AttributeMacro : Style.AttributeMacros)
Macros.insert({&IdentTable.get(AttributeMacro), TT_AttributeMacro});
for (const std::string &StatementMacro : Style.StatementMacros)
Macros.insert({&IdentTable.get(StatementMacro), TT_StatementMacro});
for (const std::string &TypenameMacro : Style.TypenameMacros)
Macros.insert({&IdentTable.get(TypenameMacro), TT_TypenameMacro});
for (const std::string &NamespaceMacro : Style.NamespaceMacros)
Macros.insert({&IdentTable.get(NamespaceMacro), TT_NamespaceMacro});
for (const std::string &ForEachMacro : Style.ForEachMacros) {
auto Identifier = &IdentTable.get(ForEachMacro);
Macros.insert({Identifier, TT_ForEachMacro});
}
for (const std::string &IfMacro : Style.IfMacros) {
auto Identifier = &IdentTable.get(IfMacro);
Macros.insert({Identifier, TT_IfMacro});
}
for (const std::string &AttributeMacro : Style.AttributeMacros) {
auto Identifier = &IdentTable.get(AttributeMacro);
Macros.insert({Identifier, TT_AttributeMacro});
}
for (const std::string &StatementMacro : Style.StatementMacros) {
auto Identifier = &IdentTable.get(StatementMacro);
Macros.insert({Identifier, TT_StatementMacro});
}
for (const std::string &TypenameMacro : Style.TypenameMacros) {
auto Identifier = &IdentTable.get(TypenameMacro);
Macros.insert({Identifier, TT_TypenameMacro});
}
for (const std::string &NamespaceMacro : Style.NamespaceMacros) {
auto Identifier = &IdentTable.get(NamespaceMacro);
Macros.insert({Identifier, TT_NamespaceMacro});
}
for (const std::string &WhitespaceSensitiveMacro :
Style.WhitespaceSensitiveMacros) {
Macros.insert(
{&IdentTable.get(WhitespaceSensitiveMacro), TT_UntouchableMacroFunc});
auto Identifier = &IdentTable.get(WhitespaceSensitiveMacro);
Macros.insert({Identifier, TT_UntouchableMacroFunc});
}
for (const std::string &StatementAttributeLikeMacro :
Style.StatementAttributeLikeMacros)
Macros.insert({&IdentTable.get(StatementAttributeLikeMacro),
TT_StatementAttributeLikeMacro});
Style.StatementAttributeLikeMacros) {
auto Identifier = &IdentTable.get(StatementAttributeLikeMacro);
Macros.insert({Identifier, TT_StatementAttributeLikeMacro});
}
}
ArrayRef<FormatToken *> FormatTokenLexer::lex() {
@ -739,6 +752,8 @@ bool FormatTokenLexer::tryMerge_TMacro() {
Tokens.pop_back();
Tokens.pop_back();
Tokens.back() = String;
if (FirstInLineIndex >= Tokens.size())
FirstInLineIndex = Tokens.size() - 1;
return true;
}

View File

@ -553,9 +553,7 @@ tooling::Replacements sortJavaScriptImports(const FormatStyle &Style,
auto Env = Environment::make(Code, FileName, Ranges);
if (!Env)
return {};
return JavaScriptImportSorter(*Env, Style)
.process()
.first;
return JavaScriptImportSorter(*Env, Style).process().first;
}
} // end namespace format

View File

@ -37,7 +37,7 @@ namespace format {
// FIXME: Instead of printing the diagnostic we should store it and have a
// better way to return errors through the format APIs.
class FatalDiagnosticConsumer: public DiagnosticConsumer {
class FatalDiagnosticConsumer : public DiagnosticConsumer {
public:
void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
const Diagnostic &Info) override {
@ -71,7 +71,8 @@ Environment::make(StringRef Code, StringRef FileName,
}
// Validate that we can get the buffer data without a fatal error.
Env->SM.getBufferData(Env->ID);
if (Diags.fatalError()) return nullptr;
if (Diags.fatalError())
return nullptr;
return Env;
}
@ -80,8 +81,7 @@ Environment::Environment(StringRef Code, StringRef FileName,
unsigned LastStartColumn)
: VirtualSM(new SourceManagerForFile(FileName, Code)), SM(VirtualSM->get()),
ID(VirtualSM->get().getMainFileID()), FirstStartColumn(FirstStartColumn),
NextStartColumn(NextStartColumn), LastStartColumn(LastStartColumn) {
}
NextStartColumn(NextStartColumn), LastStartColumn(LastStartColumn) {}
TokenAnalyzer::TokenAnalyzer(const Environment &Env, const FormatStyle &Style)
: Style(Style), Env(Env),

View File

@ -314,10 +314,11 @@ class AnnotatingParser {
//
// void (*FunctionPointer)(void);
// void (&FunctionReference)(void);
// void (&&FunctionReference)(void);
// void (^ObjCBlock)(void);
bool MightBeFunctionType = !Contexts[Contexts.size() - 2].IsExpression;
bool ProbablyFunctionType =
CurrentToken->isOneOf(tok::star, tok::amp, tok::caret);
CurrentToken->isOneOf(tok::star, tok::amp, tok::ampamp, tok::caret);
bool HasMultipleLines = false;
bool HasMultipleParametersOnALine = false;
bool MightBeObjCForRangeLoop =
@ -902,9 +903,13 @@ class AnnotatingParser {
break;
}
}
if (Contexts.back().ColonIsDictLiteral ||
Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
if (Line.First->isOneOf(Keywords.kw_module, Keywords.kw_import) ||
Line.First->startsSequence(tok::kw_export, Keywords.kw_module) ||
Line.First->startsSequence(tok::kw_export, Keywords.kw_import)) {
Tok->setType(TT_ModulePartitionColon);
} else if (Contexts.back().ColonIsDictLiteral ||
Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
Tok->setType(TT_DictLiteral);
if (Style.Language == FormatStyle::LK_TextProto) {
if (FormatToken *Previous = Tok->getPreviousNonComment())
@ -946,11 +951,15 @@ class AnnotatingParser {
!Line.First->isOneOf(tok::kw_enum, tok::kw_case,
tok::kw_default)) {
FormatToken *Prev = Tok->getPreviousNonComment();
if (!Prev)
break;
if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept))
Tok->setType(TT_CtorInitializerColon);
else if (Prev->is(tok::kw_try)) {
// Member initializer list within function try block.
FormatToken *PrevPrev = Prev->getPreviousNonComment();
if (!PrevPrev)
break;
if (PrevPrev && PrevPrev->isOneOf(tok::r_paren, tok::kw_noexcept))
Tok->setType(TT_CtorInitializerColon);
} else
@ -995,6 +1004,8 @@ class AnnotatingParser {
if (CurrentToken && CurrentToken->is(Keywords.kw_await))
next();
}
if (Style.isCpp() && CurrentToken && CurrentToken->is(tok::kw_co_await))
next();
Contexts.back().ColonIsForRangeExpr = true;
next();
if (!parseParens())
@ -1578,6 +1589,8 @@ class AnnotatingParser {
if (TemplateCloser->is(tok::l_paren)) {
// No Matching Paren yet so skip to matching paren
TemplateCloser = untilMatchingParen(TemplateCloser);
if (!TemplateCloser)
break;
}
if (TemplateCloser->is(tok::less))
NestingLevel++;
@ -2336,16 +2349,15 @@ void TokenAnnotator::setCommentLineLevels(
if (NextNonCommentLine && CommentLine &&
NextNonCommentLine->First->NewlinesBefore <= 1 &&
NextNonCommentLine->First->OriginalColumn ==
AL->First->OriginalColumn) {
AL->First->OriginalColumn) {
// Align comments for preprocessor lines with the # in column 0 if
// preprocessor lines are not indented. Otherwise, align with the next
// line.
AL->Level =
(Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
(NextNonCommentLine->Type == LT_PreprocessorDirective ||
NextNonCommentLine->Type == LT_ImportStatement))
? 0
: NextNonCommentLine->Level;
AL->Level = (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
(NextNonCommentLine->Type == LT_PreprocessorDirective ||
NextNonCommentLine->Type == LT_ImportStatement))
? 0
: NextNonCommentLine->Level;
} else {
NextNonCommentLine = AL->First->isNot(tok::r_brace) ? AL : nullptr;
}
@ -2639,8 +2651,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
if (Current->Role)
Current->Role->precomputeFormattingInfos(Current);
if (Current->MatchingParen &&
Current->MatchingParen->opensBlockOrBlockTypeList(Style)) {
assert(IndentLevel > 0);
Current->MatchingParen->opensBlockOrBlockTypeList(Style) &&
IndentLevel > 0) {
--IndentLevel;
}
Current->IndentLevel = IndentLevel;
@ -2942,6 +2954,14 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::kw_auto) && Right.isOneOf(tok::l_paren, tok::l_brace))
return false;
// operator co_await(x)
if (Right.is(tok::l_paren) && Left.is(tok::kw_co_await) && Left.Previous &&
Left.Previous->is(tok::kw_operator))
return false;
// co_await (x), co_yield (x), co_return (x)
if (Left.isOneOf(tok::kw_co_await, tok::kw_co_yield, tok::kw_co_return) &&
Right.isNot(tok::semi))
return true;
// requires clause Concept1<T> && Concept2<T>
if (Left.is(TT_ConstraintJunctions) && Right.is(tok::identifier))
return true;
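For context (not part of the upstream diff), a short sketch of the spacing these new coroutine-keyword rules produce with default style options; the snippet is illustrative input/output only, and the surrounding coroutine machinery is assumed:
// Illustrative clang-format results (assumed default style):
co_await (expr);                 // a space is kept after the keyword
co_yield (value);
auto h = p.operator co_await();  // but no space in an operator co_await call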
@ -3159,9 +3179,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.isIf(Line.Type != LT_PreprocessorDirective))
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
// TODO: add operator-overloading-specific options to
// SpaceBeforeParensOptions.
if (Right.is(TT_OverloadedOperatorLParen))
return spaceRequiredBeforeParens(Right);
// Function declaration or definition
if (Line.MightBeFunctionDecl && (Left.is(TT_FunctionDeclarationName) ||
Right.is(TT_OverloadedOperatorLParen))) {
if (Line.MightBeFunctionDecl && (Left.is(TT_FunctionDeclarationName))) {
if (Line.mightBeFunctionDefinition())
return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
spaceRequiredBeforeParens(Right);
@ -3238,9 +3262,35 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
auto HasExistingWhitespace = [&Right]() {
return Right.WhitespaceRange.getBegin() != Right.WhitespaceRange.getEnd();
};
if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
return true; // Never ever merge two identifiers.
// Leave a space between * and /* to avoid C4138 `comment end` found outside
// of comment.
if (Left.is(tok::star) && Right.is(tok::comment))
return true;
if (Style.isCpp()) {
// Space between import <iostream>.
// or import .....;
if (Left.is(Keywords.kw_import) && Right.isOneOf(tok::less, tok::ellipsis))
return true;
// Space between `module :` and `import :`.
if (Left.isOneOf(Keywords.kw_module, Keywords.kw_import) &&
Right.is(TT_ModulePartitionColon))
return true;
// No space between import foo:bar but keep a space between import :bar;
if (Left.is(tok::identifier) && Right.is(TT_ModulePartitionColon))
return false;
// No space between :bar;
if (Left.is(TT_ModulePartitionColon) &&
Right.isOneOf(tok::identifier, tok::kw_private))
return false;
if (Left.is(tok::ellipsis) && Right.is(tok::identifier) &&
Line.First->is(Keywords.kw_import))
return false;
if (Left.is(tok::kw_operator))
return Right.is(tok::coloncolon);
if (Right.is(tok::l_brace) && Right.is(BK_BracedInit) &&

View File

@ -320,9 +320,9 @@ class LineJoiner {
}
// Try to merge a control statement block with left brace wrapped
if (I[1]->First->is(tok::l_brace) &&
(TheLine->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for,
tok::kw_switch, tok::kw_try, tok::kw_do,
TT_ForEachMacro) ||
(TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
tok::kw_for, tok::kw_switch, tok::kw_try,
tok::kw_do, TT_ForEachMacro) ||
(TheLine->First->is(tok::r_brace) && TheLine->First->Next &&
TheLine->First->Next->isOneOf(tok::kw_else, tok::kw_catch))) &&
Style.BraceWrapping.AfterControlStatement ==
@ -335,7 +335,7 @@ class LineJoiner {
? 1
: 0;
} else if (I[1]->First->is(tok::l_brace) &&
TheLine->First->isOneOf(tok::kw_if, tok::kw_while,
TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
tok::kw_for)) {
return (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always)
@ -569,7 +569,7 @@ class LineJoiner {
// Check that the current line allows merging. This depends on whether we
// are in a control flow statements as well as several style flags.
if (Line.First->isOneOf(tok::kw_else, tok::kw_case) ||
if (Line.First->is(tok::kw_case) ||
(Line.First->Next && Line.First->Next->is(tok::kw_else)))
return 0;
// default: in switch statement
@ -578,20 +578,21 @@ class LineJoiner {
if (Tok && Tok->is(tok::colon))
return 0;
}
if (Line.First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_do, tok::kw_try,
tok::kw___try, tok::kw_catch, tok::kw___finally,
tok::kw_for, tok::r_brace, Keywords.kw___except)) {
if (Line.First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while, tok::kw_do,
tok::kw_try, tok::kw___try, tok::kw_catch,
tok::kw___finally, tok::kw_for, tok::r_brace,
Keywords.kw___except)) {
if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
return 0;
// Don't merge when we can't, except for the case when
// the control statement block is empty.
if (!Style.AllowShortIfStatementsOnASingleLine &&
Line.startsWith(tok::kw_if) &&
Line.First->isOneOf(tok::kw_if, tok::kw_else) &&
!Style.BraceWrapping.AfterControlStatement &&
!I[1]->First->is(tok::r_brace))
return 0;
if (!Style.AllowShortIfStatementsOnASingleLine &&
Line.startsWith(tok::kw_if) &&
Line.First->isOneOf(tok::kw_if, tok::kw_else) &&
Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
I + 2 != E && !I[2]->First->is(tok::r_brace))
@ -676,7 +677,7 @@ class LineJoiner {
// { <-- current Line
// baz();
// }
if (Line.First == Line.Last &&
if (Line.First == Line.Last && Line.First->isNot(TT_FunctionLBrace) &&
Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_MultiLine)
return 0;

View File

@ -28,9 +28,28 @@ namespace format {
class FormatTokenSource {
public:
virtual ~FormatTokenSource() {}
// Returns the next token in the token stream.
virtual FormatToken *getNextToken() = 0;
// Returns the token preceding the token returned by the last call to
// getNextToken() in the token stream, or nullptr if no such token exists.
virtual FormatToken *getPreviousToken() = 0;
// Returns the token that would be returned by the next call to
// getNextToken().
virtual FormatToken *peekNextToken() = 0;
// Returns whether we are at the end of the file.
// This can be different from whether getNextToken() returned an eof token
// when the FormatTokenSource is a view on a part of the token stream.
virtual bool isEOF() = 0;
// Gets the current position in the token stream, to be used by setPosition().
virtual unsigned getPosition() = 0;
// Resets the token stream to the state it was in when getPosition() returned
// Position, and returns the token at that position in the stream.
virtual FormatToken *setPosition(unsigned Position) = 0;
};
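As a reading aid (not part of the diff), a minimal sketch of the lookahead pattern these primitives support, mirroring how the parser uses them further down in this change; `Tokens` and `FormatTok` are assumed to be the parser's FormatTokenSource pointer and current token:
// Save the position, look ahead destructively, then rewind:
unsigned StoredPosition = Tokens->getPosition();
FormatToken *Next = Tokens->getNextToken();
FormatTok = Tokens->setPosition(StoredPosition);
// Or, with the new primitive, a single-token peek without rewinding:
FormatToken *Peeked = Tokens->peekNextToken();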
@ -108,6 +127,18 @@ class ScopedMacroState : public FormatTokenSource {
return Token;
}
FormatToken *getPreviousToken() override {
return PreviousTokenSource->getPreviousToken();
}
FormatToken *peekNextToken() override {
if (eof())
return &FakeEOF;
return PreviousTokenSource->peekNextToken();
}
bool isEOF() override { return PreviousTokenSource->isEOF(); }
unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
FormatToken *setPosition(unsigned Position) override {
@ -199,16 +230,45 @@ class IndexedTokenSource : public FormatTokenSource {
: Tokens(Tokens), Position(-1) {}
FormatToken *getNextToken() override {
if (Position >= 0 && Tokens[Position]->is(tok::eof)) {
LLVM_DEBUG({
llvm::dbgs() << "Next ";
dbgToken(Position);
});
return Tokens[Position];
}
++Position;
LLVM_DEBUG({
llvm::dbgs() << "Next ";
dbgToken(Position);
});
return Tokens[Position];
}
FormatToken *getPreviousToken() override {
assert(Position > 0);
return Tokens[Position - 1];
}
FormatToken *peekNextToken() override {
int Next = Position + 1;
LLVM_DEBUG({
llvm::dbgs() << "Peeking ";
dbgToken(Next);
});
return Tokens[Next];
}
bool isEOF() override { return Tokens[Position]->is(tok::eof); }
unsigned getPosition() override {
LLVM_DEBUG(llvm::dbgs() << "Getting Position: " << Position << "\n");
assert(Position >= 0);
return Position;
}
FormatToken *setPosition(unsigned P) override {
LLVM_DEBUG(llvm::dbgs() << "Setting Position: " << P << "\n");
Position = P;
return Tokens[Position];
}
@ -216,6 +276,13 @@ class IndexedTokenSource : public FormatTokenSource {
void reset() { Position = -1; }
private:
void dbgToken(int Position, llvm::StringRef Indent = "") {
FormatToken *Tok = Tokens[Position];
llvm::dbgs() << Indent << "[" << Position
<< "] Token: " << Tok->Tok.getName() << " / " << Tok->TokenText
<< ", Macro: " << !!Tok->MacroCtx << "\n";
}
ArrayRef<FormatToken *> Tokens;
int Position;
};
@ -399,7 +466,7 @@ void UnwrappedLineParser::parseLevel(bool HasOpeningBrace) {
FormatToken *Next;
do {
Next = Tokens->getNextToken();
} while (Next && Next->is(tok::comment));
} while (Next->is(tok::comment));
FormatTok = Tokens->setPosition(StoredPosition);
if (Next && Next->isNot(tok::colon)) {
// default not followed by ':' is not a case label; treat it like
@ -875,10 +942,7 @@ void UnwrappedLineParser::parsePPEndIf() {
parsePPUnknown();
// If the #endif of a potential include guard is the last thing in the file,
// then we found an include guard.
unsigned TokenPosition = Tokens->getPosition();
FormatToken *PeekNext = AllTokens[TokenPosition];
if (IncludeGuard == IG_Defined && PPBranchLevel == -1 &&
PeekNext->is(tok::eof) &&
if (IncludeGuard == IG_Defined && PPBranchLevel == -1 && Tokens->isEOF() &&
Style.IndentPPDirectives != FormatStyle::PPDIS_None)
IncludeGuard = IG_Found;
}
@ -1050,6 +1114,35 @@ static bool isC78ParameterDecl(const FormatToken *Tok, const FormatToken *Next,
return Tok->Previous && Tok->Previous->isOneOf(tok::l_paren, tok::comma);
}
void UnwrappedLineParser::parseModuleImport() {
nextToken();
while (!eof()) {
if (FormatTok->is(tok::colon)) {
FormatTok->setType(TT_ModulePartitionColon);
}
// Handle import <foo/bar.h> as we would an include statement.
else if (FormatTok->is(tok::less)) {
nextToken();
while (!FormatTok->isOneOf(tok::semi, tok::greater, tok::eof)) {
// Mark tokens up to the trailing line comments as implicit string
// literals.
if (FormatTok->isNot(tok::comment) &&
!FormatTok->TokenText.startswith("//"))
FormatTok->setType(TT_ImplicitStringLiteral);
nextToken();
}
}
if (FormatTok->is(tok::semi)) {
nextToken();
break;
}
nextToken();
}
addUnwrappedLine();
return;
}
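Illustrative C++20 source that this new parsing path is intended to recognize (example input only, not part of the diff):
export module mylib:impl;  // module/partition declaration
import mylib:impl;         // partition import; ':' becomes TT_ModulePartitionColon
import :impl;              // in-module partition import
import <vector>;           // header unit; the '<...>' tokens are handled like an include path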
// readTokenWithJavaScriptASI reads the next token and terminates the current
// line if JavaScript Automatic Semicolon Insertion must
// happen between the current token and the next token.
@ -1097,7 +1190,6 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
}
void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
assert(!FormatTok->is(tok::l_brace));
if (Style.Language == FormatStyle::LK_TableGen &&
FormatTok->is(tok::pp_include)) {
nextToken();
@ -1249,6 +1341,10 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
addUnwrappedLine();
return;
}
if (Style.isCpp()) {
parseModuleImport();
return;
}
}
if (Style.isCpp() &&
FormatTok->isOneOf(Keywords.kw_signals, Keywords.kw_qsignals,
@ -1402,9 +1498,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
// declaration.
if (!IsTopLevel || !Style.isCpp() || !Previous || FormatTok->is(tok::eof))
break;
const unsigned Position = Tokens->getPosition() + 1;
assert(Position < AllTokens.size());
if (isC78ParameterDecl(FormatTok, AllTokens[Position], Previous)) {
if (isC78ParameterDecl(FormatTok, Tokens->peekNextToken(), Previous)) {
addUnwrappedLine();
return;
}
@ -1488,7 +1582,7 @@ void UnwrappedLineParser::parseStructuralElement(bool IsTopLevel) {
unsigned StoredPosition = Tokens->getPosition();
FormatToken *Next = Tokens->getNextToken();
FormatTok = Tokens->setPosition(StoredPosition);
if (Next && !mustBeJSIdent(Keywords, Next)) {
if (!mustBeJSIdent(Keywords, Next)) {
nextToken();
break;
}
@ -2099,8 +2193,8 @@ void UnwrappedLineParser::parseIfThenElse() {
parseBlock();
addUnwrappedLine();
} else if (FormatTok->Tok.is(tok::kw_if)) {
FormatToken *Previous = AllTokens[Tokens->getPosition() - 1];
bool PrecededByComment = Previous->is(tok::comment);
FormatToken *Previous = Tokens->getPreviousToken();
bool PrecededByComment = Previous && Previous->is(tok::comment);
if (PrecededByComment) {
addUnwrappedLine();
++Line->Level;
@ -2305,6 +2399,8 @@ void UnwrappedLineParser::parseForOrWhileLoop() {
if (Style.Language == FormatStyle::LK_JavaScript &&
FormatTok->is(Keywords.kw_await))
nextToken();
if (Style.isCpp() && FormatTok->is(tok::kw_co_await))
nextToken();
if (FormatTok->Tok.is(tok::l_paren))
parseParens();
if (FormatTok->Tok.is(tok::l_brace)) {
@ -2653,23 +2749,25 @@ bool UnwrappedLineParser::tryToParseSimpleAttribute() {
ScopedTokenPosition AutoPosition(Tokens);
FormatToken *Tok = Tokens->getNextToken();
// We already read the first [ check for the second.
if (Tok && !Tok->is(tok::l_square)) {
if (!Tok->is(tok::l_square)) {
return false;
}
// Double check that the attribute is just something
// fairly simple.
while (Tok) {
while (Tok->isNot(tok::eof)) {
if (Tok->is(tok::r_square)) {
break;
}
Tok = Tokens->getNextToken();
}
if (Tok->is(tok::eof))
return false;
Tok = Tokens->getNextToken();
if (Tok && !Tok->is(tok::r_square)) {
if (!Tok->is(tok::r_square)) {
return false;
}
Tok = Tokens->getNextToken();
if (Tok && Tok->is(tok::semi)) {
if (Tok->is(tok::semi)) {
return false;
}
return true;
@ -2682,7 +2780,7 @@ void UnwrappedLineParser::parseJavaEnumBody() {
unsigned StoredPosition = Tokens->getPosition();
bool IsSimple = true;
FormatToken *Tok = Tokens->getNextToken();
while (Tok) {
while (!Tok->is(tok::eof)) {
if (Tok->is(tok::r_brace))
break;
if (Tok->isOneOf(tok::l_brace, tok::semi)) {
@ -3292,6 +3390,20 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
do {
FormatTok = Tokens->getNextToken();
assert(FormatTok);
while (FormatTok->getType() == TT_ConflictStart ||
FormatTok->getType() == TT_ConflictEnd ||
FormatTok->getType() == TT_ConflictAlternative) {
if (FormatTok->getType() == TT_ConflictStart) {
conditionalCompilationStart(/*Unreachable=*/false);
} else if (FormatTok->getType() == TT_ConflictAlternative) {
conditionalCompilationAlternative();
} else if (FormatTok->getType() == TT_ConflictEnd) {
conditionalCompilationEnd();
}
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
}
while (!Line->InPPDirective && FormatTok->Tok.is(tok::hash) &&
(FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) {
distributeComments(Comments, FormatTok);
@ -3313,19 +3425,6 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
flushComments(isOnNewLine(*FormatTok));
parsePPDirective();
}
while (FormatTok->getType() == TT_ConflictStart ||
FormatTok->getType() == TT_ConflictEnd ||
FormatTok->getType() == TT_ConflictAlternative) {
if (FormatTok->getType() == TT_ConflictStart) {
conditionalCompilationStart(/*Unreachable=*/false);
} else if (FormatTok->getType() == TT_ConflictAlternative) {
conditionalCompilationAlternative();
} else if (FormatTok->getType() == TT_ConflictEnd) {
conditionalCompilationEnd();
}
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
}
if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) &&
!Line->InPPDirective) {

View File

@ -110,6 +110,7 @@ class UnwrappedLineParser {
void parseCaseLabel();
void parseSwitch();
void parseNamespace();
void parseModuleImport();
void parseNew();
void parseAccessSpecifier();
bool parseEnum();

View File

@ -372,8 +372,6 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
if (ContinuedStringLiteral)
Changes[i].Spaces += Shift;
assert(Shift >= 0);
Changes[i].StartOfTokenColumn += Shift;
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
@ -915,7 +913,8 @@ void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
Changes[i].StartOfBlockComment->StartOfTokenColumn -
Changes[i].StartOfTokenColumn;
}
assert(Shift >= 0);
if (Shift < 0)
continue;
Changes[i].Spaces += Shift;
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
@ -1270,10 +1269,10 @@ WhitespaceManager::linkCells(CellDescriptions &&CellDesc) {
void WhitespaceManager::generateChanges() {
for (unsigned i = 0, e = Changes.size(); i != e; ++i) {
const Change &C = Changes[i];
if (i > 0) {
assert(Changes[i - 1].OriginalWhitespaceRange.getBegin() !=
C.OriginalWhitespaceRange.getBegin() &&
"Generating two replacements for the same location");
if (i > 0 && Changes[i - 1].OriginalWhitespaceRange.getBegin() ==
C.OriginalWhitespaceRange.getBegin()) {
// Do not generate two replacements for the same location.
continue;
}
if (C.CreateReplacement) {
std::string ReplacementText = C.PreviousLinePostfix;

View File

@ -505,6 +505,11 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
if (LangOpts.HIP) {
Builder.defineMacro("__HIP__");
Builder.defineMacro("__HIPCC__");
Builder.defineMacro("__HIP_MEMORY_SCOPE_SINGLETHREAD", "1");
Builder.defineMacro("__HIP_MEMORY_SCOPE_WAVEFRONT", "2");
Builder.defineMacro("__HIP_MEMORY_SCOPE_WORKGROUP", "3");
Builder.defineMacro("__HIP_MEMORY_SCOPE_AGENT", "4");
Builder.defineMacro("__HIP_MEMORY_SCOPE_SYSTEM", "5");
if (LangOpts.CUDAIsDevice)
Builder.defineMacro("__HIP_DEVICE_COMPILE__");
}
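A minimal sketch of how these scope macros pair with the scoped HIP atomic builtins handled elsewhere in this merge; illustrative device-side code, with the builtin signature assumed to be (pointer, value, memory order, scope):
__device__ int add_agent_scope(int *p, int v) {
  // Relaxed add made visible at agent (device) scope; illustrative only.
  return __hip_atomic_fetch_add(p, v, __ATOMIC_RELAXED,
                                __HIP_MEMORY_SCOPE_AGENT);
}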

View File

@ -412,10 +412,13 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
std::unique_ptr<PrecompilePreambleAction> Act;
Act.reset(new PrecompilePreambleAction(
StoreInMemory ? &Storage.asMemory().Data : nullptr, Callbacks));
Callbacks.BeforeExecute(*Clang);
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
return BuildPreambleError::BeginSourceFileFailed;
// Performed after BeginSourceFile to ensure Clang->Preprocessor can be
// referenced in the callback.
Callbacks.BeforeExecute(*Clang);
std::unique_ptr<PPCallbacks> DelegatedPPCallbacks =
Callbacks.createPPCallbacks();
if (DelegatedPPCallbacks)

View File

@ -633,7 +633,7 @@ static bool IsHeaderFile(const std::string &Filename) {
return false;
}
std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
std::string Ext = Filename.substr(DotPos + 1);
// C header: .h
// C++ header: .hh or .H;
return Ext == "h" || Ext == "hh" || Ext == "H";

View File

@ -569,7 +569,7 @@ static bool IsHeaderFile(const std::string &Filename) {
return false;
}
std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
std::string Ext = Filename.substr(DotPos + 1);
// C header: .h
// C++ header: .hh or .H;
return Ext == "h" || Ext == "hh" || Ext == "H";

View File

@ -19,6 +19,10 @@
#define __CR6_EQ_REV 1
#define __CR6_LT 2
#define __CR6_LT_REV 3
#define __CR6_GT 4
#define __CR6_GT_REV 5
#define __CR6_SO 6
#define __CR6_SO_REV 7
/* Constants for vec_test_data_class */
#define __VEC_CLASS_FP_SUBNORMAL_N (1 << 0)
@ -8413,9 +8417,20 @@ static __inline__ vector float __ATTRS_o_ai vec_round(vector float __a) {
}
#ifdef __VSX__
#ifdef __XL_COMPAT_ALTIVEC__
static __inline__ vector double __ATTRS_o_ai vec_rint(vector double __a);
static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
double __fpscr = __builtin_readflm();
__builtin_setrnd(0);
vector double __rounded = vec_rint(__a);
__builtin_setflm(__fpscr);
return __rounded;
}
#else
static __inline__ vector double __ATTRS_o_ai vec_round(vector double __a) {
return __builtin_vsx_xvrdpi(__a);
}
#endif
/* vec_rint */
@ -19026,6 +19041,51 @@ vec_sra(vector signed __int128 __a, vector unsigned __int128 __b) {
#endif /* __SIZEOF_INT128__ */
#endif /* __POWER10_VECTOR__ */
#ifdef __POWER8_VECTOR__
#define __bcdadd(__a, __b, __ps) __builtin_ppc_bcdadd((__a), (__b), (__ps))
#define __bcdsub(__a, __b, __ps) __builtin_ppc_bcdsub((__a), (__b), (__ps))
static __inline__ long __bcdadd_ofl(vector unsigned char __a,
vector unsigned char __b) {
return __builtin_ppc_bcdadd_p(__CR6_SO, __a, __b);
}
static __inline__ long __bcdsub_ofl(vector unsigned char __a,
vector unsigned char __b) {
return __builtin_ppc_bcdsub_p(__CR6_SO, __a, __b);
}
static __inline__ long __bcd_invalid(vector unsigned char __a) {
return __builtin_ppc_bcdsub_p(__CR6_SO, __a, __a);
}
static __inline__ long __bcdcmpeq(vector unsigned char __a,
vector unsigned char __b) {
return __builtin_ppc_bcdsub_p(__CR6_EQ, __a, __b);
}
static __inline__ long __bcdcmplt(vector unsigned char __a,
vector unsigned char __b) {
return __builtin_ppc_bcdsub_p(__CR6_LT, __a, __b);
}
static __inline__ long __bcdcmpgt(vector unsigned char __a,
vector unsigned char __b) {
return __builtin_ppc_bcdsub_p(__CR6_GT, __a, __b);
}
static __inline__ long __bcdcmple(vector unsigned char __a,
vector unsigned char __b) {
return __builtin_ppc_bcdsub_p(__CR6_GT_REV, __a, __b);
}
static __inline__ long __bcdcmpge(vector unsigned char __a,
vector unsigned char __b) {
return __builtin_ppc_bcdsub_p(__CR6_LT_REV, __a, __b);
}
#endif // __POWER8_VECTOR__
#undef __ATTRS_o_ai
#endif /* __ALTIVEC_H */
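A small usage sketch for the new BCD helpers (requires a POWER8-or-newer target with AltiVec/VSX enabled; the wrapper function names below are hypothetical):
#include <altivec.h>
/* Non-zero when adding the two packed-BCD operands overflows. */
static long bcd_add_overflows(vector unsigned char a, vector unsigned char b) {
  return __bcdadd_ofl(a, b);
}
/* Non-zero when the two packed-BCD operands compare equal. */
static long bcd_equal(vector unsigned char a, vector unsigned char b) {
  return __bcdcmpeq(a, b);
}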

View File

@ -35,7 +35,7 @@
#ifndef EMMINTRIN_H_
#define EMMINTRIN_H_
#if defined(__linux__) && defined(__ppc64__)
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
#include <altivec.h>
@ -2319,6 +2319,7 @@ _mm_castsi128_pd(__m128i __A)
#else
#include_next <emmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
#endif /* EMMINTRIN_H_ */
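With the guard relaxed, SSE2-style code can use these wrappers on FreeBSD/powerpc64 as well; a minimal sketch, assuming the translation unit is built for PowerPC with VSX enabled and -DNO_WARN_X86_INTRINSICS as the wrapper headers expect:
#include <emmintrin.h>
__m128d add2(__m128d a, __m128d b) {
  return _mm_add_pd(a, b); /* mapped to VSX operations by the wrapper header */
}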

View File

@ -10,7 +10,7 @@
#ifndef _MM_MALLOC_H_INCLUDED
#define _MM_MALLOC_H_INCLUDED
#if defined(__linux__) && defined(__ppc64__)
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
#include <stdlib.h>

View File

@ -35,7 +35,7 @@
#ifndef _MMINTRIN_H_INCLUDED
#define _MMINTRIN_H_INCLUDED
#if defined(__linux__) && defined(__ppc64__)
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
#include <altivec.h>
/* The Intel API is flexible enough that we must allow aliasing with other
@ -1445,6 +1445,7 @@ extern __inline __m64
#else
#include_next <mmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
#endif /* _MMINTRIN_H_INCLUDED */

View File

@ -38,7 +38,7 @@
#ifndef PMMINTRIN_H_
#define PMMINTRIN_H_
#if defined(__linux__) && defined(__ppc64__)
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
/* We need definitions from the SSE2 and SSE header files*/
#include <emmintrin.h>
@ -145,6 +145,7 @@ _mm_lddqu_si128 (__m128i const *__P)
#else
#include_next <pmmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
#endif /* PMMINTRIN_H_ */

View File

@ -29,7 +29,7 @@
#ifndef SMMINTRIN_H_
#define SMMINTRIN_H_
#if defined(__linux__) && defined(__ppc64__)
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
#include <altivec.h>
#include <tmmintrin.h>
@ -104,6 +104,7 @@ extern __inline __m128i
#else
#include_next <smmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
#endif /* _SMMINTRIN_H_ */

View File

@ -25,7 +25,7 @@
#ifndef TMMINTRIN_H_
#define TMMINTRIN_H_
#if defined(__linux__) && defined(__ppc64__)
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
#include <altivec.h>
@ -490,6 +490,7 @@ _mm_mulhrs_pi16 (__m64 __A, __m64 __B)
#else
#include_next <tmmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
#endif /* TMMINTRIN_H_ */

View File

@ -34,7 +34,7 @@
#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED
#if defined(__linux__) && defined(__ppc64__)
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
/* Define four value permute mask */
#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
@ -1838,6 +1838,7 @@ do { \
#else
#include_next <xmmintrin.h>
#endif /* defined(__linux__) && defined(__ppc64__) */
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
#endif /* _XMMINTRIN_H_INCLUDED */

View File

@ -12,8 +12,12 @@
/* If we're hosted, fall back to the system's stdatomic.h. FreeBSD, for
* example, already has a Clang-compatible stdatomic.h header.
*
 * Exclude the MSVC path as well, as the MSVC header as of 14.31.30818
 * explicitly disallows `stdatomic.h` in C mode via an `#error`. Fall back
 * to the clang resource header until that is fully supported.
*/
#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>)
#if __STDC_HOSTED__ && __has_include_next(<stdatomic.h>) && !defined(_MSC_VER)
# include_next <stdatomic.h>
#else
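In practice this means a C translation unit built for the MSVC environment can now include the header and get clang's own stdatomic.h instead of the erroring MSVC one; a minimal illustrative example:
#include <stdatomic.h>
atomic_int counter;
void bump(void) { atomic_fetch_add(&counter, 1); }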

View File

@ -6978,13 +6978,13 @@ void Parser::ParseParameterDeclarationClause(
//
// We care about case 1) where the declarator type should be known, and
// the identifier should be null.
if (!ParmDeclarator.isInvalidType() && !ParmDeclarator.hasName()) {
if (Tok.getIdentifierInfo() &&
Tok.getIdentifierInfo()->isKeyword(getLangOpts())) {
Diag(Tok, diag::err_keyword_as_parameter) << PP.getSpelling(Tok);
// Consume the keyword.
ConsumeToken();
}
if (!ParmDeclarator.isInvalidType() && !ParmDeclarator.hasName() &&
Tok.isNot(tok::raw_identifier) && !Tok.isAnnotation() &&
Tok.getIdentifierInfo() &&
Tok.getIdentifierInfo()->isKeyword(getLangOpts())) {
Diag(Tok, diag::err_keyword_as_parameter) << PP.getSpelling(Tok);
// Consume the keyword.
ConsumeToken();
}
// Inform the actions module about the parameter declarator, so it gets
// added to the current scope.

View File

@ -2108,6 +2108,9 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
CoawaitLoc = SourceLocation();
}
if (CoawaitLoc.isValid() && getLangOpts().CPlusPlus20)
Diag(CoawaitLoc, diag::warn_deprecated_for_co_await);
// We need to perform most of the semantic analysis for a C++0x for-range
// statement before parsing the body, in order to be able to deduce the type
// of an auto-typed loop variable.

View File

@ -464,7 +464,7 @@ static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
// No more CFGElements in the block?
if (ri == re) {
const Stmt *Term = B.getTerminatorStmt();
if (Term && isa<CXXTryStmt>(Term)) {
if (Term && (isa<CXXTryStmt>(Term) || isa<ObjCAtTryStmt>(Term))) {
HasAbnormalEdge = true;
continue;
}

View File

@ -5297,6 +5297,7 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
case AtomicExpr::AO__atomic_load:
return OrderingCABI != llvm::AtomicOrderingCABI::release &&
@ -5304,6 +5305,7 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
@ -5380,6 +5382,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
"need to update code for modified C11 atomics");
bool IsOpenCL = Op >= AtomicExpr::AO__opencl_atomic_init &&
Op <= AtomicExpr::AO__opencl_atomic_fetch_max;
bool IsHIP = Op >= AtomicExpr::AO__hip_atomic_load &&
Op <= AtomicExpr::AO__hip_atomic_fetch_max;
bool IsC11 = (Op >= AtomicExpr::AO__c11_atomic_init &&
Op <= AtomicExpr::AO__c11_atomic_fetch_min) ||
IsOpenCL;
@ -5397,6 +5401,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__opencl_atomic_load:
case AtomicExpr::AO__hip_atomic_load:
case AtomicExpr::AO__atomic_load_n:
Form = Load;
break;
@ -5407,11 +5412,14 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__opencl_atomic_store:
case AtomicExpr::AO__hip_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
Form = Copy;
break;
case AtomicExpr::AO__hip_atomic_fetch_add:
case AtomicExpr::AO__hip_atomic_fetch_min:
case AtomicExpr::AO__hip_atomic_fetch_max:
case AtomicExpr::AO__c11_atomic_fetch_add:
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__opencl_atomic_fetch_add:
@ -5426,6 +5434,9 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__hip_atomic_fetch_and:
case AtomicExpr::AO__hip_atomic_fetch_or:
case AtomicExpr::AO__hip_atomic_fetch_xor:
case AtomicExpr::AO__c11_atomic_fetch_nand:
case AtomicExpr::AO__opencl_atomic_fetch_and:
case AtomicExpr::AO__opencl_atomic_fetch_or:
@ -5452,6 +5463,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
break;
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__hip_atomic_exchange:
case AtomicExpr::AO__opencl_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
Form = Xchg;
@ -5463,8 +5475,10 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
Form = C11CmpXchg;
break;
@ -5475,7 +5489,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
}
unsigned AdjustedNumArgs = NumArgs[Form];
if (IsOpenCL && Op != AtomicExpr::AO__opencl_atomic_init)
if ((IsOpenCL || IsHIP) && Op != AtomicExpr::AO__opencl_atomic_init)
++AdjustedNumArgs;
// Check we have the right number of arguments.
if (Args.size() < AdjustedNumArgs) {
@ -5532,8 +5546,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
// For an arithmetic operation, the implied arithmetic must be well-formed.
if (Form == Arithmetic) {
// GCC does not enforce these rules for GNU atomics, but we do, because if
// we didn't it would be very confusing. FIXME: For whom? How so?
// GCC does not enforce these rules for GNU atomics, but we do to help catch
// trivial type errors.
auto IsAllowedValueType = [&](QualType ValType) {
if (ValType->isIntegerType())
return true;
@ -5574,8 +5588,9 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context) &&
!AtomTy->isScalarType()) {
// For GNU atomics, require a trivially-copyable type. This is not part of
// the GNU atomics specification, but we enforce it, because if we didn't it
// would be very confusing. FIXME: For whom? How so?
// the GNU atomics specification but we enforce it for consistency with
// other atomics which generally all require a trivially-copyable type. This
// is because atomics just copy bits.
Diag(ExprRange.getBegin(), diag::err_atomic_op_needs_trivial_copy)
<< Ptr->getType() << Ptr->getSourceRange();
return ExprError();
@ -5614,7 +5629,7 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
// arguments are actually passed as pointers.
QualType ByValType = ValType; // 'CP'
bool IsPassedByAddress = false;
if (!IsC11 && !IsN) {
if (!IsC11 && !IsHIP && !IsN) {
ByValType = Ptr->getType();
IsPassedByAddress = true;
}
@ -5793,11 +5808,14 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
if ((Op == AtomicExpr::AO__c11_atomic_load ||
Op == AtomicExpr::AO__c11_atomic_store ||
Op == AtomicExpr::AO__opencl_atomic_load ||
Op == AtomicExpr::AO__opencl_atomic_store ) &&
Op == AtomicExpr::AO__hip_atomic_load ||
Op == AtomicExpr::AO__opencl_atomic_store ||
Op == AtomicExpr::AO__hip_atomic_store) &&
Context.AtomicUsesUnsupportedLibcall(AE))
Diag(AE->getBeginLoc(), diag::err_atomic_load_store_uses_lib)
<< ((Op == AtomicExpr::AO__c11_atomic_load ||
Op == AtomicExpr::AO__opencl_atomic_load)
Op == AtomicExpr::AO__opencl_atomic_load ||
Op == AtomicExpr::AO__hip_atomic_load)
? 0
: 1);

View File

@ -10268,13 +10268,9 @@ static bool checkNonMultiVersionCompatAttributes(Sema &S,
const FunctionDecl *FD,
const FunctionDecl *CausedFD,
MultiVersionKind MVType) {
bool IsCPUSpecificCPUDispatchMVType =
MVType == MultiVersionKind::CPUDispatch ||
MVType == MultiVersionKind::CPUSpecific;
const auto Diagnose = [FD, CausedFD, IsCPUSpecificCPUDispatchMVType](
Sema &S, const Attr *A) {
const auto Diagnose = [FD, CausedFD, MVType](Sema &S, const Attr *A) {
S.Diag(FD->getLocation(), diag::err_multiversion_disallowed_other_attr)
<< IsCPUSpecificCPUDispatchMVType << A;
<< static_cast<unsigned>(MVType) << A;
if (CausedFD)
S.Diag(CausedFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
@ -10292,6 +10288,10 @@ static bool checkNonMultiVersionCompatAttributes(Sema &S,
if (MVType != MultiVersionKind::Target)
return Diagnose(S, A);
break;
case attr::TargetClones:
if (MVType != MultiVersionKind::TargetClones)
return Diagnose(S, A);
break;
default:
if (!AttrCompatibleWithMultiVersion(A->getKind(), MVType))
return Diagnose(S, A);
@ -10318,6 +10318,7 @@ bool Sema::areMultiversionVariantFunctionsCompatible(
DefaultedFuncs = 6,
ConstexprFuncs = 7,
ConstevalFuncs = 8,
Lambda = 9,
};
enum Different {
CallingConv = 0,
@ -10445,7 +10446,7 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
S.PDiag(diag::note_multiversioning_caused_here)),
PartialDiagnosticAt(NewFD->getLocation(),
S.PDiag(diag::err_multiversion_doesnt_support)
<< IsCPUSpecificCPUDispatchMVType),
<< static_cast<unsigned>(MVType)),
PartialDiagnosticAt(NewFD->getLocation(),
S.PDiag(diag::err_multiversion_diff)),
/*TemplatesSupported=*/false,
@ -10574,21 +10575,30 @@ static bool CheckTargetCausesMultiVersioning(
return false;
}
static bool MultiVersionTypesCompatible(MultiVersionKind Old,
MultiVersionKind New) {
if (Old == New || Old == MultiVersionKind::None ||
New == MultiVersionKind::None)
return true;
return (Old == MultiVersionKind::CPUDispatch &&
New == MultiVersionKind::CPUSpecific) ||
(Old == MultiVersionKind::CPUSpecific &&
New == MultiVersionKind::CPUDispatch);
}
/// Check the validity of a new function declaration being added to an existing
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
MultiVersionKind NewMVType, const TargetAttr *NewTA,
const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
LookupResult &Previous) {
const TargetClonesAttr *NewClones, bool &Redeclaration, NamedDecl *&OldDecl,
bool &MergeTypeWithPrevious, LookupResult &Previous) {
MultiVersionKind OldMVType = OldFD->getMultiVersionKind();
// Disallow mixing of multiversioning types.
if ((OldMVType == MultiVersionKind::Target &&
NewMVType != MultiVersionKind::Target) ||
(NewMVType == MultiVersionKind::Target &&
OldMVType != MultiVersionKind::Target)) {
if (!MultiVersionTypesCompatible(OldMVType, NewMVType)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
@ -10613,7 +10623,12 @@ static bool CheckMultiVersionAdditionalDecl(
if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
if (NewMVType == MultiVersionKind::Target) {
switch (NewMVType) {
case MultiVersionKind::None:
assert(OldMVType == MultiVersionKind::TargetClones &&
"Only target_clones can be omitted in subsequent declarations");
break;
case MultiVersionKind::Target: {
const auto *CurTA = CurFD->getAttr<TargetAttr>();
if (CurTA->getFeaturesStr() == NewTA->getFeaturesStr()) {
NewFD->setIsMultiVersion();
@ -10629,7 +10644,30 @@ static bool CheckMultiVersionAdditionalDecl(
NewFD->setInvalidDecl();
return true;
}
} else {
break;
}
case MultiVersionKind::TargetClones: {
const auto *CurClones = CurFD->getAttr<TargetClonesAttr>();
Redeclaration = true;
OldDecl = CurFD;
MergeTypeWithPrevious = true;
NewFD->setIsMultiVersion();
if (CurClones && NewClones &&
(CurClones->featuresStrs_size() != NewClones->featuresStrs_size() ||
!std::equal(CurClones->featuresStrs_begin(),
CurClones->featuresStrs_end(),
NewClones->featuresStrs_begin()))) {
S.Diag(NewFD->getLocation(), diag::err_target_clone_doesnt_match);
S.Diag(CurFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
return true;
}
return false;
}
case MultiVersionKind::CPUSpecific:
case MultiVersionKind::CPUDispatch: {
const auto *CurCPUSpec = CurFD->getAttr<CPUSpecificAttr>();
const auto *CurCPUDisp = CurFD->getAttr<CPUDispatchAttr>();
// Handle CPUDispatch/CPUSpecific versions.
@ -10684,8 +10722,8 @@ static bool CheckMultiVersionAdditionalDecl(
}
}
}
// If the two decls aren't the same MVType, there is no possible error
// condition.
break;
}
}
}
@ -10721,7 +10759,6 @@ static bool CheckMultiVersionAdditionalDecl(
return false;
}
/// Check the validity of a multiversion function declaration.
/// Also sets the multiversion'ness' of the function itself.
///
@ -10735,23 +10772,14 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
const auto *NewTA = NewFD->getAttr<TargetAttr>();
const auto *NewCPUDisp = NewFD->getAttr<CPUDispatchAttr>();
const auto *NewCPUSpec = NewFD->getAttr<CPUSpecificAttr>();
// Mixing Multiversioning types is prohibited.
if ((NewTA && NewCPUDisp) || (NewTA && NewCPUSpec) ||
(NewCPUDisp && NewCPUSpec)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
NewFD->setInvalidDecl();
return true;
}
MultiVersionKind MVType = NewFD->getMultiVersionKind();
const auto *NewClones = NewFD->getAttr<TargetClonesAttr>();
MultiVersionKind MVType = NewFD->getMultiVersionKind();
// Main isn't allowed to become a multiversion function, however it IS
// permitted to have 'main' be marked with the 'target' optimization hint.
if (NewFD->isMain()) {
if ((MVType == MultiVersionKind::Target && NewTA->isDefaultVersion()) ||
MVType == MultiVersionKind::CPUDispatch ||
MVType == MultiVersionKind::CPUSpecific) {
if (MVType != MultiVersionKind::None &&
!(MVType == MultiVersionKind::Target && !NewTA->isDefaultVersion())) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
NewFD->setInvalidDecl();
return true;
@ -10774,13 +10802,35 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::None)
return false;
if (OldFD->isMultiVersion() && MVType == MultiVersionKind::None) {
// Multiversioned redeclarations aren't allowed to omit the attribute, except
// for target_clones.
if (OldFD->isMultiVersion() && MVType == MultiVersionKind::None &&
OldFD->getMultiVersionKind() != MultiVersionKind::TargetClones) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
<< (OldFD->getMultiVersionKind() != MultiVersionKind::Target);
NewFD->setInvalidDecl();
return true;
}
if (!OldFD->isMultiVersion()) {
switch (MVType) {
case MultiVersionKind::Target:
return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
Redeclaration, OldDecl,
MergeTypeWithPrevious, Previous);
case MultiVersionKind::TargetClones:
if (OldFD->isUsed(false)) {
NewFD->setInvalidDecl();
return S.Diag(NewFD->getLocation(), diag::err_multiversion_after_used);
}
OldFD->setIsMultiVersion();
break;
case MultiVersionKind::CPUDispatch:
case MultiVersionKind::CPUSpecific:
case MultiVersionKind::None:
break;
}
}
// Handle the target potentially causes multiversioning case.
if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::Target)
return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
@ -10791,8 +10841,8 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
// appropriate attribute in the current function decl. Resolve that these are
// still compatible with previous declarations.
return CheckMultiVersionAdditionalDecl(
S, OldFD, NewFD, MVType, NewTA, NewCPUDisp, NewCPUSpec, Redeclaration,
OldDecl, MergeTypeWithPrevious, Previous);
S, OldFD, NewFD, MVType, NewTA, NewCPUDisp, NewCPUSpec, NewClones,
Redeclaration, OldDecl, MergeTypeWithPrevious, Previous);
}
/// Perform semantic checking of a new function declaration.

View File

@ -1965,6 +1965,28 @@ static void handleRestrictAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
static void handleCPUSpecificAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Ensure we don't combine these with themselves, since that causes some
// confusing behavior.
if (AL.getParsedKind() == ParsedAttr::AT_CPUDispatch) {
if (checkAttrMutualExclusion<CPUSpecificAttr>(S, D, AL))
return;
if (const auto *Other = D->getAttr<CPUDispatchAttr>()) {
S.Diag(AL.getLoc(), diag::err_disallowed_duplicate_attribute) << AL;
S.Diag(Other->getLocation(), diag::note_conflicting_attribute);
return;
}
} else if (AL.getParsedKind() == ParsedAttr::AT_CPUSpecific) {
if (checkAttrMutualExclusion<CPUDispatchAttr>(S, D, AL))
return;
if (const auto *Other = D->getAttr<CPUSpecificAttr>()) {
S.Diag(AL.getLoc(), diag::err_disallowed_duplicate_attribute) << AL;
S.Diag(Other->getLocation(), diag::note_conflicting_attribute);
return;
}
}
FunctionDecl *FD = cast<FunctionDecl>(D);
if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
@ -3211,54 +3233,57 @@ static void handleCodeSegAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
enum FirstParam { Unsupported, Duplicate, Unknown };
enum SecondParam { None, Architecture, Tune };
enum ThirdParam { Target, TargetClones };
if (AttrStr.contains("fpmath="))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "fpmath=";
<< Unsupported << None << "fpmath=" << Target;
// Diagnose use of tune if target doesn't support it.
if (!Context.getTargetInfo().supportsTargetAttributeTune() &&
AttrStr.contains("tune="))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "tune=";
<< Unsupported << None << "tune=" << Target;
ParsedTargetAttr ParsedAttrs = TargetAttr::parse(AttrStr);
if (!ParsedAttrs.Architecture.empty() &&
!Context.getTargetInfo().isValidCPUName(ParsedAttrs.Architecture))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unknown << Architecture << ParsedAttrs.Architecture;
<< Unknown << Architecture << ParsedAttrs.Architecture << Target;
if (!ParsedAttrs.Tune.empty() &&
!Context.getTargetInfo().isValidCPUName(ParsedAttrs.Tune))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unknown << Tune << ParsedAttrs.Tune;
<< Unknown << Tune << ParsedAttrs.Tune << Target;
if (ParsedAttrs.DuplicateArchitecture)
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Duplicate << None << "arch=";
<< Duplicate << None << "arch=" << Target;
if (ParsedAttrs.DuplicateTune)
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Duplicate << None << "tune=";
<< Duplicate << None << "tune=" << Target;
for (const auto &Feature : ParsedAttrs.Features) {
auto CurFeature = StringRef(Feature).drop_front(); // remove + or -.
if (!Context.getTargetInfo().isValidFeatureName(CurFeature))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << CurFeature;
<< Unsupported << None << CurFeature << Target;
}
TargetInfo::BranchProtectionInfo BPI;
StringRef Error;
if (!ParsedAttrs.BranchProtection.empty() &&
!Context.getTargetInfo().validateBranchProtection(
ParsedAttrs.BranchProtection, BPI, Error)) {
if (Error.empty())
StringRef DiagMsg;
if (ParsedAttrs.BranchProtection.empty())
return false;
if (!Context.getTargetInfo().validateBranchProtection(
ParsedAttrs.BranchProtection, BPI, DiagMsg)) {
if (DiagMsg.empty())
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "branch-protection";
else
return Diag(LiteralLoc, diag::err_invalid_branch_protection_spec)
<< Error;
<< Unsupported << None << "branch-protection" << Target;
return Diag(LiteralLoc, diag::err_invalid_branch_protection_spec)
<< DiagMsg;
}
if (!DiagMsg.empty())
Diag(LiteralLoc, diag::warn_unsupported_branch_protection_spec) << DiagMsg;
return false;
}
@ -3274,6 +3299,107 @@ static void handleTargetAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(NewAttr);
}
bool Sema::checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
const StringLiteral *Literal,
bool &HasDefault, bool &HasCommas,
SmallVectorImpl<StringRef> &Strings) {
enum FirstParam { Unsupported, Duplicate, Unknown };
enum SecondParam { None, Architecture, Tune };
enum ThirdParam { Target, TargetClones };
HasCommas = HasCommas || Str.contains(',');
// Warn on empty at the beginning of a string.
if (Str.size() == 0)
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "" << TargetClones;
std::pair<StringRef, StringRef> Parts = {{}, Str};
while (!Parts.second.empty()) {
Parts = Parts.second.split(',');
StringRef Cur = Parts.first.trim();
SourceLocation CurLoc = Literal->getLocationOfByte(
Cur.data() - Literal->getString().data(), getSourceManager(),
getLangOpts(), Context.getTargetInfo());
bool DefaultIsDupe = false;
if (Cur.empty())
return Diag(CurLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "" << TargetClones;
if (Cur.startswith("arch=")) {
if (!Context.getTargetInfo().isValidCPUName(
Cur.drop_front(sizeof("arch=") - 1)))
return Diag(CurLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << Architecture
<< Cur.drop_front(sizeof("arch=") - 1) << TargetClones;
} else if (Cur == "default") {
DefaultIsDupe = HasDefault;
HasDefault = true;
} else if (!Context.getTargetInfo().isValidFeatureName(Cur))
return Diag(CurLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << Cur << TargetClones;
if (llvm::find(Strings, Cur) != Strings.end() || DefaultIsDupe)
Diag(CurLoc, diag::warn_target_clone_duplicate_options);
// Note: Add even if there are duplicates, since it changes name mangling.
Strings.push_back(Cur);
}
if (Str.rtrim().endswith(","))
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "" << TargetClones;
return false;
}
static void handleTargetClonesAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// Ensure we don't combine these with themselves, since that causes some
// confusing behavior.
if (const auto *Other = D->getAttr<TargetClonesAttr>()) {
S.Diag(AL.getLoc(), diag::err_disallowed_duplicate_attribute) << AL;
S.Diag(Other->getLocation(), diag::note_conflicting_attribute);
return;
}
if (checkAttrMutualExclusion<TargetClonesAttr>(S, D, AL))
return;
SmallVector<StringRef, 2> Strings;
bool HasCommas = false, HasDefault = false;
for (unsigned I = 0, E = AL.getNumArgs(); I != E; ++I) {
StringRef CurStr;
SourceLocation LiteralLoc;
if (!S.checkStringLiteralArgumentAttr(AL, I, CurStr, &LiteralLoc) ||
S.checkTargetClonesAttrString(
LiteralLoc, CurStr,
cast<StringLiteral>(AL.getArgAsExpr(I)->IgnoreParenCasts()),
HasDefault, HasCommas, Strings))
return;
}
if (HasCommas && AL.getNumArgs() > 1)
S.Diag(AL.getLoc(), diag::warn_target_clone_mixed_values);
if (!HasDefault) {
S.Diag(AL.getLoc(), diag::err_target_clone_must_have_default);
return;
}
// FIXME: We could probably figure out how to get this to work for lambdas
// someday.
if (const auto *MD = dyn_cast<CXXMethodDecl>(D)) {
if (MD->getParent()->isLambda()) {
S.Diag(D->getLocation(), diag::err_multiversion_doesnt_support)
<< static_cast<unsigned>(MultiVersionKind::TargetClones)
<< /*Lambda*/ 9;
return;
}
}
cast<FunctionDecl>(D)->setIsMultiVersion();
TargetClonesAttr *NewAttr = ::new (S.Context)
TargetClonesAttr(S.Context, AL, Strings.data(), Strings.size());
D->addAttr(NewAttr);
}
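For context, a typical use of the attribute this handler validates (drawn from common GCC/Clang usage, not from the diff); a "default" entry is mandatory, and each entry is either a feature name or an "arch=" value:
__attribute__((target_clones("avx2", "arch=atom", "default")))
int calc(void) { return 1; }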
static void handleMinVectorWidthAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
Expr *E = AL.getArgAsExpr(0);
uint32_t VecWidth;
@ -8217,6 +8343,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Target:
handleTargetAttr(S, D, AL);
break;
case ParsedAttr::AT_TargetClones:
handleTargetClonesAttr(S, D, AL);
break;
case ParsedAttr::AT_MinVectorWidth:
handleMinVectorWidthAttr(S, D, AL);
break;

View File

@ -16566,6 +16566,17 @@ Sema::PushExpressionEvaluationContext(
ExpressionEvaluationContextRecord::ExpressionKind ExprContext) {
ExprEvalContexts.emplace_back(NewContext, ExprCleanupObjects.size(), Cleanup,
LambdaContextDecl, ExprContext);
// Discarded statements and immediate contexts nested in other
// discarded statements or immediate contexts are themselves
// a discarded statement or an immediate context, respectively.
ExprEvalContexts.back().InDiscardedStatement =
ExprEvalContexts[ExprEvalContexts.size() - 2]
.isDiscardedStatementContext();
ExprEvalContexts.back().InImmediateFunctionContext =
ExprEvalContexts[ExprEvalContexts.size() - 2]
.isImmediateFunctionContext();
Cleanup.reset();
if (!MaybeODRUseExprs.empty())
std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
@ -18965,6 +18976,10 @@ bool Sema::DiagIfReachable(SourceLocation Loc, ArrayRef<const Stmt *> Stmts,
/// during overload resolution or within sizeof/alignof/typeof/typeid.
bool Sema::DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD) {
if (ExprEvalContexts.back().isDiscardedStatementContext())
return false;
switch (ExprEvalContexts.back().Context) {
case ExpressionEvaluationContext::Unevaluated:
case ExpressionEvaluationContext::UnevaluatedList:

View File

@ -1508,8 +1508,9 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
}
// Only construct objects with object types.
// There doesn't seem to be an explicit rule for this but functions are
// not objects, so they cannot take initializers.
// The standard doesn't explicitly forbid function types here, but that's an
// obvious oversight, as there's no way to dynamically construct a function
// in general.
if (Ty->isFunctionType())
return ExprError(Diag(TyBeginLoc, diag::err_init_for_function_type)
<< Ty << FullRange);

View File

@ -3563,8 +3563,7 @@ StmtResult Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc,
bool HasDeducedReturnType =
CurLambda && hasDeducedReturnType(CurLambda->CallOperator);
if (ExprEvalContexts.back().Context ==
ExpressionEvaluationContext::DiscardedStatement &&
if (ExprEvalContexts.back().isDiscardedStatementContext() &&
(HasDeducedReturnType || CurCap->HasImplicitReturnType)) {
if (RetValExp) {
ExprResult ER =
@ -3880,8 +3879,7 @@ Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
if (RetVal.isInvalid())
return StmtError();
StmtResult R = BuildReturnStmt(ReturnLoc, RetVal.get());
if (R.isInvalid() || ExprEvalContexts.back().Context ==
ExpressionEvaluationContext::DiscardedStatement)
if (R.isInvalid() || ExprEvalContexts.back().isDiscardedStatementContext())
return R;
if (VarDecl *VD =
@ -3966,8 +3964,7 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
// C++1z: discarded return statements are not considered when deducing a
// return type.
if (ExprEvalContexts.back().Context ==
ExpressionEvaluationContext::DiscardedStatement &&
if (ExprEvalContexts.back().isDiscardedStatementContext() &&
FnRetType->getContainedAutoType()) {
if (RetValExp) {
ExprResult ER =

View File

@ -686,8 +686,8 @@ SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
assert(Src->succ_rbegin() != Src->succ_rend());
CFGBlock *DefaultBlock = *Src->succ_rbegin();
// Sanity check for default blocks that are unreachable and not caught
// by earlier stages.
// Basic correctness check for default blocks that are unreachable and not
// caught by earlier stages.
if (!DefaultBlock)
return nullptr;

View File

@ -2191,6 +2191,42 @@ LLVM_NODISCARD ProgramStateRef reAssume(ProgramStateRef State,
Constraint->getMaxValue(), true);
}
// Simplify the given symbol with the help of the SValBuilder. In
// SValBuilder::simplifySVal, we traverse the symbol tree and query the
// constraint values for the sub-trees; if a value is a constant, we do the
// constant folding. A compound symbol might collapse to a simpler symbol tree
// that can still be simplified further. Thus, we repeat the simplification on
// the new symbol tree until we reach the simplest form, i.e. the fixpoint.
//
// Consider the following symbol `(b * b) * b * b` which has this tree:
//       *
//      / \
//     *   b
//    /  \
//   /    b
// (b * b)
// Now, if the `b * b == 1` new constraint is added then during the first
// iteration we have the following transformations:
//       *              *
//      / \            / \
//     *   b    -->   b   b
//    /  \
//   /    b
//  1
// We need another iteration to reach the final result `1`.
LLVM_NODISCARD
static SVal simplifyUntilFixpoint(SValBuilder &SVB, ProgramStateRef State,
const SymbolRef Sym) {
SVal Val = SVB.makeSymbolVal(Sym);
SVal SimplifiedVal = SVB.simplifySVal(State, Val);
// Keep simplifying as long as the symbol keeps changing.
while (SimplifiedVal != Val) {
Val = SimplifiedVal;
SimplifiedVal = SVB.simplifySVal(State, Val);
}
return SimplifiedVal;
}
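An illustrative source pattern (not from the diff) where the extra iterations matter: once the `b * b == 1` constraint is recorded, the symbol bound to `x` now folds all the way down to the constant 1.
void test(int b) {
  int x = (b * b) * b * b; // symbolic value with the tree shown above
  if (b * b != 1)
    return;
  // On this path the analyzer can now conclude x == 1 (illustrative claim).
  (void)x;
}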
// Iterate over all symbols and try to simplify them. Once a symbol is
// simplified then we check if we can merge the simplified symbol's equivalence
// class to this class. This way, we simplify not just the symbols but the
@ -2202,7 +2238,8 @@ EquivalenceClass::simplify(SValBuilder &SVB, RangeSet::Factory &F,
SymbolSet ClassMembers = Class.getClassMembers(State);
for (const SymbolRef &MemberSym : ClassMembers) {
const SVal SimplifiedMemberVal = simplifyToSVal(State, MemberSym);
const SVal SimplifiedMemberVal =
simplifyUntilFixpoint(SVB, State, MemberSym);
const SymbolRef SimplifiedMemberSym = SimplifiedMemberVal.getAsSymbol();
// The symbol is collapsed to a constant, check if the current State is

View File

@ -372,6 +372,15 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
NonLoc InputLHS = lhs;
NonLoc InputRHS = rhs;
// Constraints may have changed since the creation of a bound SVal. Check if
// the values can be simplified based on those new constraints.
SVal simplifiedLhs = simplifySVal(state, lhs);
SVal simplifiedRhs = simplifySVal(state, rhs);
if (auto simplifiedLhsAsNonLoc = simplifiedLhs.getAs<NonLoc>())
lhs = *simplifiedLhsAsNonLoc;
if (auto simplifiedRhsAsNonLoc = simplifiedRhs.getAs<NonLoc>())
rhs = *simplifiedRhsAsNonLoc;
// Handle trivial case where left-side and right-side are the same.
if (lhs == rhs)
switch (op) {
@ -619,16 +628,6 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
}
}
// Does the symbolic expression simplify to a constant?
// If so, "fold" the constant by setting 'lhs' to a ConcreteInt
// and try again.
SVal simplifiedLhs = simplifySVal(state, lhs);
if (simplifiedLhs != lhs)
if (auto simplifiedLhsAsNonLoc = simplifiedLhs.getAs<NonLoc>()) {
lhs = *simplifiedLhsAsNonLoc;
continue;
}
// Is the RHS a constant?
if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
@ -1103,7 +1102,6 @@ const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
if (SymbolRef Sym = V.getAsSymbol())
return state->getConstraintManager().getSymVal(state, Sym);
// FIXME: Add support for SymExprs.
return nullptr;
}
@ -1135,6 +1133,24 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
return cache(Sym, SVB.makeSymbolVal(Sym));
}
// Return the known const value for the Sym if available, or return Undef
// otherwise.
SVal getConst(SymbolRef Sym) {
const llvm::APSInt *Const =
State->getConstraintManager().getSymVal(State, Sym);
if (Const)
return Loc::isLocType(Sym->getType()) ? (SVal)SVB.makeIntLocVal(*Const)
: (SVal)SVB.makeIntVal(*Const);
return UndefinedVal();
}
SVal getConstOrVisit(SymbolRef Sym) {
const SVal Ret = getConst(Sym);
if (Ret.isUndef())
return Visit(Sym);
return Ret;
}
public:
Simplifier(ProgramStateRef State)
: State(State), SVB(State->getStateManager().getSValBuilder()) {}
@ -1148,15 +1164,14 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
return SVB.makeSymbolVal(S);
}
// TODO: Support SymbolCast. Support IntSymExpr when/if we actually
// start producing them.
// TODO: Support SymbolCast.
SVal VisitSymIntExpr(const SymIntExpr *S) {
auto I = Cached.find(S);
if (I != Cached.end())
return I->second;
SVal LHS = Visit(S->getLHS());
SVal LHS = getConstOrVisit(S->getLHS());
if (isUnchanged(S->getLHS(), LHS))
return skip(S);
@ -1183,6 +1198,20 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
S, SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType()));
}
SVal VisitIntSymExpr(const IntSymExpr *S) {
auto I = Cached.find(S);
if (I != Cached.end())
return I->second;
SVal RHS = getConstOrVisit(S->getRHS());
if (isUnchanged(S->getRHS(), RHS))
return skip(S);
SVal LHS = SVB.makeIntVal(S->getLHS());
return cache(
S, SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType()));
}
SVal VisitSymSymExpr(const SymSymExpr *S) {
auto I = Cached.find(S);
if (I != Cached.end())
@ -1196,8 +1225,9 @@ SVal SimpleSValBuilder::simplifySVal(ProgramStateRef State, SVal V) {
Loc::isLocType(S->getRHS()->getType()))
return skip(S);
SVal LHS = Visit(S->getLHS());
SVal RHS = Visit(S->getRHS());
SVal LHS = getConstOrVisit(S->getLHS());
SVal RHS = getConstOrVisit(S->getRHS());
if (isUnchanged(S->getLHS(), LHS) && isUnchanged(S->getRHS(), RHS))
return skip(S);
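The new getConst/getConstOrVisit helpers above ask the constraint manager for a known constant first and only fall back to visiting (recursively simplifying) the sub-symbol when none is available. A small standalone model of that lookup-then-fallback shape; the constant table and the visit() stub are illustrative, not analyzer APIs, and std::optional requires C++17:

#include <cassert>
#include <map>
#include <optional>
#include <string>

// Stand-in for the constraint manager: symbols known to be constant.
static const std::map<std::string, int> KnownConstants = {{"x", 3}};

// Mirrors getConst(): return the constant if one is known, "no value"
// otherwise (the analyzer returns UndefinedVal in that case).
static std::optional<int> getConst(const std::string &Sym) {
  auto It = KnownConstants.find(Sym);
  if (It == KnownConstants.end())
    return std::nullopt;
  return It->second;
}

// Hypothetical fallback "visit": here it just returns a sentinel, where the
// analyzer would recursively simplify the sub-symbol instead.
static int visit(const std::string &Sym) {
  (void)Sym;
  return -1;
}

// Mirrors getConstOrVisit(): prefer the known constant, else fall back.
static int getConstOrVisit(const std::string &Sym) {
  if (std::optional<int> C = getConst(Sym))
    return *C;
  return visit(Sym);
}

int main() {
  assert(getConstOrVisit("x") == 3);  // Known constant wins.
  assert(getConstOrVisit("y") == -1); // Unconstrained: falls back to visit().
  return 0;
}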

View File

@ -591,16 +591,24 @@ AnalysisConsumer::getModeForDecl(Decl *D, AnalysisMode Mode) {
// - Main source file: run both path-sensitive and non-path-sensitive checks.
// - Header files: run non-path-sensitive checks only.
// - System headers: don't run any checks.
SourceManager &SM = Ctx->getSourceManager();
const Stmt *Body = D->getBody();
SourceLocation SL = Body ? Body->getBeginLoc() : D->getLocation();
SL = SM.getExpansionLoc(SL);
if (Opts->AnalyzeAll)
return Mode;
if (!Opts->AnalyzeAll && !Mgr->isInCodeFile(SL)) {
if (SL.isInvalid() || SM.isInSystemHeader(SL))
return AM_None;
const SourceManager &SM = Ctx->getSourceManager();
const SourceLocation Loc = [&SM](Decl *D) -> SourceLocation {
const Stmt *Body = D->getBody();
SourceLocation SL = Body ? Body->getBeginLoc() : D->getLocation();
return SM.getExpansionLoc(SL);
}(D);
// Ignore system headers.
if (Loc.isInvalid() || SM.isInSystemHeader(Loc))
return AM_None;
// Disable path sensitive analysis in user-headers.
if (!Mgr->isInCodeFile(Loc))
return Mode & ~AM_Path;
}
return Mode;
}
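After the rewrite, getModeForDecl applies three checks in order: the AnalyzeAll override wins outright, system headers are skipped entirely, and other headers merely lose path-sensitive analysis. A compact standalone restatement of that decision table; the enum values and the DeclInfo struct are placeholders, not the actual clang types:

#include <cassert>

// Placeholder mode bits, mirroring AM_Syntax / AM_Path in spirit.
enum AnalysisMode { AM_None = 0, AM_Syntax = 1, AM_Path = 2 };

struct DeclInfo {
  bool LocValid;       // Expansion location could be computed.
  bool InSystemHeader; // Location is in a system header.
  bool InCodeFile;     // Location is in the main source file.
};

// Same ordering as the diff: analyze-all wins, system headers are skipped,
// user headers lose path-sensitive analysis, everything else keeps Mode.
static unsigned getModeForDecl(const DeclInfo &D, unsigned Mode,
                               bool AnalyzeAll) {
  if (AnalyzeAll)
    return Mode;
  if (!D.LocValid || D.InSystemHeader)
    return AM_None;
  if (!D.InCodeFile)
    return Mode & ~AM_Path;
  return Mode;
}

int main() {
  unsigned Full = AM_Syntax | AM_Path;
  assert(getModeForDecl({true, true, false}, Full, false) == AM_None);   // System header.
  assert(getModeForDecl({true, false, false}, Full, false) == AM_Syntax); // User header.
  assert(getModeForDecl({true, false, true}, Full, false) == Full);       // Main file.
  return 0;
}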

View File

@ -129,7 +129,7 @@ DependencyScanningFilesystemSharedCache::get(StringRef Key, bool Minimized) {
///
/// This is kinda hacky, it would be better if we knew what kind of file Clang
/// was expecting instead.
static bool shouldMinimize(StringRef Filename) {
static bool shouldMinimizeBasedOnExtension(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return true; // C++ standard library
@ -147,26 +147,43 @@ static bool shouldCacheStatFailures(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return false; // This may be the module cache directory.
return shouldMinimize(Filename); // Only cache stat failures on source files.
// Only cache stat failures on source files.
return shouldMinimizeBasedOnExtension(Filename);
}
void DependencyScanningWorkerFilesystem::ignoreFile(StringRef RawFilename) {
llvm::SmallString<256> Filename;
llvm::sys::path::native(RawFilename, Filename);
IgnoredFiles.insert(Filename);
}
bool DependencyScanningWorkerFilesystem::shouldIgnoreFile(
void DependencyScanningWorkerFilesystem::disableMinimization(
StringRef RawFilename) {
llvm::SmallString<256> Filename;
llvm::sys::path::native(RawFilename, Filename);
return IgnoredFiles.contains(Filename);
NotToBeMinimized.insert(Filename);
}
bool DependencyScanningWorkerFilesystem::shouldMinimize(StringRef RawFilename) {
if (!shouldMinimizeBasedOnExtension(RawFilename))
return false;
llvm::SmallString<256> Filename;
llvm::sys::path::native(RawFilename, Filename);
return !NotToBeMinimized.contains(Filename);
}
CachedFileSystemEntry DependencyScanningWorkerFilesystem::createFileSystemEntry(
llvm::ErrorOr<llvm::vfs::Status> &&MaybeStatus, StringRef Filename,
bool ShouldMinimize) {
if (!MaybeStatus)
return CachedFileSystemEntry(MaybeStatus.getError());
if (MaybeStatus->isDirectory())
return CachedFileSystemEntry::createDirectoryEntry(std::move(*MaybeStatus));
return CachedFileSystemEntry::createFileEntry(Filename, getUnderlyingFS(),
ShouldMinimize);
}
llvm::ErrorOr<const CachedFileSystemEntry *>
DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
const StringRef Filename) {
bool ShouldMinimize = !shouldIgnoreFile(Filename) && shouldMinimize(Filename);
bool ShouldMinimize = shouldMinimize(Filename);
if (const auto *Entry = Cache.getCachedEntry(Filename, ShouldMinimize))
return Entry;
@ -182,23 +199,15 @@ DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
CachedFileSystemEntry &CacheEntry = SharedCacheEntry.Value;
if (!CacheEntry.isValid()) {
llvm::vfs::FileSystem &FS = getUnderlyingFS();
auto MaybeStatus = FS.status(Filename);
if (!MaybeStatus) {
if (!shouldCacheStatFailures(Filename))
// HACK: We need to always restat non source files if the stat fails.
// This is because Clang first looks up the module cache and module
// files before building them, and then looks for them again. If we
// cache the stat failure, it won't see them the second time.
return MaybeStatus.getError();
else
CacheEntry = CachedFileSystemEntry(MaybeStatus.getError());
} else if (MaybeStatus->isDirectory())
CacheEntry = CachedFileSystemEntry::createDirectoryEntry(
std::move(*MaybeStatus));
else
CacheEntry = CachedFileSystemEntry::createFileEntry(Filename, FS,
ShouldMinimize);
auto MaybeStatus = getUnderlyingFS().status(Filename);
if (!MaybeStatus && !shouldCacheStatFailures(Filename))
// HACK: We need to always restat non source files if the stat fails.
// This is because Clang first looks up the module cache and module
// files before building them, and then looks for them again. If we
// cache the stat failure, it won't see them the second time.
return MaybeStatus.getError();
CacheEntry = createFileSystemEntry(std::move(MaybeStatus), Filename,
ShouldMinimize);
}
Result = &CacheEntry;
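The net effect of the renames in this file is that minimization now defaults to on for anything whose extension looks like a source or header file and is then switched off per path via disableMinimization. A rough standalone model of that decision; the extension list is abbreviated and paths are not normalized to native form, unlike the real code:

#include <cassert>
#include <set>
#include <string>

// Files explicitly opted out of minimization (prebuilt-module inputs,
// header-search entries, VFS overlay files in the real scanner).
static std::set<std::string> NotToBeMinimized;

// Rough stand-in for shouldMinimizeBasedOnExtension(): only source-like
// files are candidates for minimization.
static bool shouldMinimizeBasedOnExtension(const std::string &Path) {
  auto Dot = Path.rfind('.');
  if (Dot == std::string::npos)
    return true; // Extension-less headers, C++ standard library style.
  std::string Ext = Path.substr(Dot);
  return Ext == ".h" || Ext == ".hpp" || Ext == ".c" || Ext == ".cpp";
}

static void disableMinimization(const std::string &Path) {
  NotToBeMinimized.insert(Path);
}

// Minimize only when the extension qualifies and nobody opted the file out.
static bool shouldMinimize(const std::string &Path) {
  if (!shouldMinimizeBasedOnExtension(Path))
    return false;
  return NotToBeMinimized.count(Path) == 0;
}

int main() {
  disableMinimization("prebuilt/module_input.h");
  assert(shouldMinimize("foo.cpp"));
  assert(!shouldMinimize("image.png"));               // Wrong extension.
  assert(!shouldMinimize("prebuilt/module_input.h")); // Explicitly opted out.
  return 0;
}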

View File

@ -193,20 +193,19 @@ class DependencyScanningAction : public tooling::ToolAction {
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
DepFS->clearIgnoredFiles();
// Ignore any files that contributed to prebuilt modules. The implicit
// build validates the modules by comparing the reported sizes of their
// inputs to the current state of the filesystem. Minimization would throw
// this mechanism off.
DepFS->enableMinimizationOfAllFiles();
// Don't minimize any files that contributed to prebuilt modules. The
// implicit build validates the modules by comparing the reported sizes of
// their inputs to the current state of the filesystem. Minimization would
// throw this mechanism off.
for (const auto &File : PrebuiltModulesInputFiles)
DepFS->ignoreFile(File.getKey());
// Add any filenames that were explicity passed in the build settings and
// that might be opened, as we want to ensure we don't run source
// minimization on them.
DepFS->disableMinimization(File.getKey());
// Don't minimize any files that were explicitly passed in the build
// settings and that might be opened.
for (const auto &E : ScanInstance.getHeaderSearchOpts().UserEntries)
DepFS->ignoreFile(E.Path);
DepFS->disableMinimization(E.Path);
for (const auto &F : ScanInstance.getHeaderSearchOpts().VFSOverlayFiles)
DepFS->ignoreFile(F);
DepFS->disableMinimization(F);
// Support for virtual file system overlays on top of the caching
// filesystem.

View File

@ -107,7 +107,7 @@ static void visitASTNodeRecursive(ASTNode node, ASTNode base,
static void visitHierarchy(RecordKeeper &records,
StringRef nodeClassName,
ASTNodeHierarchyVisitor<ASTNode> visit) {
// Check for the node class, just as a sanity check.
// Check for the node class, just as a basic correctness check.
if (!records.getClass(nodeClassName)) {
PrintFatalError(Twine("cannot find definition for node class ")
+ nodeClassName);

View File

@ -0,0 +1,61 @@
#ifndef MEMPROF_DATA_INC
#define MEMPROF_DATA_INC
/*===-- MemProfData.inc - MemProf profiling runtime structures -*- C++ -*-=== *\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|*
\*===----------------------------------------------------------------------===*/
/*
* This is the main file that defines all the data structures, signatures, and
* constant literals that are shared across the profiling runtime library
* and host tools (reader/writer).
*
* This file has two identical copies. The primary copy lives in LLVM and
* the other one sits in compiler-rt/include/profile directory. To make changes
* in this file, first modify the primary copy and copy it over to compiler-rt.
* Testing of any change in this file can start only after the two copies are
* synced up.
*
\*===----------------------------------------------------------------------===*/
#ifdef _MSC_VER
#define PACKED(__decl__) __pragma(pack(push,1)) __decl__ __pragma(pack(pop))
#else
#define PACKED(__decl__) __decl__ __attribute__((__packed__))
#endif
// A 64-bit magic number to uniquely identify the raw binary memprof profile file.
#define MEMPROF_RAW_MAGIC_64 \
((uint64_t)255 << 56 | (uint64_t)'m' << 48 | (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | \
(uint64_t)'o' << 24 | (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129)
// The version number of the raw binary format.
#define MEMPROF_RAW_VERSION 1ULL
namespace llvm {
namespace memprof {
// A struct describing the header used for the raw binary memprof profile format.
PACKED(struct Header {
uint64_t Magic;
uint64_t Version;
uint64_t TotalSize;
uint64_t SegmentOffset;
uint64_t MIBOffset;
uint64_t StackOffset;
});
// A struct describing the information necessary to describe a /proc/maps
// segment entry for a particular binary/library identified by its build id.
PACKED(struct SegmentEntry {
uint64_t Start;
uint64_t End;
uint64_t Offset;
uint8_t BuildId[32];
});
} // namespace memprof
} // namespace llvm
#endif
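The magic constant above packs an 8-byte signature into a single uint64_t: a leading 0xff byte, the ASCII bytes of "mprofr", and a trailing 0x81 (129). A small standalone check of that packing; only the shift expression mirrors the macro, the expected bytes are spelled out by hand:

#include <cassert>
#include <cstdint>

int main() {
  // Same construction as MEMPROF_RAW_MAGIC_64 in the header above.
  const uint64_t Magic =
      (uint64_t)255 << 56 | (uint64_t)'m' << 48 | (uint64_t)'p' << 40 |
      (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | (uint64_t)'f' << 16 |
      (uint64_t)'r' << 8 | (uint64_t)129;

  // Reading the bytes back from most to least significant spells the tag.
  assert(((Magic >> 56) & 0xff) == 0xff);
  assert(((Magic >> 48) & 0xff) == 'm');
  assert(((Magic >> 40) & 0xff) == 'p');
  assert(((Magic >> 32) & 0xff) == 'r');
  assert(((Magic >> 24) & 0xff) == 'o');
  assert(((Magic >> 16) & 0xff) == 'f');
  assert(((Magic >> 8) & 0xff) == 'r');
  assert((Magic & 0xff) == 129);
  return 0;
}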

View File

@ -130,23 +130,24 @@ DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
// Strict init-order checking is dlopen-hostile:
// https://github.com/google/sanitizers/issues/178
#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
do { \
if (flags()->strict_init_order) \
StopInitOrderChecking(); \
CheckNoDeepBind(filename, flag); \
} while (false)
#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
if (AsanThread *t = GetCurrentThread()) { \
*begin = t->tls_begin(); \
*end = t->tls_end(); \
} else { \
*begin = *end = 0; \
}
# define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
({ \
if (flags()->strict_init_order) \
StopInitOrderChecking(); \
CheckNoDeepBind(filename, flag); \
REAL(dlopen)(filename, flag); \
})
# define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
# define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
# define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
# define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
# define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
if (AsanThread *t = GetCurrentThread()) { \
*begin = t->tls_begin(); \
*end = t->tls_end(); \
} else { \
*begin = *end = 0; \
}
#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
do { \
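The switch from COMMON_INTERCEPTOR_ON_DLOPEN to COMMON_INTERCEPTOR_DLOPEN above turns the hook into a GNU statement expression, so the macro can run the strict-init-order and deep-bind checks and still yield the return value of the real dlopen call. A minimal standalone illustration of that statement-expression shape; it compiles with GCC/Clang only, and the printf is a stand-in for the ASan checks:

#include <cassert>
#include <cstdio>

// Pretend "real" function whose result the wrapper must pass through.
static int real_open_library(const char *name, int flag) {
  (void)name;
  return flag + 1;
}

// A statement expression ({ ... }) runs its statements and evaluates to the
// value of the last one, so the wrapper can do extra work *and* hand back the
// real call's result -- which a do { ... } while (false) block cannot.
#define WRAPPED_OPEN(name, flag)                                              \
  ({                                                                          \
    std::printf("about to load %s\n", name);                                  \
    real_open_library(name, flag);                                            \
  })

int main() {
  int Handle = WRAPPED_OPEN("libfoo.so", 41);
  assert(Handle == 42);
  return 0;
}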

View File

@ -460,6 +460,10 @@ static bool SuppressErrorReport(uptr pc) {
void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
uptr access_size, u32 exp, bool fatal) {
if (__asan_test_only_reported_buggy_pointer) {
*__asan_test_only_reported_buggy_pointer = addr;
return;
}
if (!fatal && SuppressErrorReport(pc)) return;
ENABLE_FRAME_POINTER;

View File

@ -85,12 +85,8 @@ void ShowStatsAndAbort() {
NOINLINE
static void ReportGenericErrorWrapper(uptr addr, bool is_write, int size,
int exp_arg, bool fatal) {
if (__asan_test_only_reported_buggy_pointer) {
*__asan_test_only_reported_buggy_pointer = addr;
} else {
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
}
GET_CALLER_PC_BP_SP;
ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, fatal);
}
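Both hunks move the test-only escape hatch into ReportGenericError itself: when a test installs the pointer, the runtime records the faulting address there and returns instead of producing a full report. A generic sketch of that "test hook pointer" pattern; the names are illustrative, not the ASan internals:

#include <cassert>
#include <cstdint>
#include <cstdio>

// When non-null, error reporting is redirected into this slot so a unit test
// can observe the faulting address without aborting the process.
static uintptr_t *test_only_reported_addr = nullptr;

static void reportGenericError(uintptr_t Addr) {
  if (test_only_reported_addr) {
    *test_only_reported_addr = Addr; // Record for the test and bail out.
    return;
  }
  std::fprintf(stderr, "fatal error at %p\n", (void *)Addr);
  // A real runtime would print a diagnostic report and abort here.
}

int main() {
  uintptr_t Observed = 0;
  test_only_reported_addr = &Observed; // Test mode: capture, don't report.
  reportGenericError(0x1234);
  assert(Observed == 0x1234);
  return 0;
}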
// --------------- LowLevelAllocateCallback ---------- {{{1

Some files were not shown because too many files have changed in this diff.