Merge llvm-project main llvmorg-14-init-18294-gdb01b123d012

This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and
openmp to llvmorg-14-init-18294-gdb01b123d012, the last commit before
the upstream release/14.x branch was created.

PR:		261742
MFC after:	2 weeks
This commit is contained in:
Dimitry Andric 2022-02-05 21:07:43 +01:00
commit 1fd87a682a
808 changed files with 9719 additions and 5594 deletions

View File

@ -653,6 +653,20 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// Returns the clang bytecode interpreter context.
interp::Context &getInterpContext();
// Settings that control constant evaluation in CUDA/HIP compilations,
// saved and restored as a unit by CUDAConstantEvalContextRAII below.
struct CUDAConstantEvalContext {
/// Do not allow wrong-sided variables in constant expressions.
bool NoWrongSidedVars = false;
} CUDAConstantEvalCtx;
// RAII guard: on construction, saves the current CUDAConstantEvalCtx and
// overrides its NoWrongSidedVars flag; on destruction, restores the saved
// state so enclosing evaluations see their original settings.
struct CUDAConstantEvalContextRAII {
ASTContext &Ctx;
CUDAConstantEvalContext SavedCtx;
CUDAConstantEvalContextRAII(ASTContext &Ctx_, bool NoWrongSidedVars)
: Ctx(Ctx_), SavedCtx(Ctx_.CUDAConstantEvalCtx) {
Ctx_.CUDAConstantEvalCtx.NoWrongSidedVars = NoWrongSidedVars;
}
~CUDAConstantEvalContextRAII() { Ctx.CUDAConstantEvalCtx = SavedCtx; }
};
/// Returns the dynamic AST node parent map context.
ParentMapContext &getParentMapContext();
@ -2616,23 +2630,32 @@ class ASTContext : public RefCountedBase<ASTContext> {
/// template name uses the shortest form of the dependent
/// nested-name-specifier, which itself contains all canonical
/// types, values, and templates.
TemplateName getCanonicalTemplateName(TemplateName Name) const;
TemplateName getCanonicalTemplateName(const TemplateName &Name) const;
/// Determine whether the given template names refer to the same
/// template.
bool hasSameTemplateName(TemplateName X, TemplateName Y);
bool hasSameTemplateName(const TemplateName &X, const TemplateName &Y) const;
/// Determine whether the two declarations refer to the same entity.
bool isSameEntity(NamedDecl *X, NamedDecl *Y);
///
/// FIXME: isSameEntity is not const due to its implementation calls
/// hasSameFunctionTypeIgnoringExceptionSpec which may alter this.
bool isSameEntity(const NamedDecl *X, const NamedDecl *Y);
/// Determine whether two template parameter lists are similar enough
/// that they may be used in declarations of the same template.
bool isSameTemplateParameterList(TemplateParameterList *X,
TemplateParameterList *Y);
///
/// FIXME: isSameTemplateParameterList is not const since it calls
/// isSameTemplateParameter.
bool isSameTemplateParameterList(const TemplateParameterList *X,
const TemplateParameterList *Y);
/// Determine whether two template parameters are similar enough
/// that they may be used in declarations of the same template.
bool isSameTemplateParameter(NamedDecl *X, NamedDecl *Y);
///
/// FIXME: isSameTemplateParameterList is not const since it calls
/// isSameEntity.
bool isSameTemplateParameter(const NamedDecl *X, const NamedDecl *Y);
/// Retrieve the "canonical" template argument.
///

View File

@ -193,7 +193,7 @@ namespace IDFCalculatorDetail {
/// Specialize ChildrenGetterTy to skip nullpointer successors.
template <bool IsPostDom>
struct ChildrenGetterTy<clang::CFGBlock, IsPostDom> {
using NodeRef = typename GraphTraits<clang::CFGBlock>::NodeRef;
using NodeRef = typename GraphTraits<clang::CFGBlock *>::NodeRef;
using ChildrenTy = SmallVector<NodeRef, 8>;
ChildrenTy get(const NodeRef &N) {

View File

@ -1494,9 +1494,6 @@ template <> struct GraphTraits< ::clang::CFGBlock *> {
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
template <> struct GraphTraits<clang::CFGBlock>
: GraphTraits<clang::CFGBlock *> {};
template <> struct GraphTraits< const ::clang::CFGBlock *> {
using NodeRef = const ::clang::CFGBlock *;
using ChildIteratorType = ::clang::CFGBlock::const_succ_iterator;
@ -1506,9 +1503,6 @@ template <> struct GraphTraits< const ::clang::CFGBlock *> {
static ChildIteratorType child_end(NodeRef N) { return N->succ_end(); }
};
template <> struct GraphTraits<const clang::CFGBlock>
: GraphTraits<clang::CFGBlock *> {};
template <> struct GraphTraits<Inverse< ::clang::CFGBlock *>> {
using NodeRef = ::clang::CFGBlock *;
using ChildIteratorType = ::clang::CFGBlock::const_pred_iterator;
@ -1521,9 +1515,6 @@ template <> struct GraphTraits<Inverse< ::clang::CFGBlock *>> {
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
template <> struct GraphTraits<Inverse<clang::CFGBlock>>
: GraphTraits<clang::CFGBlock *> {};
template <> struct GraphTraits<Inverse<const ::clang::CFGBlock *>> {
using NodeRef = const ::clang::CFGBlock *;
using ChildIteratorType = ::clang::CFGBlock::const_pred_iterator;
@ -1536,9 +1527,6 @@ template <> struct GraphTraits<Inverse<const ::clang::CFGBlock *>> {
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
};
template <> struct GraphTraits<const Inverse<clang::CFGBlock>>
: GraphTraits<clang::CFGBlock *> {};
// Traits for: CFG
template <> struct GraphTraits< ::clang::CFG* >

View File

@ -27,6 +27,7 @@
#include "llvm/ADT/Any.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
namespace clang {
namespace dataflow {
@ -106,18 +107,24 @@ template <typename LatticeT> struct DataflowAnalysisState {
/// Performs dataflow analysis and returns a mapping from basic block IDs to
/// dataflow analysis states that model the respective basic blocks. Indices
/// of the returned vector correspond to basic block IDs.
/// of the returned vector correspond to basic block IDs. Returns an error if
/// the dataflow analysis cannot be performed successfully.
template <typename AnalysisT>
std::vector<llvm::Optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>
llvm::Expected<std::vector<
llvm::Optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>>
runDataflowAnalysis(const ControlFlowContext &CFCtx, AnalysisT &Analysis,
const Environment &InitEnv) {
auto TypeErasedBlockStates =
runTypeErasedDataflowAnalysis(CFCtx, Analysis, InitEnv);
if (!TypeErasedBlockStates)
return TypeErasedBlockStates.takeError();
std::vector<
llvm::Optional<DataflowAnalysisState<typename AnalysisT::Lattice>>>
BlockStates;
BlockStates.reserve(TypeErasedBlockStates.size());
llvm::transform(std::move(TypeErasedBlockStates),
BlockStates.reserve(TypeErasedBlockStates->size());
llvm::transform(std::move(*TypeErasedBlockStates),
std::back_inserter(BlockStates), [](auto &OptState) {
return std::move(OptState).map([](auto &&State) {
return DataflowAnalysisState<typename AnalysisT::Lattice>{

View File

@ -51,19 +51,36 @@ enum class SkipPast {
/// Holds the state of the program (store and heap) at a given program point.
class Environment {
public:
/// Supplements `Environment` with non-standard join operations.
class Merger {
/// Supplements `Environment` with non-standard comparison and join
/// operations.
class ValueModel {
public:
virtual ~Merger() = default;
virtual ~ValueModel() = default;
/// Given distinct `Val1` and `Val2`, modifies `MergedVal` to approximate
/// both `Val1` and `Val2`. This could be a strict lattice join or a more
/// general widening operation. If this function returns true, `MergedVal`
/// will be assigned to a storage location of type `Type` in `Env`.
/// Returns true if and only if `Val1` is equivalent to `Val2`.
///
/// Requirements:
///
/// `Val1` and `Val2` must be distinct.
///
/// `Val1` and `Val2` must model values of type `Type`.
virtual bool compareEquivalent(QualType Type, const Value &Val1,
const Value &Val2) {
// FIXME: Consider adding QualType to StructValue and removing the Type
// argument here.
return false;
}
/// Modifies `MergedVal` to approximate both `Val1` and `Val2`. This could
/// be a strict lattice join or a more general widening operation. If this
/// function returns true, `MergedVal` will be assigned to a storage
/// location of type `Type` in `Env`.
///
/// Requirements:
///
/// `Val1` and `Val2` must be distinct.
///
/// `Val1`, `Val2`, and `MergedVal` must model values of type `Type`.
virtual bool merge(QualType Type, const Value &Val1, const Value &Val2,
Value &MergedVal, Environment &Env) {
return false;
@ -84,9 +101,29 @@ class Environment {
/// with a symbolic representation of the `this` pointee.
Environment(DataflowAnalysisContext &DACtx, const DeclContext &DeclCtx);
bool operator==(const Environment &) const;
/// Returns true if and only if the environment is equivalent to `Other`, i.e
/// the two environments:
/// - have the same mappings from declarations to storage locations,
/// - have the same mappings from expressions to storage locations,
/// - have the same or equivalent (according to `Model`) values assigned to
/// the same storage locations.
///
/// Requirements:
///
/// `Other` and `this` must use the same `DataflowAnalysisContext`.
bool equivalentTo(const Environment &Other,
Environment::ValueModel &Model) const;
LatticeJoinEffect join(const Environment &, Environment::Merger &);
/// Joins the environment with `Other` by taking the intersection of storage
/// locations and values that are stored in them. Distinct values that are
/// assigned to the same storage locations in the environment and `Other` are
/// merged using `Model`.
///
/// Requirements:
///
/// `Other` and `this` must use the same `DataflowAnalysisContext`.
LatticeJoinEffect join(const Environment &Other,
Environment::ValueModel &Model);
// FIXME: Rename `createOrGetStorageLocation` to `getOrCreateStorageLocation`,
// `getStableStorageLocation`, or something more appropriate.

View File

@ -112,7 +112,7 @@ template <typename Key, typename ElementLattice>
std::ostream &
operator<<(std::ostream &Os,
const clang::dataflow::MapLattice<Key, ElementLattice> &M) {
std::string Separator = "";
std::string Separator;
Os << "{";
for (const auto &E : M) {
Os << std::exchange(Separator, ", ") << E.first << " => " << E.second;
@ -125,7 +125,7 @@ template <typename ElementLattice>
std::ostream &
operator<<(std::ostream &Os,
const clang::dataflow::VarMapLattice<ElementLattice> &M) {
std::string Separator = "";
std::string Separator;
Os << "{";
for (const auto &E : M) {
Os << std::exchange(Separator, ", ") << E.first->getName().str() << " => "

View File

@ -25,6 +25,7 @@
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "llvm/ADT/Any.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/Error.h"
namespace clang {
namespace dataflow {
@ -40,7 +41,7 @@ struct TypeErasedLattice {
};
/// Type-erased base class for dataflow analyses built on a single lattice type.
class TypeErasedDataflowAnalysis : public Environment::Merger {
class TypeErasedDataflowAnalysis : public Environment::ValueModel {
/// Determines whether to apply the built-in transfer functions.
// FIXME: Remove this option once the framework supports composing analyses
// (at which point the built-in transfer functions can be simply a standalone
@ -115,8 +116,9 @@ TypeErasedDataflowAnalysisState transferBlock(
/// Performs dataflow analysis and returns a mapping from basic block IDs to
/// dataflow analysis states that model the respective basic blocks. Indices
/// of the returned vector correspond to basic block IDs.
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>
/// of the returned vector correspond to basic block IDs. Returns an error if
/// the dataflow analysis cannot be performed successfully.
llvm::Expected<std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>>
runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv);

View File

@ -432,45 +432,45 @@ implementation detail and not intended to be used by external users.
The syntax of the attribute is as follows:
.. code-block:: c++
.. code-block:: text
class __attribute__((sycl_special_class)) accessor {};
class [[clang::sycl_special_class]] accessor {};
class __attribute__((sycl_special_class)) accessor {};
class [[clang::sycl_special_class]] accessor {};
This is a code example that illustrates the use of the attribute:
.. code-block:: c++
class __attribute__((sycl_special_class)) SpecialType {
int F1;
int F2;
void __init(int f1) {
F1 = f1;
F2 = f1;
}
void __finalize() {}
public:
SpecialType() = default;
int getF2() const { return F2; }
};
class __attribute__((sycl_special_class)) SpecialType {
int F1;
int F2;
void __init(int f1) {
F1 = f1;
F2 = f1;
}
void __finalize() {}
public:
SpecialType() = default;
int getF2() const { return F2; }
};
int main () {
SpecialType T;
cgh.single_task([=] {
T.getF2();
});
}
int main () {
SpecialType T;
cgh.single_task([=] {
T.getF2();
});
}
This would trigger the following kernel entry point in the AST:
.. code-block:: c++
void __sycl_kernel(int f1) {
SpecialType T;
T.__init(f1);
...
T.__finalize()
}
void __sycl_kernel(int f1) {
SpecialType T;
T.__init(f1);
...
T.__finalize()
}
}];
}

View File

@ -62,6 +62,9 @@ BUILTIN(__builtin_arm_ldg, "v*v*", "t")
BUILTIN(__builtin_arm_stg, "vv*", "t")
BUILTIN(__builtin_arm_subp, "Uiv*v*", "t")
// Memory Operations
BUILTIN(__builtin_arm_mops_memset_tag, "v*v*iz", "")
// Memory barrier
BUILTIN(__builtin_arm_dmb, "vUi", "nc")
BUILTIN(__builtin_arm_dsb, "vUi", "nc")

View File

@ -276,6 +276,11 @@ class CodeGenOptions : public CodeGenOptionsBase {
/// CUDA runtime back-end for incorporating them into host-side object file.
std::string CudaGpuBinaryFileName;
/// List of filenames and section name pairs passed in using the
/// -fembed-offload-object option to embed device-side offloading objects into
/// the host as a named section. Input passed in as '<filename>,<section>'
std::vector<std::string> OffloadObjects;
/// The name of the file to which the backend should save YAML optimization
/// records.
std::string OptRecordFile;

View File

@ -145,6 +145,13 @@ def warn_conflicting_nullability_attr_overriding_param_types : Warning<
def err_nullability_conflicting : Error<
"nullability specifier %0 conflicts with existing specifier %1">;
def warn_target_unsupported_branch_protection_option: Warning <
"ignoring '-mbranch-protection=' option because the '%0' architecture does not support it">,
InGroup<BranchProtection>;
def warn_target_unsupported_branch_protection_attribute: Warning <
"ignoring the 'branch-protection' attribute because the '%0' architecture does not support it">,
InGroup<BranchProtection>;
}
// OpenCL Section 6.8.g

View File

@ -627,8 +627,10 @@ def err_cc1_unbounded_vscale_min : Error<
def err_drv_ssp_missing_offset_argument : Error<
"'%0' is used without '-mstack-protector-guard-offset', and there is no default">;
def err_drv_only_one_offload_target_supported_in : Error<
"Only one offload target is supported in %0.">;
def err_drv_only_one_offload_target_supported : Error<
"only one offload target is supported">;
def err_drv_invalid_or_unsupported_offload_target : Error<
"Invalid or unsupported offload target: '%0'.">;
"invalid or unsupported offload target: '%0'">;
def err_drv_cuda_offload_only_emit_bc : Error<
"CUDA offload target is supported only along with --emit-llvm">;
}

View File

@ -181,6 +181,10 @@ class LangOptions : public LangOptionsBase {
/// global-scope inline variables incorrectly.
Ver12,
/// Attempt to be ABI-compatible with code generated by Clang 13.0.x.
/// This causes clang to not pack non-POD members of packed structs.
Ver13,
/// Conform to the underlying platform's C and C++ ABIs as closely
/// as we can.
Latest

View File

@ -590,6 +590,17 @@ class TargetInfo : public virtual TransferrableTargetInfo,
return false;
}
// Different targets may support a different maximum width for the _BitInt
// type, depending on what operations are supported.
virtual size_t getMaxBitIntWidth() const {
// FIXME: this value should be llvm::IntegerType::MAX_INT_BITS, which is
// maximum bit width that LLVM claims its IR can support. However, most
// backends currently have a bug where they only support division
// operations on types that are <= 128 bits and crash otherwise. We're
// setting the max supported value to 128 to be conservative.
return 128;
}
/// Determine whether _Float16 is supported on this target.
virtual bool hasLegalHalfType() const { return HasLegalHalfType; }
@ -1289,9 +1300,15 @@ class TargetInfo : public virtual TransferrableTargetInfo,
bool BranchTargetEnforcement = false;
};
/// Determine if the Architecture in this TargetInfo supports branch
/// protection
virtual bool isBranchProtectionSupportedArch(StringRef Arch) const {
return false;
}
/// Determine if this TargetInfo supports the given branch protection
/// specification
virtual bool validateBranchProtection(StringRef Spec,
virtual bool validateBranchProtection(StringRef Spec, StringRef Arch,
BranchProtectionInfo &BPI,
StringRef &Err) const {
Err = "";

View File

@ -44,6 +44,9 @@ namespace clang {
void EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
llvm::MemoryBufferRef Buf);
void EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts,
DiagnosticsEngine &Diags);
}
#endif

View File

@ -73,6 +73,7 @@ class Action {
OffloadBundlingJobClass,
OffloadUnbundlingJobClass,
OffloadWrapperJobClass,
LinkerWrapperJobClass,
StaticLibJobClass,
JobClassFirst = PreprocessJobClass,
@ -642,6 +643,17 @@ class OffloadWrapperJobAction : public JobAction {
}
};
/// Job action for the linker-wrapper step of the offloading toolchain.
/// NOTE(review): presumably executed by the LinkerWrapper tool added to
/// ToolChain in this same change — confirm against the driver's tool
/// selection logic.
class LinkerWrapperJobAction : public JobAction {
void anchor() override;
public:
LinkerWrapperJobAction(ActionList &Inputs, types::ID Type);
/// LLVM-style RTTI: matches actions of kind LinkerWrapperJobClass.
static bool classof(const Action *A) {
return A->getKind() == LinkerWrapperJobClass;
}
};
class StaticLibJobAction : public JobAction {
void anchor() override;

View File

@ -12,6 +12,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/LLVM.h"
#include "clang/Driver/Action.h"
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Options.h"
#include "clang/Driver/Phases.h"
#include "clang/Driver/ToolChain.h"
@ -38,13 +39,14 @@ namespace clang {
namespace driver {
class Command;
class Compilation;
class InputInfo;
class JobList;
class JobAction;
class SanitizerArgs;
class ToolChain;
typedef SmallVector<InputInfo, 4> InputInfoList;
class Command;
class Compilation;
class JobList;
class JobAction;
class SanitizerArgs;
class ToolChain;
/// Describes the kind of LTO mode selected via -f(no-)?lto(=.*)? options.
enum LTOKind {
@ -171,9 +173,11 @@ class Driver {
/// The file to log CC_LOG_DIAGNOSTICS output to, if enabled.
std::string CCLogDiagnosticsFilename;
/// An input type and its arguments.
using InputTy = std::pair<types::ID, const llvm::opt::Arg *>;
/// A list of inputs and their types for the given arguments.
typedef SmallVector<std::pair<types::ID, const llvm::opt::Arg *>, 16>
InputList;
using InputList = SmallVector<InputTy, 16>;
/// Whether the driver should follow g++ like behavior.
bool CCCIsCXX() const { return Mode == GXXMode; }
@ -413,6 +417,18 @@ class Driver {
void BuildUniversalActions(Compilation &C, const ToolChain &TC,
const InputList &BAInputs) const;
/// BuildOffloadingActions - Construct the list of actions to perform for the
/// offloading toolchain that will be embedded in the host.
///
/// \param C - The compilation that is being built.
/// \param Args - The input arguments.
/// \param Input - The input type and arguments
/// \param HostAction - The host action used in the offloading toolchain.
Action *BuildOffloadingActions(Compilation &C,
llvm::opt::DerivedArgList &Args,
const InputTy &Input,
Action *HostAction) const;
/// Check that the file referenced by Value exists. If it doesn't,
/// issue a diagnostic and return false.
/// If TypoCorrect is true and the file does not exist, see if it looks
@ -503,13 +519,12 @@ class Driver {
/// BuildJobsForAction - Construct the jobs to perform for the action \p A and
/// return an InputInfo for the result of running \p A. Will only construct
/// jobs for a given (Action, ToolChain, BoundArch, DeviceKind) tuple once.
InputInfo
BuildJobsForAction(Compilation &C, const Action *A, const ToolChain *TC,
StringRef BoundArch, bool AtTopLevel, bool MultipleArchs,
const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfo>
&CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const;
InputInfoList BuildJobsForAction(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfoList>
&CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const;
/// Returns the default name for linked images (e.g., "a.out").
const char *getDefaultImageName() const;
@ -617,10 +632,10 @@ class Driver {
/// Helper used in BuildJobsForAction. Doesn't use the cache when building
/// jobs specifically for the given action, but will use the cache when
/// building jobs for the Action's inputs.
InputInfo BuildJobsForActionNoCache(
InputInfoList BuildJobsForActionNoCache(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfo>
std::map<std::pair<const Action *, std::string>, InputInfoList>
&CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const;

View File

@ -208,6 +208,8 @@ class Command {
Arguments = std::move(List);
}
void replaceExecutable(const char *Exe) { Executable = Exe; }
const char *getExecutable() const { return Executable; }
const llvm::opt::ArgStringList &getArguments() const { return Arguments; }

View File

@ -638,8 +638,8 @@ def _DASH_DASH : Option<["--"], "", KIND_REMAINING_ARGS>,
Flags<[NoXarchOption, CoreOption]>;
def A : JoinedOrSeparate<["-"], "A">, Flags<[RenderJoined]>, Group<gfortran_Group>;
def B : JoinedOrSeparate<["-"], "B">, MetaVarName<"<prefix>">,
HelpText<"Search $prefix/$triple-$file and $prefix$file for executables, libraries, "
"includes, and data files used by the compiler. $prefix may or may not be a directory">;
HelpText<"Search $prefix$file for executables, libraries, and data files. "
"If $prefix is a directory, search $prefix/$file">;
def gcc_toolchain : Joined<["--"], "gcc-toolchain=">, Flags<[NoXarchOption]>,
HelpText<"Search for GCC installation in the specified directory on targets which commonly use GCC. "
"The directory usually contains 'lib{,32,64}/gcc{,-cross}/$triple' and 'include'. If specified, "
@ -1143,8 +1143,7 @@ defm autolink : BoolFOption<"autolink",
// languages and accept other values such as CPU/GPU architectures,
// offload kinds and target aliases.
def offload_EQ : CommaJoined<["--"], "offload=">, Flags<[NoXarchOption]>,
HelpText<"Specify comma-separated list of offloading target triples"
" (HIP only)">;
HelpText<"Specify comma-separated list of offloading target triples (CUDA and HIP only)">;
// C++ Coroutines TS
defm coroutines_ts : BoolFOption<"coroutines-ts",
@ -1152,6 +1151,10 @@ defm coroutines_ts : BoolFOption<"coroutines-ts",
PosFlag<SetTrue, [CC1Option], "Enable support for the C++ Coroutines TS">,
NegFlag<SetFalse>>;
def fembed_offload_object_EQ : Joined<["-"], "fembed-offload-object=">,
Group<f_Group>, Flags<[NoXarchOption, CC1Option]>,
HelpText<"Embed Offloading device-side binary into host object file as a section.">,
MarshallingInfoStringVector<CodeGenOpts<"OffloadObjects">>;
def fembed_bitcode_EQ : Joined<["-"], "fembed-bitcode=">,
Group<f_Group>, Flags<[NoXarchOption, CC1Option, CC1AsOption]>, MetaVarName<"<option>">,
HelpText<"Embed LLVM bitcode (option: off, all, bitcode, marker)">,
@ -1907,7 +1910,7 @@ defm legacy_pass_manager : BoolOption<"f", "legacy-pass-manager",
def fexperimental_new_pass_manager : Flag<["-"], "fexperimental-new-pass-manager">,
Group<f_clang_Group>, Flags<[CC1Option]>, Alias<fno_legacy_pass_manager>;
def fno_experimental_new_pass_manager : Flag<["-"], "fno-experimental-new-pass-manager">,
Group<f_clang_Group>, Flags<[CC1Option]>, Alias<flegacy_pass_manager>;
Group<f_clang_Group>, Flags<[CC1Option,NoDriverOption]>, Alias<flegacy_pass_manager>;
def fexperimental_strict_floating_point : Flag<["-"], "fexperimental-strict-floating-point">,
Group<f_clang_Group>, Flags<[CC1Option]>,
HelpText<"Enables experimental strict floating point in LLVM.">,
@ -2473,6 +2476,8 @@ defm openmp_optimistic_collapse : BoolFOption<"openmp-optimistic-collapse",
PosFlag<SetTrue, [CC1Option]>, NegFlag<SetFalse>, BothFlags<[NoArgumentUnused, HelpHidden]>>;
def static_openmp: Flag<["-"], "static-openmp">,
HelpText<"Use the static host OpenMP runtime while linking.">;
def fopenmp_new_driver : Flag<["-"], "fopenmp-new-driver">, Flags<[CC1Option]>, Group<Action_Group>,
HelpText<"Use the new driver for OpenMP offloading.">;
def fno_optimize_sibling_calls : Flag<["-"], "fno-optimize-sibling-calls">, Group<f_Group>;
def foptimize_sibling_calls : Flag<["-"], "foptimize-sibling-calls">, Group<f_Group>;
defm escaping_block_tail_calls : BoolFOption<"escaping-block-tail-calls",
@ -3895,6 +3900,11 @@ def frtlib_add_rpath: Flag<["-"], "frtlib-add-rpath">, Flags<[NoArgumentUnused]>
HelpText<"Add -rpath with architecture-specific resource directory to the linker flags">;
def fno_rtlib_add_rpath: Flag<["-"], "fno-rtlib-add-rpath">, Flags<[NoArgumentUnused]>,
HelpText<"Do not add -rpath with architecture-specific resource directory to the linker flags">;
defm openmp_implicit_rpath: BoolFOption<"openmp-implicit-rpath",
LangOpts<"OpenMP">,
DefaultTrue,
PosFlag<SetTrue, [], "Set rpath on OpenMP executables">,
NegFlag<SetFalse>>;
def r : Flag<["-"], "r">, Flags<[LinkerInput,NoArgumentUnused]>,
Group<Link_Group>;
def save_temps_EQ : Joined<["-", "--"], "save-temps=">, Flags<[CC1Option, NoXarchOption]>,

View File

@ -151,6 +151,7 @@ class ToolChain {
mutable std::unique_ptr<Tool> IfsMerge;
mutable std::unique_ptr<Tool> OffloadBundler;
mutable std::unique_ptr<Tool> OffloadWrapper;
mutable std::unique_ptr<Tool> LinkerWrapper;
Tool *getClang() const;
Tool *getFlang() const;
@ -161,6 +162,7 @@ class ToolChain {
Tool *getClangAs() const;
Tool *getOffloadBundler() const;
Tool *getOffloadWrapper() const;
Tool *getLinkerWrapper() const;
mutable bool SanitizerArgsChecked = false;
mutable std::unique_ptr<XRayArgs> XRayArguments;
@ -711,6 +713,22 @@ class ToolChain {
const llvm::fltSemantics *FPType = nullptr) const {
return llvm::DenormalMode::getIEEE();
}
// We want to expand the shortened versions of the triples passed in to
// the values used for the bitcode libraries.
//
// Maps a bare GPU architecture (e.g. "nvptx64" or "amdgcn") to its full
// canonical offloading triple; any triple that already carries an explicit
// vendor and OS is returned unchanged.
static llvm::Triple getOpenMPTriple(StringRef TripleStr) {
llvm::Triple TT(TripleStr);
// Only expand when the vendor or OS component was left unspecified.
if (TT.getVendor() == llvm::Triple::UnknownVendor ||
TT.getOS() == llvm::Triple::UnknownOS) {
if (TT.getArch() == llvm::Triple::nvptx)
return llvm::Triple("nvptx-nvidia-cuda");
if (TT.getArch() == llvm::Triple::nvptx64)
return llvm::Triple("nvptx64-nvidia-cuda");
if (TT.getArch() == llvm::Triple::amdgcn)
return llvm::Triple("amdgcn-amd-amdhsa");
}
// Unrecognized architectures and fully-specified triples fall through as-is.
return TT;
}
};
/// Set a ToolChain's effective triple. Reset it when the registration object

View File

@ -552,7 +552,7 @@ def StdCLibraryFunctionArgsChecker : Checker<"StdCLibraryFunctionArgs">,
"or is EOF.">,
Dependencies<[StdCLibraryFunctionsChecker]>,
WeakDependencies<[CallAndMessageChecker, NonNullParamChecker, StreamChecker]>,
Documentation<NotDocumented>;
Documentation<HasAlphaDocumentation>;
} // end "alpha.unix"

View File

@ -3370,8 +3370,9 @@ QualType ASTContext::getBlockPointerType(QualType T) const {
/// lvalue reference to the specified type.
QualType
ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
assert(getCanonicalType(T) != OverloadTy &&
"Unresolved overloaded function type");
assert((!T->isPlaceholderType() ||
T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
"Unresolved placeholder type");
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
@ -3409,6 +3410,10 @@ ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
/// getRValueReferenceType - Return the uniqued reference to the type for an
/// rvalue reference to the specified type.
QualType ASTContext::getRValueReferenceType(QualType T) const {
assert((!T->isPlaceholderType() ||
T->isSpecificPlaceholderType(BuiltinType::UnknownAny)) &&
"Unresolved placeholder type");
// Unique pointers, to guarantee there is only one pointer of a particular
// structure.
llvm::FoldingSetNodeID ID;
@ -6099,7 +6104,8 @@ ASTContext::getNameForTemplate(TemplateName Name,
llvm_unreachable("bad template name kind!");
}
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
TemplateName
ASTContext::getCanonicalTemplateName(const TemplateName &Name) const {
switch (Name.getKind()) {
case TemplateName::QualifiedTemplate:
case TemplateName::Template: {
@ -6141,13 +6147,14 @@ TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
llvm_unreachable("bad template name!");
}
bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
X = getCanonicalTemplateName(X);
Y = getCanonicalTemplateName(Y);
return X.getAsVoidPointer() == Y.getAsVoidPointer();
bool ASTContext::hasSameTemplateName(const TemplateName &X,
const TemplateName &Y) const {
return getCanonicalTemplateName(X).getAsVoidPointer() ==
getCanonicalTemplateName(Y).getAsVoidPointer();
}
bool ASTContext::isSameTemplateParameter(NamedDecl *X, NamedDecl *Y) {
bool ASTContext::isSameTemplateParameter(const NamedDecl *X,
const NamedDecl *Y) {
if (X->getKind() != Y->getKind())
return false;
@ -6198,8 +6205,8 @@ bool ASTContext::isSameTemplateParameter(NamedDecl *X, NamedDecl *Y) {
TY->getTemplateParameters());
}
bool ASTContext::isSameTemplateParameterList(TemplateParameterList *X,
TemplateParameterList *Y) {
bool ASTContext::isSameTemplateParameterList(const TemplateParameterList *X,
const TemplateParameterList *Y) {
if (X->size() != Y->size())
return false;
@ -6302,7 +6309,7 @@ static bool hasSameOverloadableAttrs(const FunctionDecl *A,
return true;
}
bool ASTContext::isSameEntity(NamedDecl *X, NamedDecl *Y) {
bool ASTContext::isSameEntity(const NamedDecl *X, const NamedDecl *Y) {
if (X == Y)
return true;
@ -6409,6 +6416,8 @@ bool ASTContext::isSameEntity(NamedDecl *X, NamedDecl *Y) {
if (getLangOpts().CPlusPlus17 && XFPT && YFPT &&
(isUnresolvedExceptionSpec(XFPT->getExceptionSpecType()) ||
isUnresolvedExceptionSpec(YFPT->getExceptionSpecType())) &&
// FIXME: We could make isSameEntity const after we make
// hasSameFunctionTypeIgnoringExceptionSpec const.
hasSameFunctionTypeIgnoringExceptionSpec(XT, YT))
return true;
return false;
@ -8286,6 +8295,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
*NotEncodedT = T;
return;
case Type::BitInt:
if (NotEncodedT)
*NotEncodedT = T;
return;
// We could see an undeduced auto type here during error recovery.
// Just ignore it.
case Type::Auto:
@ -8293,7 +8307,6 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string &S,
return;
case Type::Pipe:
case Type::BitInt:
#define ABSTRACT_TYPE(KIND, BASE)
#define TYPE(KIND, BASE)
#define DEPENDENT_TYPE(KIND, BASE) \

View File

@ -983,6 +983,8 @@ namespace {
discardCleanups();
}
ASTContext &getCtx() const override { return Ctx; }
void setEvaluatingDecl(APValue::LValueBase Base, APValue &Value,
EvaluatingDeclKind EDK = EvaluatingDeclKind::Ctor) {
EvaluatingDecl = Base;
@ -1116,8 +1118,6 @@ namespace {
Expr::EvalStatus &getEvalStatus() const override { return EvalStatus; }
ASTContext &getCtx() const override { return Ctx; }
// If we have a prior diagnostic, it will be noting that the expression
// isn't a constant expression. This diagnostic is more important,
// unless we require this evaluation to produce a constant expression.
@ -2216,6 +2216,19 @@ static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
if (!isForManglingOnly(Kind) && Var->hasAttr<DLLImportAttr>())
// FIXME: Diagnostic!
return false;
// In CUDA/HIP device compilation, only device side variables have
// constant addresses.
if (Info.getCtx().getLangOpts().CUDA &&
Info.getCtx().getLangOpts().CUDAIsDevice &&
Info.getCtx().CUDAConstantEvalCtx.NoWrongSidedVars) {
if ((!Var->hasAttr<CUDADeviceAttr>() &&
!Var->hasAttr<CUDAConstantAttr>() &&
!Var->getType()->isCUDADeviceBuiltinSurfaceType() &&
!Var->getType()->isCUDADeviceBuiltinTextureType()) ||
Var->hasAttr<HIPManagedAttr>())
return false;
}
}
if (const auto *FD = dyn_cast<const FunctionDecl>(BaseVD)) {
// __declspec(dllimport) must be handled very carefully:

View File

@ -1887,7 +1887,12 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
UnfilledBitsInLastUnit = 0;
LastBitfieldStorageUnitSize = 0;
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
llvm::Triple Target = Context.getTargetInfo().getTriple();
bool FieldPacked = (Packed && (!FieldClass || FieldClass->isPOD() ||
Context.getLangOpts().getClangABICompat() <=
LangOptions::ClangABI::Ver13 ||
Target.isPS4() || Target.isOSDarwin())) ||
D->hasAttr<PackedAttr>();
AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
CharUnits FieldSize;

View File

@ -41,6 +41,21 @@ llvm::DenseMap<K, V> intersectDenseMaps(const llvm::DenseMap<K, V> &Map1,
return Result;
}
/// Returns true if and only if `Val1` is equivalent to `Val2`.
///
/// Equivalence holds when the two pointers denote the same `Value` object,
/// when both are `IndirectionValue`s of the same kind that point at the same
/// `StorageLocation`, or when the analysis-specific `Model` declares them
/// equivalent via `compareEquivalent`.
static bool equivalentValues(QualType Type, Value *Val1, Value *Val2,
                             Environment::ValueModel &Model) {
  // Identity: the same object is trivially equivalent to itself.
  if (Val1 == Val2)
    return true;

  if (auto *IndVal1 = dyn_cast<IndirectionValue>(Val1)) {
    // NOTE(review): `cast<>` here asserts that `Val2` is also an
    // IndirectionValue whenever `Val1` is — callers are expected to pass
    // values of matching kinds.
    auto *IndVal2 = cast<IndirectionValue>(Val2);
    assert(IndVal1->getKind() == IndVal2->getKind());
    // Indirection values are compared by pointee-location identity, not by
    // the contents stored at that location.
    return &IndVal1->getPointeeLoc() == &IndVal2->getPointeeLoc();
  }

  // All other value kinds are delegated to the user-supplied model.
  return Model.compareEquivalent(Type, *Val1, *Val2);
}
Environment::Environment(DataflowAnalysisContext &DACtx,
const DeclContext &DeclCtx)
: Environment(DACtx) {
@ -68,13 +83,40 @@ Environment::Environment(DataflowAnalysisContext &DACtx,
}
}
bool Environment::operator==(const Environment &Other) const {
bool Environment::equivalentTo(const Environment &Other,
Environment::ValueModel &Model) const {
assert(DACtx == Other.DACtx);
return DeclToLoc == Other.DeclToLoc && LocToVal == Other.LocToVal;
if (DeclToLoc != Other.DeclToLoc)
return false;
if (ExprToLoc != Other.ExprToLoc)
return false;
if (LocToVal.size() != Other.LocToVal.size())
return false;
for (auto &Entry : LocToVal) {
const StorageLocation *Loc = Entry.first;
assert(Loc != nullptr);
Value *Val = Entry.second;
assert(Val != nullptr);
auto It = Other.LocToVal.find(Loc);
if (It == Other.LocToVal.end())
return false;
assert(It->second != nullptr);
if (!equivalentValues(Loc->getType(), Val, It->second, Model))
return false;
}
return true;
}
LatticeJoinEffect Environment::join(const Environment &Other,
Environment::Merger &Merger) {
Environment::ValueModel &Model) {
assert(DACtx == Other.DACtx);
auto Effect = LatticeJoinEffect::Unchanged;
@ -89,8 +131,12 @@ LatticeJoinEffect Environment::join(const Environment &Other,
if (ExprToLocSizeBefore != ExprToLoc.size())
Effect = LatticeJoinEffect::Changed;
llvm::DenseMap<const StorageLocation *, Value *> MergedLocToVal;
for (auto &Entry : LocToVal) {
// Move `LocToVal` so that `Environment::ValueModel::merge` can safely assign
// values to storage locations while this code iterates over the current
// assignments.
llvm::DenseMap<const StorageLocation *, Value *> OldLocToVal =
std::move(LocToVal);
for (auto &Entry : OldLocToVal) {
const StorageLocation *Loc = Entry.first;
assert(Loc != nullptr);
@ -102,20 +148,19 @@ LatticeJoinEffect Environment::join(const Environment &Other,
continue;
assert(It->second != nullptr);
if (It->second == Val) {
MergedLocToVal.insert({Loc, Val});
if (equivalentValues(Loc->getType(), Val, It->second, Model)) {
LocToVal.insert({Loc, Val});
continue;
}
// FIXME: Consider destroying `MergedValue` immediately if `Merger::merge`
// returns false to avoid storing unneeded values in `DACtx`.
// FIXME: Consider destroying `MergedValue` immediately if
// `ValueModel::merge` returns false to avoid storing unneeded values in
// `DACtx`.
if (Value *MergedVal = createValue(Loc->getType()))
if (Merger.merge(Loc->getType(), *Val, *It->second, *MergedVal, *this))
MergedLocToVal.insert({Loc, MergedVal});
if (Model.merge(Loc->getType(), *Val, *It->second, *MergedVal, *this))
LocToVal.insert({Loc, MergedVal});
}
const unsigned LocToValSizeBefore = LocToVal.size();
LocToVal = std::move(MergedLocToVal);
if (LocToValSizeBefore != LocToVal.size())
if (OldLocToVal.size() != LocToVal.size())
Effect = LatticeJoinEffect::Changed;
return Effect;

View File

@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include <memory>
#include <system_error>
#include <utility>
#include <vector>
@ -26,7 +27,7 @@
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Error.h"
namespace clang {
namespace dataflow {
@ -190,7 +191,7 @@ TypeErasedDataflowAnalysisState transferBlock(
return State;
}
std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>
llvm::Expected<std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>>
runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
TypeErasedDataflowAnalysis &Analysis,
const Environment &InitEnv) {
@ -216,8 +217,8 @@ runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
static constexpr uint32_t MaxIterations = 1 << 16;
while (const CFGBlock *Block = Worklist.dequeue()) {
if (++Iterations > MaxIterations) {
llvm::errs() << "Maximum number of iterations reached, giving up.\n";
break;
return llvm::createStringError(std::errc::timed_out,
"maximum number of iterations reached");
}
const llvm::Optional<TypeErasedDataflowAnalysisState> &OldBlockState =
@ -228,7 +229,7 @@ runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
if (OldBlockState.hasValue() &&
Analysis.isEqualTypeErased(OldBlockState.getValue().Lattice,
NewBlockState.Lattice) &&
OldBlockState->Env == NewBlockState.Env) {
OldBlockState->Env.equivalentTo(NewBlockState.Env, Analysis)) {
// The state of `Block` didn't change after transfer so there's no need to
// revisit its successors.
continue;

View File

@ -138,7 +138,7 @@ bool AArch64TargetInfo::setABI(const std::string &Name) {
return true;
}
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec,
bool AArch64TargetInfo::validateBranchProtection(StringRef Spec, StringRef,
BranchProtectionInfo &BPI,
StringRef &Err) const {
llvm::ARM::ParsedBranchProtection PBP;

View File

@ -70,8 +70,9 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
StringRef getABI() const override;
bool setABI(const std::string &Name) override;
bool validateBranchProtection(StringRef, BranchProtectionInfo &,
StringRef &) const override;
bool validateBranchProtection(StringRef Spec, StringRef Arch,
BranchProtectionInfo &BPI,
StringRef &Err) const override;
bool isValidCPUName(StringRef Name) const override;
void fillValidCPUList(SmallVectorImpl<StringRef> &Values) const override;

View File

@ -371,13 +371,34 @@ bool ARMTargetInfo::setABI(const std::string &Name) {
return false;
}
bool ARMTargetInfo::validateBranchProtection(StringRef Spec,
// Returns whether branch protection can be supported for the named
// architecture. Resolves `Arch` (falling back to the triple's arch name when
// `Arch` is not a recognized CPU/arch), then requires both a Thumb-2 (T32)
// capable triple and an M-profile architecture.
bool ARMTargetInfo::isBranchProtectionSupportedArch(StringRef Arch) const {
  llvm::ARM::ArchKind CPUArch = llvm::ARM::parseCPUArch(Arch);
  // Fall back to the target triple's architecture name when the attribute
  // string did not name a known CPU or architecture.
  if (CPUArch == llvm::ARM::ArchKind::INVALID)
    CPUArch = llvm::ARM::parseArch(getTriple().getArchName());
  if (CPUArch == llvm::ARM::ArchKind::INVALID)
    return false;

  // Build a triple for the resolved architecture, preserving the current
  // vendor/OS/environment, so Thumb-2 support can be queried on it.
  StringRef ArchFeature = llvm::ARM::getArchName(CPUArch);
  auto a =
      llvm::Triple(ArchFeature, getTriple().getVendorName(),
                   getTriple().getOSName(), getTriple().getEnvironmentName());

  StringRef SubArch = llvm::ARM::getSubArch(CPUArch);
  llvm::ARM::ProfileKind Profile = llvm::ARM::parseArchProfile(SubArch);
  // Branch protection is only supported on T32-capable, M-profile targets.
  return a.isArmT32() && (Profile == llvm::ARM::ProfileKind::M);
}
bool ARMTargetInfo::validateBranchProtection(StringRef Spec, StringRef Arch,
BranchProtectionInfo &BPI,
StringRef &Err) const {
llvm::ARM::ParsedBranchProtection PBP;
if (!llvm::ARM::parseBranchProtection(Spec, PBP, Err))
return false;
if (!isBranchProtectionSupportedArch(Arch))
return false;
BPI.SignReturnAddr =
llvm::StringSwitch<LangOptions::SignReturnAddressScopeKind>(PBP.Scope)
.Case("non-leaf", LangOptions::SignReturnAddressScopeKind::NonLeaf)

View File

@ -126,8 +126,10 @@ class LLVM_LIBRARY_VISIBILITY ARMTargetInfo : public TargetInfo {
StringRef getABI() const override;
bool setABI(const std::string &Name) override;
bool validateBranchProtection(StringRef, BranchProtectionInfo &,
StringRef &) const override;
bool isBranchProtectionSupportedArch(StringRef Arch) const override;
bool validateBranchProtection(StringRef Spec, StringRef Arch,
BranchProtectionInfo &BPI,
StringRef &Err) const override;
// FIXME: This should be based on Arch attributes, not CPU names.
bool

View File

@ -260,6 +260,7 @@ void WebAssemblyTargetInfo::adjust(DiagnosticsEngine &Diags,
if (!HasAtomics) {
Opts.POSIXThreads = false;
Opts.setThreadModel(LangOptions::ThreadModelKind::Single);
Opts.ThreadsafeStatics = false;
}
}

View File

@ -84,6 +84,7 @@
#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
#include "llvm/Transforms/Utils/Debugify.h"
#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/NameAnonGlobals.h"
#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <memory>
@ -1745,8 +1746,36 @@ void clang::EmbedBitcode(llvm::Module *M, const CodeGenOptions &CGOpts,
llvm::MemoryBufferRef Buf) {
if (CGOpts.getEmbedBitcode() == CodeGenOptions::Embed_Off)
return;
llvm::EmbedBitcodeInModule(
llvm::embedBitcodeInModule(
*M, Buf, CGOpts.getEmbedBitcode() != CodeGenOptions::Embed_Marker,
CGOpts.getEmbedBitcode() != CodeGenOptions::Embed_Bitcode,
CGOpts.CmdArgs);
}
/// Embed offloading object files into the module.
///
/// Each entry of `CGOpts.OffloadObjects` must be a "filename,section" pair;
/// the file's contents are embedded into `M` in a section named
/// ".llvm.offloading.<section>". Malformed pairs or unreadable files are
/// reported through `Diags` and abort further embedding.
void clang::EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts,
                        DiagnosticsEngine &Diags) {
  if (CGOpts.OffloadObjects.empty())
    return;

  for (StringRef OffloadObject : CGOpts.OffloadObjects) {
    // Each entry must contain exactly one comma separating file and section.
    if (OffloadObject.count(',') != 1) {
      Diags.Report(Diags.getCustomDiagID(
          DiagnosticsEngine::Error, "Invalid string pair for embedding '%0'"))
          << OffloadObject;
      return;
    }
    auto FilenameAndSection = OffloadObject.split(',');
    llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ObjectOrErr =
        llvm::MemoryBuffer::getFileOrSTDIN(std::get<0>(FilenameAndSection));
    if (std::error_code EC = ObjectOrErr.getError()) {
      auto DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                          "could not open '%0' for embedding");
      Diags.Report(DiagID) << std::get<0>(FilenameAndSection);
      return;
    }
    // Place the buffer in a ".llvm.offloading."-prefixed section named after
    // the user-supplied section suffix.
    SmallString<128> SectionName(
        {".llvm.offloading.", std::get<1>(FilenameAndSection)});
    llvm::embedBufferInModule(*M, **ObjectOrErr, SectionName);
  }
}

View File

@ -9777,6 +9777,18 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Arg0, Arg1});
}
// Memory Operations (MOPS)
if (BuiltinID == AArch64::BI__builtin_arm_mops_memset_tag) {
Value *Dst = EmitScalarExpr(E->getArg(0));
Value *Val = EmitScalarExpr(E->getArg(1));
Value *Size = EmitScalarExpr(E->getArg(2));
Dst = Builder.CreatePointerCast(Dst, Int8PtrTy);
Val = Builder.CreateTrunc(Val, Int8Ty);
Size = Builder.CreateIntCast(Size, Int64Ty, false);
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::aarch64_mops_memset_tag), {Dst, Val, Size});
}
// Memory Tagging Extensions (MTE) Intrinsics
Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {

View File

@ -162,7 +162,8 @@ CodeGenFunction::EmitCXXMemberDataPointerAddress(const Expr *E, Address base,
CGM.getDynamicOffsetAlignment(base.getAlignment(),
memberPtrType->getClass()->getAsCXXRecordDecl(),
memberAlign);
return Address(ptr, memberAlign);
return Address(ptr, ConvertTypeForMem(memberPtrType->getPointeeType()),
memberAlign);
}
CharUnits CodeGenModule::computeNonVirtualBaseClassOffset(

View File

@ -150,7 +150,7 @@ Address CodeGenFunction::CreateMemTemp(QualType Ty, CharUnits Align,
Result = Address(
Builder.CreateBitCast(Result.getPointer(), VectorTy->getPointerTo()),
Result.getAlignment());
VectorTy, Result.getAlignment());
}
return Result;
}

View File

@ -1834,8 +1834,8 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
// at the end of each iteration.
CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
LValue elementLV =
CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
LValue elementLV = CGF.MakeAddrLValue(
Address(element, llvmElementType, elementAlign), elementType);
if (InnerLoop) {
// If the subexpression is an ArrayInitLoopExpr, share its cleanup.

View File

@ -1108,10 +1108,10 @@ void CodeGenFunction::EmitNewArrayInitializer(
StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
ILE->getInit(i)->getType(), CurPtr,
AggValueSlot::DoesNotOverlap);
CurPtr = Address(Builder.CreateInBoundsGEP(CurPtr.getElementType(),
CurPtr.getPointer(),
Builder.getSize(1),
"array.exp.next"),
CurPtr = Address(Builder.CreateInBoundsGEP(
CurPtr.getElementType(), CurPtr.getPointer(),
Builder.getSize(1), "array.exp.next"),
CurPtr.getElementType(),
StartAlign.alignmentAtOffset((i + 1) * ElementSize));
}

View File

@ -851,6 +851,7 @@ bool ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
}
llvm::Constant *ConstStructBuilder::Finalize(QualType Type) {
Type = Type.getNonReferenceType();
RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
llvm::Type *ValTy = CGM.getTypes().ConvertType(Type);
return Builder.build(ValTy, RD->hasFlexibleArrayMember());

View File

@ -1125,20 +1125,25 @@ void CGOpenMPRuntimeGPU::createOffloadEntry(llvm::Constant *ID,
llvm::GlobalValue::LinkageTypes) {
// TODO: Add support for global variables on the device after declare target
// support.
if (!isa<llvm::Function>(Addr))
llvm::Function *Fn = dyn_cast<llvm::Function>(Addr);
if (!Fn)
return;
llvm::Module &M = CGM.getModule();
llvm::LLVMContext &Ctx = CGM.getLLVMContext();
// Get "nvvm.annotations" metadata node
// Get "nvvm.annotations" metadata node.
llvm::NamedMDNode *MD = M.getOrInsertNamedMetadata("nvvm.annotations");
llvm::Metadata *MDVals[] = {
llvm::ConstantAsMetadata::get(Addr), llvm::MDString::get(Ctx, "kernel"),
llvm::ConstantAsMetadata::get(Fn), llvm::MDString::get(Ctx, "kernel"),
llvm::ConstantAsMetadata::get(
llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
// Append metadata to nvvm.annotations
// Append metadata to nvvm.annotations.
MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
// Add a function attribute for the kernel.
Fn->addFnAttr(llvm::Attribute::get(Ctx, "kernel"));
}
void CGOpenMPRuntimeGPU::emitTargetOutlinedFunction(
@ -1198,7 +1203,8 @@ CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
llvm_unreachable("OpenMP can only handle device code.");
llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
if (CGM.getLangOpts().OpenMPTargetNewRuntime) {
if (CGM.getLangOpts().OpenMPTargetNewRuntime &&
!CGM.getLangOpts().OMPHostIRFile.empty()) {
OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
"__omp_rtl_debug_kind");
OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,

View File

@ -385,7 +385,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
cast<OMPTargetTeamsDistributeSimdDirective>(*S));
break;
case Stmt::OMPInteropDirectiveClass:
llvm_unreachable("Interop directive not supported yet.");
EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
break;
case Stmt::OMPDispatchDirectiveClass:
llvm_unreachable("Dispatch directive not supported yet.");

View File

@ -30,6 +30,7 @@
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
using namespace clang;
@ -6568,6 +6569,60 @@ void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
[](CodeGenFunction &) { return nullptr; });
}
/// Emit code for an OpenMP 'interop' directive.
///
/// Collects the optional 'device' and 'depend' clause operands, then
/// dispatches to the OpenMPIRBuilder for exactly one of the 'init',
/// 'destroy', or 'use' clauses present on the directive. A 'nowait' clause
/// is only legal alongside one of those three (enforced by the assert).
void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
  llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
  // Device number from an optional 'device' clause; nullptr means default.
  llvm::Value *Device = nullptr;
  if (const auto *C = S.getSingleClause<OMPDeviceClause>())
    Device = EmitScalarExpr(C->getDevice());

  // Dependence count and array from an optional 'depend' clause; both stay
  // nullptr when the clause is absent.
  llvm::Value *NumDependences = nullptr;
  llvm::Value *DependenceAddress = nullptr;
  if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
    OMPTaskDataTy::DependData Dependencies(DC->getDependencyKind(),
                                           DC->getModifier());
    Dependencies.DepExprs.append(DC->varlist_begin(), DC->varlist_end());
    std::pair<llvm::Value *, Address> DependencePair =
        CGM.getOpenMPRuntime().emitDependClause(*this, Dependencies,
                                                DC->getBeginLoc());
    NumDependences = DependencePair.first;
    DependenceAddress = Builder.CreatePointerCast(
        DependencePair.second.getPointer(), CGM.Int8PtrTy);
  }

  assert(!(S.hasClausesOfKind<OMPNowaitClause>() &&
           !(S.getSingleClause<OMPInitClause>() ||
             S.getSingleClause<OMPDestroyClause>() ||
             S.getSingleClause<OMPUseClause>())) &&
         "OMPNowaitClause clause is used separately in OMPInteropDirective.");

  if (const auto *C = S.getSingleClause<OMPInitClause>()) {
    llvm::Value *InteropvarPtr =
        EmitLValue(C->getInteropVar()).getPointer(*this);
    // The interop-type is either 'target' or 'targetsync'; anything else is
    // rejected earlier, hence the assert below.
    llvm::omp::OMPInteropType InteropType = llvm::omp::OMPInteropType::Unknown;
    if (C->getIsTarget()) {
      InteropType = llvm::omp::OMPInteropType::Target;
    } else {
      assert(C->getIsTargetSync() && "Expected interop-type target/targetsync");
      InteropType = llvm::omp::OMPInteropType::TargetSync;
    }
    OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType, Device,
                                    NumDependences, DependenceAddress,
                                    S.hasClausesOfKind<OMPNowaitClause>());
  } else if (const auto *C = S.getSingleClause<OMPDestroyClause>()) {
    llvm::Value *InteropvarPtr =
        EmitLValue(C->getInteropVar()).getPointer(*this);
    OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
                                       NumDependences, DependenceAddress,
                                       S.hasClausesOfKind<OMPNowaitClause>());
  } else if (const auto *C = S.getSingleClause<OMPUseClause>()) {
    llvm::Value *InteropvarPtr =
        EmitLValue(C->getInteropVar()).getPointer(*this);
    OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
                                   NumDependences, DependenceAddress,
                                   S.hasClausesOfKind<OMPNowaitClause>());
  }
}
static void emitTargetTeamsDistributeParallelForRegion(
CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
PrePostActionTy &Action) {

View File

@ -1134,6 +1134,7 @@ void CodeGenAction::ExecuteAction() {
TheModule->setTargetTriple(TargetOpts.Triple);
}
EmbedObject(TheModule.get(), CodeGenOpts, Diagnostics);
EmbedBitcode(TheModule.get(), CodeGenOpts, *MainFile);
LLVMContext &Ctx = TheModule->getContext();

View File

@ -3562,6 +3562,7 @@ class CodeGenFunction : public CodeGenTypeCache {
void EmitOMPTargetTeamsDistributeSimdDirective(
const OMPTargetTeamsDistributeSimdDirective &S);
void EmitOMPGenericLoopDirective(const OMPGenericLoopDirective &S);
void EmitOMPInteropDirective(const OMPInteropDirective &S);
/// Emit device code for the target directive.
static void EmitOMPTargetDeviceFunction(CodeGenModule &CGM,

View File

@ -731,6 +731,7 @@ void CodeGenModule::Release() {
"tag-stack-memory-buildattr", 1);
if (Arch == llvm::Triple::thumb || Arch == llvm::Triple::thumbeb ||
Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be) {
getModule().addModuleFlag(llvm::Module::Error, "branch-target-enforcement",
@ -742,11 +743,9 @@ void CodeGenModule::Release() {
getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
LangOpts.isSignReturnAddressScopeAll());
if (Arch != llvm::Triple::thumb && Arch != llvm::Triple::thumbeb) {
getModule().addModuleFlag(llvm::Module::Error,
"sign-return-address-with-bkey",
!LangOpts.isSignReturnAddressWithAKey());
}
getModule().addModuleFlag(llvm::Module::Error,
"sign-return-address-with-bkey",
!LangOpts.isSignReturnAddressWithAKey());
}
if (!CodeGenOpts.MemoryProfileOutput.empty()) {

View File

@ -394,13 +394,6 @@ class CodeGenModule : public CodeGenTypeCache {
llvm::MapVector<GlobalDecl, StringRef> MangledDeclNames;
llvm::StringMap<GlobalDecl, llvm::BumpPtrAllocator> Manglings;
// An ordered map of canonical GlobalDecls paired with the cpu-index for
// cpu-specific name manglings.
llvm::MapVector<std::pair<GlobalDecl, unsigned>, StringRef>
CPUSpecificMangledDeclNames;
llvm::StringMap<std::pair<GlobalDecl, unsigned>, llvm::BumpPtrAllocator>
CPUSpecificManglings;
/// Global annotations.
std::vector<llvm::Constant*> Annotations;

View File

@ -5563,8 +5563,8 @@ class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
TargetInfo::BranchProtectionInfo BPI;
StringRef Error;
(void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
BPI, Error);
(void)CGM.getTarget().validateBranchProtection(
Attr.BranchProtection, Attr.Architecture, BPI, Error);
assert(Error.empty());
auto *Fn = cast<llvm::Function>(GV);
@ -6377,17 +6377,36 @@ class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
if (!Attr.BranchProtection.empty()) {
TargetInfo::BranchProtectionInfo BPI;
StringRef DiagMsg;
(void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
BPI, DiagMsg);
StringRef Arch = Attr.Architecture.empty()
? CGM.getTarget().getTargetOpts().CPU
: Attr.Architecture;
if (!CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
Arch, BPI, DiagMsg)) {
CGM.getDiags().Report(
D->getLocation(),
diag::warn_target_unsupported_branch_protection_attribute)
<< Arch;
} else {
static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
"Unexpected SignReturnAddressScopeKind");
Fn->addFnAttr(
"sign-return-address",
SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
static const char *SignReturnAddrStr[] = {"none", "non-leaf", "all"};
assert(static_cast<unsigned>(BPI.SignReturnAddr) <= 2 &&
"Unexpected SignReturnAddressScopeKind");
Fn->addFnAttr("sign-return-address",
SignReturnAddrStr[static_cast<int>(BPI.SignReturnAddr)]);
Fn->addFnAttr("branch-target-enforcement",
BPI.BranchTargetEnforcement ? "true" : "false");
Fn->addFnAttr("branch-target-enforcement",
BPI.BranchTargetEnforcement ? "true" : "false");
}
} else if (CGM.getLangOpts().BranchTargetEnforcement ||
CGM.getLangOpts().hasSignReturnAddress()) {
// If the Branch Protection attribute is missing, validate the target
// Architecture attribute against Branch Protection command line
// settings.
if (!CGM.getTarget().isBranchProtectionSupportedArch(Attr.Architecture))
CGM.getDiags().Report(
D->getLocation(),
diag::warn_target_unsupported_branch_protection_attribute)
<< Attr.Architecture;
}
}
@ -8285,12 +8304,14 @@ class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
// Check if global/static variable is defined in address space
// 1~6 (__flash, __flash1, __flash2, __flash3, __flash4, __flash5)
// but not constant.
LangAS AS = D->getType().getAddressSpace();
if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
CGM.getDiags().Report(D->getLocation(),
diag::err_verify_nonconst_addrspace)
<< "__flash*";
if (D) {
LangAS AS = D->getType().getAddressSpace();
if (isTargetAddressSpace(AS) && 1 <= toTargetAddressSpace(AS) &&
toTargetAddressSpace(AS) <= 6 && !D->getType().isConstQualified())
CGM.getDiags().Report(D->getLocation(),
diag::err_verify_nonconst_addrspace)
<< "__flash*";
}
return TargetCodeGenInfo::getGlobalVarAddressSpace(CGM, D);
}

View File

@ -43,6 +43,8 @@ const char *Action::getClassName(ActionClass AC) {
return "clang-offload-unbundler";
case OffloadWrapperJobClass:
return "clang-offload-wrapper";
case LinkerWrapperJobClass:
return "clang-linker-wrapper";
case StaticLibJobClass:
return "static-lib-linker";
}
@ -418,6 +420,12 @@ OffloadWrapperJobAction::OffloadWrapperJobAction(ActionList &Inputs,
types::ID Type)
: JobAction(OffloadWrapperJobClass, Inputs, Type) {}
// Out-of-line anchor to pin the class's vtable to this translation unit.
void LinkerWrapperJobAction::anchor() {}

// Job action that invokes the clang-linker-wrapper tool over `Inputs`.
LinkerWrapperJobAction::LinkerWrapperJobAction(ActionList &Inputs,
                                               types::ID Type)
    : JobAction(LinkerWrapperJobClass, Inputs, Type) {}
void StaticLibJobAction::anchor() {}
StaticLibJobAction::StaticLibJobAction(ActionList &Inputs, types::ID Type)

View File

@ -103,39 +103,58 @@ using namespace clang;
using namespace llvm::opt;
static llvm::Optional<llvm::Triple>
getHIPOffloadTargetTriple(const Driver &D, const ArgList &Args) {
if (Args.hasArg(options::OPT_offload_EQ)) {
auto HIPOffloadTargets = Args.getAllArgValues(options::OPT_offload_EQ);
getOffloadTargetTriple(const Driver &D, const ArgList &Args) {
auto OffloadTargets = Args.getAllArgValues(options::OPT_offload_EQ);
// Offload compilation flow does not support multiple targets for now. We
// need the HIPActionBuilder (and possibly the CudaActionBuilder{,Base}too)
// to support multiple tool chains first.
switch (OffloadTargets.size()) {
default:
D.Diag(diag::err_drv_only_one_offload_target_supported);
return llvm::None;
case 0:
D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << "";
return llvm::None;
case 1:
break;
}
return llvm::Triple(OffloadTargets[0]);
}
// HIP compilation flow does not support multiple targets for now. We need
// the HIPActionBuilder (and possibly the CudaActionBuilder{,Base}too) to
// support multiple tool chains first.
switch (HIPOffloadTargets.size()) {
default:
D.Diag(diag::err_drv_only_one_offload_target_supported_in) << "HIP";
return llvm::None;
case 0:
D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << "";
return llvm::None;
case 1:
break;
}
llvm::Triple TT(HIPOffloadTargets[0]);
if (TT.getArch() == llvm::Triple::amdgcn &&
TT.getVendor() == llvm::Triple::AMD &&
TT.getOS() == llvm::Triple::AMDHSA)
static llvm::Optional<llvm::Triple>
getNVIDIAOffloadTargetTriple(const Driver &D, const ArgList &Args,
const llvm::Triple &HostTriple) {
if (!Args.hasArg(options::OPT_offload_EQ)) {
return llvm::Triple(HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda"
: "nvptx-nvidia-cuda");
}
auto TT = getOffloadTargetTriple(D, Args);
if (TT && (TT->getArch() == llvm::Triple::spirv32 ||
TT->getArch() == llvm::Triple::spirv64)) {
if (Args.hasArg(options::OPT_emit_llvm))
return TT;
if (TT.getArch() == llvm::Triple::spirv64 &&
TT.getVendor() == llvm::Triple::UnknownVendor &&
TT.getOS() == llvm::Triple::UnknownOS)
return TT;
D.Diag(diag::err_drv_invalid_or_unsupported_offload_target)
<< HIPOffloadTargets[0];
D.Diag(diag::err_drv_cuda_offload_only_emit_bc);
return llvm::None;
}
static const llvm::Triple T("amdgcn-amd-amdhsa"); // Default HIP triple.
return T;
D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << TT->str();
return llvm::None;
}
/// Determine the offload target triple for a HIP compilation.
///
/// Without an explicit --offload= argument the canonical HIP triple
/// "amdgcn-amd-amdhsa" is used. Otherwise the single user-supplied triple is
/// accepted only if it is amdgcn-amd-amdhsa or a spirv64 triple; anything
/// else is diagnosed and None is returned.
static llvm::Optional<llvm::Triple>
getHIPOffloadTargetTriple(const Driver &D, const ArgList &Args) {
  if (!Args.hasArg(options::OPT_offload_EQ)) {
    return llvm::Triple("amdgcn-amd-amdhsa"); // Default HIP triple.
  }
  // getOffloadTargetTriple diagnoses zero or multiple --offload= values and
  // returns None in those cases.
  auto TT = getOffloadTargetTriple(D, Args);
  if (!TT)
    return llvm::None;
  if (TT->getArch() == llvm::Triple::amdgcn &&
      TT->getVendor() == llvm::Triple::AMD &&
      TT->getOS() == llvm::Triple::AMDHSA)
    return TT;
  if (TT->getArch() == llvm::Triple::spirv64)
    return TT;
  D.Diag(diag::err_drv_invalid_or_unsupported_offload_target) << TT->str();
  return llvm::None;
}
// static
@ -719,17 +738,17 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
if (IsCuda) {
const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
const llvm::Triple &HostTriple = HostTC->getTriple();
StringRef DeviceTripleStr;
auto OFK = Action::OFK_Cuda;
DeviceTripleStr =
HostTriple.isArch64Bit() ? "nvptx64-nvidia-cuda" : "nvptx-nvidia-cuda";
llvm::Triple CudaTriple(DeviceTripleStr);
auto CudaTriple =
getNVIDIAOffloadTargetTriple(*this, C.getInputArgs(), HostTriple);
if (!CudaTriple)
return;
// Use the CUDA and host triples as the key into the ToolChains map,
// because the device toolchain we create depends on both.
auto &CudaTC = ToolChains[CudaTriple.str() + "/" + HostTriple.str()];
auto &CudaTC = ToolChains[CudaTriple->str() + "/" + HostTriple.str()];
if (!CudaTC) {
CudaTC = std::make_unique<toolchains::CudaToolChain>(
*this, CudaTriple, *HostTC, C.getInputArgs(), OFK);
*this, *CudaTriple, *HostTC, C.getInputArgs(), OFK);
}
C.addOffloadDeviceToolChain(CudaTC.get(), OFK);
} else if (IsHIP) {
@ -773,21 +792,9 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
if (HasValidOpenMPRuntime) {
llvm::StringMap<const char *> FoundNormalizedTriples;
for (const char *Val : OpenMPTargets->getValues()) {
llvm::Triple TT(Val);
llvm::Triple TT(ToolChain::getOpenMPTriple(Val));
std::string NormalizedName = TT.normalize();
// We want to expand the shortened versions of the triples passed in to
// the values used for the bitcode libraries for convenience.
if (TT.getVendor() == llvm::Triple::UnknownVendor ||
TT.getOS() == llvm::Triple::UnknownOS) {
if (TT.getArch() == llvm::Triple::nvptx)
TT = llvm::Triple("nvptx-nvidia-cuda");
else if (TT.getArch() == llvm::Triple::nvptx64)
TT = llvm::Triple("nvptx64-nvidia-cuda");
else if (TT.getArch() == llvm::Triple::amdgcn)
TT = llvm::Triple("amdgcn-amd-amdhsa");
}
// Make sure we don't have a duplicate triple.
auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
if (Duplicate != FoundNormalizedTriples.end()) {
@ -3823,6 +3830,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Builder to be used to build offloading actions.
OffloadingActionBuilder OffloadBuilder(C, Args, Inputs);
// Offload kinds active for this compilation.
unsigned OffloadKinds = Action::OFK_None;
if (C.hasOffloadToolChain<Action::OFK_OpenMP>())
OffloadKinds |= Action::OFK_OpenMP;
// Construct the actions to perform.
HeaderModulePrecompileJobAction *HeaderModuleAction = nullptr;
ActionList LinkerInputs;
@ -3843,14 +3855,16 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Use the current host action in any of the offloading actions, if
// required.
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
if (!Args.hasArg(options::OPT_fopenmp_new_driver))
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
for (phases::ID Phase : PL) {
// Add any offload action the host action depends on.
Current = OffloadBuilder.addDeviceDependencesToHostAction(
Current, InputArg, Phase, PL.back(), FullPL);
if (!Args.hasArg(options::OPT_fopenmp_new_driver))
Current = OffloadBuilder.addDeviceDependencesToHostAction(
Current, InputArg, Phase, PL.back(), FullPL);
if (!Current)
break;
@ -3883,6 +3897,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
break;
}
// Try to build the offloading actions and add the result as a dependency
// to the host.
if (Args.hasArg(options::OPT_fopenmp_new_driver))
Current = BuildOffloadingActions(C, Args, I, Current);
// FIXME: Should we include any prior module file outputs as inputs of
// later actions in the same command line?
@ -3900,8 +3919,9 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Use the current host action in any of the offloading actions, if
// required.
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
if (!Args.hasArg(options::OPT_fopenmp_new_driver))
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
if (Current->getType() == types::TY_Nothing)
break;
@ -3912,7 +3932,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Actions.push_back(Current);
// Add any top level actions generated for offloading.
OffloadBuilder.appendTopLevelActions(Actions, Current, InputArg);
if (!Args.hasArg(options::OPT_fopenmp_new_driver))
OffloadBuilder.appendTopLevelActions(Actions, Current, InputArg);
else if (Current)
Current->propagateHostOffloadInfo(OffloadKinds,
/*BoundArch=*/nullptr);
}
// Add a link action if necessary.
@ -3924,16 +3948,23 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
}
if (!LinkerInputs.empty()) {
if (Action *Wrapper = OffloadBuilder.makeHostLinkAction())
LinkerInputs.push_back(Wrapper);
if (!Args.hasArg(options::OPT_fopenmp_new_driver))
if (Action *Wrapper = OffloadBuilder.makeHostLinkAction())
LinkerInputs.push_back(Wrapper);
Action *LA;
// Check if this Linker Job should emit a static library.
if (ShouldEmitStaticLibrary(Args)) {
LA = C.MakeAction<StaticLibJobAction>(LinkerInputs, types::TY_Image);
} else if (Args.hasArg(options::OPT_fopenmp_new_driver) &&
OffloadKinds != Action::OFK_None) {
LA = C.MakeAction<LinkerWrapperJobAction>(LinkerInputs, types::TY_Image);
LA->propagateHostOffloadInfo(OffloadKinds,
/*BoundArch=*/nullptr);
} else {
LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
}
LA = OffloadBuilder.processHostLinkAction(LA);
if (!Args.hasArg(options::OPT_fopenmp_new_driver))
LA = OffloadBuilder.processHostLinkAction(LA);
Actions.push_back(LA);
}
@ -4019,6 +4050,68 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Args.ClaimAllArgs(options::OPT_cuda_compile_host_device);
}
Action *Driver::BuildOffloadingActions(Compilation &C,
llvm::opt::DerivedArgList &Args,
const InputTy &Input,
Action *HostAction) const {
if (!isa<CompileJobAction>(HostAction))
return HostAction;
SmallVector<const ToolChain *, 2> ToolChains;
ActionList DeviceActions;
types::ID InputType = Input.first;
const Arg *InputArg = Input.second;
auto OpenMPTCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
for (auto TI = OpenMPTCRange.first, TE = OpenMPTCRange.second; TI != TE; ++TI)
ToolChains.push_back(TI->second);
for (unsigned I = 0; I < ToolChains.size(); ++I)
DeviceActions.push_back(C.MakeAction<InputAction>(*InputArg, InputType));
if (DeviceActions.empty())
return HostAction;
auto PL = types::getCompilationPhases(*this, Args, InputType);
for (phases::ID Phase : PL) {
if (Phase == phases::Link) {
assert(Phase == PL.back() && "linking must be final compilation step.");
break;
}
auto TC = ToolChains.begin();
for (Action *&A : DeviceActions) {
A = ConstructPhaseAction(C, Args, Phase, A, Action::OFK_OpenMP);
if (isa<CompileJobAction>(A)) {
HostAction->setCannotBeCollapsedWithNextDependentAction();
OffloadAction::HostDependence HDep(
*HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BourdArch=*/nullptr, Action::OFK_OpenMP);
OffloadAction::DeviceDependences DDep;
DDep.add(*A, **TC, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
A = C.MakeAction<OffloadAction>(HDep, DDep);
}
++TC;
}
}
OffloadAction::DeviceDependences DDeps;
auto TC = ToolChains.begin();
for (Action *A : DeviceActions) {
DDeps.add(*A, **TC, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
TC++;
}
OffloadAction::HostDependence HDep(
*HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BoundArch=*/nullptr, DDeps);
return C.MakeAction<OffloadAction>(HDep, DDeps);
}
Action *Driver::ConstructPhaseAction(
Compilation &C, const ArgList &Args, phases::ID Phase, Action *Input,
Action::OffloadKind TargetDeviceOffloadKind) const {
@ -4110,6 +4203,12 @@ Action *Driver::ConstructPhaseAction(
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
if (isUsingLTO(/* IsOffload */ true) &&
TargetDeviceOffloadKind == Action::OFK_OpenMP) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
}
if (Args.hasArg(options::OPT_emit_llvm) ||
(TargetDeviceOffloadKind == Action::OFK_HIP &&
Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc,
@ -4181,7 +4280,7 @@ void Driver::BuildJobs(Compilation &C) const {
ArchNames.insert(A->getValue());
// Set of (Action, canonical ToolChain triple) pairs we've built jobs for.
std::map<std::pair<const Action *, std::string>, InputInfo> CachedResults;
std::map<std::pair<const Action *, std::string>, InputInfoList> CachedResults;
for (Action *A : C.getActions()) {
// If we are linking an image for multiple archs then the linker wants
// -arch_multiple and -final_output <final image name>. Unfortunately, this
@ -4638,10 +4737,11 @@ static std::string GetTriplePlusArchString(const ToolChain *TC,
return TriplePlusArch;
}
InputInfo Driver::BuildJobsForAction(
InputInfoList Driver::BuildJobsForAction(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
std::map<std::pair<const Action *, std::string>, InputInfoList>
&CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const {
std::pair<const Action *, std::string> ActionTC = {
A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
@ -4649,17 +4749,18 @@ InputInfo Driver::BuildJobsForAction(
if (CachedResult != CachedResults.end()) {
return CachedResult->second;
}
InputInfo Result = BuildJobsForActionNoCache(
InputInfoList Result = BuildJobsForActionNoCache(
C, A, TC, BoundArch, AtTopLevel, MultipleArchs, LinkingOutput,
CachedResults, TargetDeviceOffloadKind);
CachedResults[ActionTC] = Result;
return Result;
}
InputInfo Driver::BuildJobsForActionNoCache(
InputInfoList Driver::BuildJobsForActionNoCache(
Compilation &C, const Action *A, const ToolChain *TC, StringRef BoundArch,
bool AtTopLevel, bool MultipleArchs, const char *LinkingOutput,
std::map<std::pair<const Action *, std::string>, InputInfo> &CachedResults,
std::map<std::pair<const Action *, std::string>, InputInfoList>
&CachedResults,
Action::OffloadKind TargetDeviceOffloadKind) const {
llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
@ -4697,7 +4798,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
// If there is a single device option, just generate the job for it.
if (OA->hasSingleDeviceDependence()) {
InputInfo DevA;
InputInfoList DevA;
OA->doOnEachDeviceDependence([&](Action *DepA, const ToolChain *DepTC,
const char *DepBoundArch) {
DevA =
@ -4715,7 +4816,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
OA->doOnEachDependence(
/*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
OffloadDependencesInputInfo.push_back(BuildJobsForAction(
OffloadDependencesInputInfo.append(BuildJobsForAction(
C, DepA, DepTC, DepBoundArch, /*AtTopLevel=*/false,
/*MultipleArchs*/ !!DepBoundArch, LinkingOutput, CachedResults,
DepA->getOffloadingDeviceKind()));
@ -4724,6 +4825,17 @@ InputInfo Driver::BuildJobsForActionNoCache(
A = BuildingForOffloadDevice
? OA->getSingleDeviceDependence(/*DoNotConsiderHostActions=*/true)
: OA->getHostDependence();
// We may have already built this action as a part of the offloading
// toolchain, return the cached input if so.
std::pair<const Action *, std::string> ActionTC = {
OA->getHostDependence(),
GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
if (CachedResults.find(ActionTC) != CachedResults.end()) {
InputInfoList Inputs = CachedResults[ActionTC];
Inputs.append(OffloadDependencesInputInfo);
return Inputs;
}
}
if (const InputAction *IA = dyn_cast<InputAction>(A)) {
@ -4733,9 +4845,9 @@ InputInfo Driver::BuildJobsForActionNoCache(
Input.claim();
if (Input.getOption().matches(options::OPT_INPUT)) {
const char *Name = Input.getValue();
return InputInfo(A, Name, /* _BaseInput = */ Name);
return {InputInfo(A, Name, /* _BaseInput = */ Name)};
}
return InputInfo(A, &Input, /* _BaseInput = */ "");
return {InputInfo(A, &Input, /* _BaseInput = */ "")};
}
if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
@ -4765,7 +4877,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
const Tool *T = TS.getTool(Inputs, CollapsedOffloadActions);
if (!T)
return InputInfo();
return {InputInfo()};
if (BuildingForOffloadDevice &&
A->getOffloadingDeviceKind() == Action::OFK_OpenMP) {
@ -4792,7 +4904,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
cast<OffloadAction>(OA)->doOnEachDependence(
/*IsHostDependence=*/BuildingForOffloadDevice,
[&](Action *DepA, const ToolChain *DepTC, const char *DepBoundArch) {
OffloadDependencesInputInfo.push_back(BuildJobsForAction(
OffloadDependencesInputInfo.append(BuildJobsForAction(
C, DepA, DepTC, DepBoundArch, /* AtTopLevel */ false,
/*MultipleArchs=*/!!DepBoundArch, LinkingOutput, CachedResults,
DepA->getOffloadingDeviceKind()));
@ -4806,7 +4918,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
// FIXME: Clean this up.
bool SubJobAtTopLevel =
AtTopLevel && (isa<DsymutilJobAction>(A) || isa<VerifyJobAction>(A));
InputInfos.push_back(BuildJobsForAction(
InputInfos.append(BuildJobsForAction(
C, Input, TC, BoundArch, SubJobAtTopLevel, MultipleArchs, LinkingOutput,
CachedResults, A->getOffloadingDeviceKind()));
}
@ -4890,8 +5002,8 @@ InputInfo Driver::BuildJobsForActionNoCache(
Arch = BoundArch;
CachedResults[{A, GetTriplePlusArchString(UI.DependentToolChain, Arch,
UI.DependentOffloadKind)}] =
CurI;
UI.DependentOffloadKind)}] = {
CurI};
}
// Now that we have all the results generated, select the one that should be
@ -4900,9 +5012,9 @@ InputInfo Driver::BuildJobsForActionNoCache(
A, GetTriplePlusArchString(TC, BoundArch, TargetDeviceOffloadKind)};
assert(CachedResults.find(ActionTC) != CachedResults.end() &&
"Result does not exist??");
Result = CachedResults[ActionTC];
Result = CachedResults[ActionTC].front();
} else if (JA->getType() == types::TY_Nothing)
Result = InputInfo(A, BaseInput);
Result = {InputInfo(A, BaseInput)};
else {
// We only have to generate a prefix for the host if this is not a top-level
// action.
@ -4955,7 +5067,7 @@ InputInfo Driver::BuildJobsForActionNoCache(
C.getArgsForToolChain(TC, BoundArch, JA->getOffloadingDeviceKind()),
LinkingOutput);
}
return Result;
return {Result};
}
const char *Driver::getDefaultImageName() const {

View File

@ -327,6 +327,12 @@ Tool *ToolChain::getOffloadWrapper() const {
return OffloadWrapper.get();
}
Tool *ToolChain::getLinkerWrapper() const {
if (!LinkerWrapper)
LinkerWrapper.reset(new tools::LinkerWrapper(*this, getLink()));
return LinkerWrapper.get();
}
Tool *ToolChain::getTool(Action::ActionClass AC) const {
switch (AC) {
case Action::AssembleJobClass:
@ -365,6 +371,8 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::OffloadWrapperJobClass:
return getOffloadWrapper();
case Action::LinkerWrapperJobClass:
return getLinkerWrapper();
}
llvm_unreachable("Invalid tool kind.");
@ -1129,8 +1137,10 @@ llvm::opt::DerivedArgList *ToolChain::TranslateOpenMPTargetArgs(
A->getOption().matches(options::OPT_Xopenmp_target);
if (A->getOption().matches(options::OPT_Xopenmp_target_EQ)) {
llvm::Triple TT(getOpenMPTriple(A->getValue(0)));
// Passing device args: -Xopenmp-target=<triple> -opt=val.
if (A->getValue(0) == getTripleString())
if (TT.getTriple() == getTripleString())
Index = Args.getBaseArgs().MakeIndex(A->getValue(1));
else
continue;

View File

@ -285,6 +285,10 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions(
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
// Link the bitcode library late if we're using device LTO.
if (getDriver().isUsingLTO(/* IsOffload */ true))
return;
std::string BitcodeSuffix;
if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
options::OPT_fno_openmp_target_new_runtime, true))

View File

@ -379,6 +379,11 @@ void AVRToolChain::addClangTargetOptions(
if (!DriverArgs.hasFlag(options::OPT_fuse_init_array,
options::OPT_fno_use_init_array, false))
CC1Args.push_back("-fno-use-init-array");
// Use `-fno-use-cxa-atexit` as default, since avr-libc does not support
// `__cxa_atexit()`.
if (!DriverArgs.hasFlag(options::OPT_fuse_cxa_atexit,
options::OPT_fno_use_cxa_atexit, false))
CC1Args.push_back("-fno-use-cxa-atexit");
}
Tool *AVRToolChain::buildLinker() const {

View File

@ -1627,7 +1627,7 @@ void RenderARMABI(const Driver &D, const llvm::Triple &Triple,
}
}
static void CollectARMPACBTIOptions(const Driver &D, const ArgList &Args,
static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, bool isAArch64) {
const Arg *A = isAArch64
? Args.getLastArg(options::OPT_msign_return_address_EQ,
@ -1636,6 +1636,12 @@ static void CollectARMPACBTIOptions(const Driver &D, const ArgList &Args,
if (!A)
return;
const Driver &D = TC.getDriver();
const llvm::Triple &Triple = TC.getEffectiveTriple();
if (!(isAArch64 || (Triple.isArmT32() && Triple.isArmMClass())))
D.Diag(diag::warn_target_unsupported_branch_protection_option)
<< Triple.getArchName();
StringRef Scope, Key;
bool IndirectBranches;
@ -1713,8 +1719,7 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
AddAAPCSVolatileBitfieldArgs(Args, CmdArgs);
// Enable/disable return address signing and indirect branch targets.
CollectARMPACBTIOptions(getToolChain().getDriver(), Args, CmdArgs,
false /*isAArch64*/);
CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, false /*isAArch64*/);
}
void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
@ -1841,8 +1846,7 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
}
// Enable/disable return address signing and indirect branch targets.
CollectARMPACBTIOptions(getToolChain().getDriver(), Args, CmdArgs,
true /*isAArch64*/);
CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, true /*isAArch64*/);
// Handle -msve_vector_bits=<bits>
if (Arg *A = Args.getLastArg(options::OPT_msve_vector_bits_EQ)) {
@ -4347,6 +4351,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
bool IsHIP = JA.isOffloading(Action::OFK_HIP);
bool IsHIPDevice = JA.isDeviceOffloading(Action::OFK_HIP);
bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
bool IsOpenMPHost = JA.isHostOffloading(Action::OFK_OpenMP);
bool IsHeaderModulePrecompile = isa<HeaderModulePrecompileJobAction>(JA);
bool IsDeviceOffloadAction = !(JA.isDeviceOffloading(Action::OFK_None) ||
JA.isDeviceOffloading(Action::OFK_Host));
@ -4365,6 +4370,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
IsHeaderModulePrecompile ? HeaderModuleInput : Inputs[0];
InputInfoList ModuleHeaderInputs;
InputInfoList OpenMPHostInputs;
const InputInfo *CudaDeviceInput = nullptr;
const InputInfo *OpenMPDeviceInput = nullptr;
for (const InputInfo &I : Inputs) {
@ -4383,6 +4389,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CudaDeviceInput = &I;
} else if (IsOpenMPDevice && !OpenMPDeviceInput) {
OpenMPDeviceInput = &I;
} else if (IsOpenMPHost) {
OpenMPHostInputs.push_back(I);
} else {
llvm_unreachable("unexpectedly given multiple inputs");
}
@ -4611,7 +4619,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_LLVM_BC)
CmdArgs.push_back("-emit-llvm-uselists");
if (IsUsingLTO) {
if (IsUsingLTO && !Args.hasArg(options::OPT_fopenmp_new_driver)) {
// Only AMDGPU supports device-side LTO.
if (IsDeviceOffloadAction && !Triple.isAMDGPU()) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
@ -6262,7 +6270,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(
options::OPT_fuse_cxa_atexit, options::OPT_fno_use_cxa_atexit,
!RawTriple.isOSAIX() && !RawTriple.isOSWindows() &&
TC.getArch() != llvm::Triple::xcore &&
((RawTriple.getVendor() != llvm::Triple::MipsTechnologies) ||
RawTriple.hasEnvironment())) ||
KernelOrKext)
@ -6890,6 +6897,25 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
// Host-side OpenMP offloading recieves the device object files and embeds it
// in a named section including the associated target triple and architecture.
if (IsOpenMPHost && !OpenMPHostInputs.empty()) {
auto InputFile = OpenMPHostInputs.begin();
auto OpenMPTCs = C.getOffloadToolChains<Action::OFK_OpenMP>();
for (auto TI = OpenMPTCs.first, TE = OpenMPTCs.second; TI != TE;
++TI, ++InputFile) {
const ToolChain *TC = TI->second;
const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP);
StringRef File =
C.getArgs().MakeArgString(TC->getInputFilename(*InputFile));
StringRef InputName = Clang::getBaseInputStem(Args, Inputs);
CmdArgs.push_back(Args.MakeArgString(
"-fembed-offload-object=" + File + "," + TC->getTripleString() + "." +
TCArgs.getLastArgValue(options::OPT_march_EQ) + "." + InputName));
}
}
if (Triple.isAMDGPU()) {
handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
@ -8116,3 +8142,122 @@ void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(getToolChain().GetProgramPath(getShortName())),
CmdArgs, Inputs, Output));
}
void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
ArgStringList CmdArgs;
if (getToolChain().getDriver().isUsingLTO(/* IsOffload */ true)) {
// Pass in target features for each toolchain.
auto OpenMPTCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
for (auto &I :
llvm::make_range(OpenMPTCRange.first, OpenMPTCRange.second)) {
const ToolChain *TC = I.second;
const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP);
ArgStringList FeatureArgs;
TC->addClangTargetOptions(TCArgs, FeatureArgs, Action::OFK_OpenMP);
auto FeatureIt = llvm::find(FeatureArgs, "-target-feature");
CmdArgs.push_back(Args.MakeArgString(
"-target-feature=" + TC->getTripleString() + "=" + *(FeatureIt + 1)));
}
// Pass in the bitcode library to be linked during LTO.
for (auto &I : llvm::make_range(OpenMPTCRange.first, OpenMPTCRange.second)) {
const ToolChain *TC = I.second;
const Driver &D = TC->getDriver();
const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP);
StringRef Arch = TCArgs.getLastArgValue(options::OPT_march_EQ);
std::string BitcodeSuffix;
if (TCArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
options::OPT_fno_openmp_target_new_runtime, true))
BitcodeSuffix += "new-";
if (TC->getTriple().isNVPTX())
BitcodeSuffix += "nvptx-";
else if (TC->getTriple().isAMDGPU())
BitcodeSuffix += "amdgpu-";
BitcodeSuffix += Arch;
ArgStringList BitcodeLibrary;
addOpenMPDeviceRTL(D, TCArgs, BitcodeLibrary, BitcodeSuffix,
TC->getTriple());
if (!BitcodeLibrary.empty())
CmdArgs.push_back(
Args.MakeArgString("-target-library=" + TC->getTripleString() +
"-" + Arch + "=" + BitcodeLibrary.back()));
}
// Pass in the optimization level to use for LTO.
if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) {
StringRef OOpt;
if (A->getOption().matches(options::OPT_O4) ||
A->getOption().matches(options::OPT_Ofast))
OOpt = "3";
else if (A->getOption().matches(options::OPT_O)) {
OOpt = A->getValue();
if (OOpt == "g")
OOpt = "1";
else if (OOpt == "s" || OOpt == "z")
OOpt = "2";
} else if (A->getOption().matches(options::OPT_O0))
OOpt = "0";
if (!OOpt.empty())
CmdArgs.push_back(Args.MakeArgString(Twine("-opt-level=O") + OOpt));
}
}
// Construct the link job so we can wrap around it.
Linker->ConstructJob(C, JA, Output, Inputs, Args, LinkingOutput);
const auto &LinkCommand = C.getJobs().getJobs().back();
CmdArgs.push_back("-host-triple");
CmdArgs.push_back(Args.MakeArgString(getToolChain().getTripleString()));
if (Args.hasArg(options::OPT_v))
CmdArgs.push_back("-v");
// Add debug information if present.
if (const Arg *A = Args.getLastArg(options::OPT_g_Group)) {
const Option &Opt = A->getOption();
if (Opt.matches(options::OPT_gN_Group)) {
if (Opt.matches(options::OPT_gline_directives_only) ||
Opt.matches(options::OPT_gline_tables_only))
CmdArgs.push_back("-gline-directives-only");
} else
CmdArgs.push_back("-g");
}
for (const auto &A : Args.getAllArgValues(options::OPT_Xcuda_ptxas))
CmdArgs.push_back(Args.MakeArgString("-ptxas-args=" + A));
// Forward remarks passes to the LLVM backend in the wrapper.
if (const Arg *A = Args.getLastArg(options::OPT_Rpass_EQ))
CmdArgs.push_back(
Args.MakeArgString(Twine("-pass-remarks=") + A->getValue()));
if (const Arg *A = Args.getLastArg(options::OPT_Rpass_missed_EQ))
CmdArgs.push_back(
Args.MakeArgString(Twine("-pass-remarks-missed=") + A->getValue()));
if (const Arg *A = Args.getLastArg(options::OPT_Rpass_analysis_EQ))
CmdArgs.push_back(
Args.MakeArgString(Twine("-pass-remarks-analysis=") + A->getValue()));
if (Args.getLastArg(options::OPT_save_temps_EQ))
CmdArgs.push_back("-save-temps");
// Add the linker arguments to be forwarded by the wrapper.
CmdArgs.push_back("-linker-path");
CmdArgs.push_back(LinkCommand->getExecutable());
CmdArgs.push_back("--");
for (const char *LinkArg : LinkCommand->getArguments())
CmdArgs.push_back(LinkArg);
const char *Exec =
Args.MakeArgString(getToolChain().GetProgramPath("clang-linker-wrapper"));
// Replace the executable and arguments of the link job with the
// wrapper.
LinkCommand->replaceExecutable(Exec);
LinkCommand->replaceArguments(CmdArgs);
}

View File

@ -170,6 +170,21 @@ class LLVM_LIBRARY_VISIBILITY OffloadWrapper final : public Tool {
const char *LinkingOutput) const override;
};
/// Linker wrapper tool.
class LLVM_LIBRARY_VISIBILITY LinkerWrapper final : public Tool {
const Tool *Linker;
public:
LinkerWrapper(const ToolChain &TC, const Tool *Linker)
: Tool("Offload::Linker", "linker", TC), Linker(Linker) {}
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
} // end namespace tools
} // end namespace driver

View File

@ -645,6 +645,22 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
/*IsLTO=*/true);
}
void tools::addOpenMPRuntimeSpecificRPath(const ToolChain &TC,
const ArgList &Args,
ArgStringList &CmdArgs) {
if (Args.hasFlag(options::OPT_fopenmp_implicit_rpath,
options::OPT_fno_openmp_implicit_rpath, true)) {
// Default to clang lib / lib64 folder, i.e. the same location as device
// runtime
SmallString<256> DefaultLibPath =
llvm::sys::path::parent_path(TC.getDriver().Dir);
llvm::sys::path::append(DefaultLibPath, Twine("lib") + CLANG_LIBDIR_SUFFIX);
CmdArgs.push_back("-rpath");
CmdArgs.push_back(Args.MakeArgString(DefaultLibPath));
}
}
void tools::addArchSpecificRPath(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs) {
// Enable -frtlib-add-rpath by default for the case of VE.
@ -702,6 +718,9 @@ bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
addArchSpecificRPath(TC, Args, CmdArgs);
if (RTKind == Driver::OMPRT_OMP)
addOpenMPRuntimeSpecificRPath(TC, Args, CmdArgs);
return true;
}
@ -826,16 +845,16 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.needsStatsRt() && SanArgs.linkRuntimes())
StaticRuntimes.push_back("stats_client");
// Always link the static runtime regardless of DSO or executable.
if (SanArgs.needsAsanRt())
HelperStaticRuntimes.push_back("asan_static");
// Collect static runtimes.
if (Args.hasArg(options::OPT_shared)) {
// Don't link static runtimes into DSOs.
return;
}
// Always link the static runtime for executable.
if (SanArgs.needsAsanRt())
HelperStaticRuntimes.push_back("asan_static");
// Each static runtime that has a DSO counterpart above is excluded below,
// but runtimes that exist only as static are not affected by needsSharedRt.

View File

@ -106,6 +106,9 @@ void AddAssemblerKPIC(const ToolChain &ToolChain,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
void addOpenMPRuntimeSpecificRPath(const ToolChain &TC,
const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
void addArchSpecificRPath(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
/// Returns true, if an OpenMP runtime has been added.

View File

@ -744,6 +744,10 @@ void CudaToolChain::addClangTargetOptions(
return;
}
// Link the bitcode library late if we're using device LTO.
if (getDriver().isUsingLTO(/* IsOffload */ true))
return;
std::string BitcodeSuffix;
if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
options::OPT_fno_openmp_target_new_runtime, true))

View File

@ -130,6 +130,10 @@ void XCoreToolChain::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args,
Action::OffloadKind) const {
CC1Args.push_back("-nostdsysteminc");
// Set `-fno-use-cxa-atexit` to default.
if (!DriverArgs.hasFlag(options::OPT_fuse_cxa_atexit,
options::OPT_fno_use_cxa_atexit, false))
CC1Args.push_back("-fno-use-cxa-atexit");
}
void XCoreToolChain::AddClangCXXStdlibIncludeArgs(

View File

@ -254,8 +254,8 @@ unsigned
BreakableStringLiteral::getRemainingLength(unsigned LineIndex, unsigned Offset,
unsigned StartColumn) const {
return UnbreakableTailLength + Postfix.size() +
encoding::columnWidthWithTabs(Line.substr(Offset, StringRef::npos),
StartColumn, Style.TabWidth, Encoding);
encoding::columnWidthWithTabs(Line.substr(Offset), StartColumn,
Style.TabWidth, Encoding);
}
unsigned BreakableStringLiteral::getContentStartColumn(unsigned LineIndex,
@ -539,29 +539,28 @@ unsigned BreakableBlockComment::getRangeLength(unsigned LineIndex,
unsigned Offset,
StringRef::size_type Length,
unsigned StartColumn) const {
unsigned LineLength =
encoding::columnWidthWithTabs(Content[LineIndex].substr(Offset, Length),
StartColumn, Style.TabWidth, Encoding);
// FIXME: This should go into getRemainingLength instead, but we currently
// break tests when putting it there. Investigate how to fix those tests.
// The last line gets a "*/" postfix.
if (LineIndex + 1 == Lines.size()) {
LineLength += 2;
// We never need a decoration when breaking just the trailing "*/" postfix.
// Note that checking that Length == 0 is not enough, since Length could
// also be StringRef::npos.
if (Content[LineIndex].substr(Offset, StringRef::npos).empty()) {
LineLength -= Decoration.size();
}
}
return LineLength;
return encoding::columnWidthWithTabs(
Content[LineIndex].substr(Offset, Length), StartColumn, Style.TabWidth,
Encoding);
}
unsigned BreakableBlockComment::getRemainingLength(unsigned LineIndex,
unsigned Offset,
unsigned StartColumn) const {
return UnbreakableTailLength +
getRangeLength(LineIndex, Offset, StringRef::npos, StartColumn);
unsigned LineLength =
UnbreakableTailLength +
getRangeLength(LineIndex, Offset, StringRef::npos, StartColumn);
if (LineIndex + 1 == Lines.size()) {
LineLength += 2;
// We never need a decoration when breaking just the trailing "*/" postfix.
bool HasRemainingText = Offset < Content[LineIndex].size();
if (!HasRemainingText) {
bool HasDecoration = Lines[LineIndex].ltrim().startswith(Decoration);
if (HasDecoration)
LineLength -= Decoration.size();
}
}
return LineLength;
}
unsigned BreakableBlockComment::getContentStartColumn(unsigned LineIndex,

View File

@ -1817,8 +1817,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
ContentStartsOnNewline || (NewCode->find('\n') != std::string::npos);
if (IsMultiline) {
// Break before further function parameters on all levels.
for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
for (ParenState &Paren : State.Stack)
Paren.BreakBeforeParameter = true;
}
return Fixes.second + PrefixExcessCharacters * Style.PenaltyExcessCharacter;
}
@ -1826,8 +1826,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
unsigned ContinuationIndenter::addMultilineToken(const FormatToken &Current,
LineState &State) {
// Break before further function parameters on all levels.
for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
for (ParenState &Paren : State.Stack)
Paren.BreakBeforeParameter = true;
unsigned ColumnsUsed = State.Column;
// We can only affect layout of the first and the last line, so the penalty
@ -2380,8 +2380,8 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// the next parameter on all levels, so that the next parameter is clearly
// visible. Line comments already introduce a break.
if (Current.isNot(TT_LineComment)) {
for (unsigned i = 0, e = State.Stack.size(); i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
for (ParenState &Paren : State.Stack)
Paren.BreakBeforeParameter = true;
}
if (Current.is(TT_BlockComment))

View File

@ -44,6 +44,7 @@
#include <algorithm>
#include <memory>
#include <mutex>
#include <numeric>
#include <string>
#include <unordered_map>
@ -532,11 +533,9 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("Language", Style.Language);
if (IO.outputting()) {
StringRef StylesArray[] = {"LLVM", "Google", "Chromium", "Mozilla",
"WebKit", "GNU", "Microsoft"};
ArrayRef<StringRef> Styles(StylesArray);
for (size_t i = 0, e = Styles.size(); i < e; ++i) {
StringRef StyleName(Styles[i]);
StringRef Styles[] = {"LLVM", "Google", "Chromium", "Mozilla",
"WebKit", "GNU", "Microsoft"};
for (StringRef StyleName : Styles) {
FormatStyle PredefinedStyle;
if (getPredefinedStyle(StyleName, Style.Language, &PredefinedStyle) &&
Style == PredefinedStyle) {
@ -1681,10 +1680,10 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
// configuration (which can only be at slot 0) after it.
FormatStyle::FormatStyleSet StyleSet;
bool LanguageFound = false;
for (int i = Styles.size() - 1; i >= 0; --i) {
if (Styles[i].Language != FormatStyle::LK_None)
StyleSet.Add(Styles[i]);
if (Styles[i].Language == Language)
for (const FormatStyle &Style : llvm::reverse(Styles)) {
if (Style.Language != FormatStyle::LK_None)
StyleSet.Add(Style);
if (Style.Language == Language)
LanguageFound = true;
}
if (!LanguageFound) {
@ -1890,9 +1889,8 @@ class Formatter : public TokenAnalyzer {
tooling::Replacements Result;
deriveLocalStyle(AnnotatedLines);
AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
Annotator.calculateFormattingInformation(*AnnotatedLines[i]);
}
for (AnnotatedLine *Line : AnnotatedLines)
Annotator.calculateFormattingInformation(*Line);
Annotator.setCommentLineLevels(AnnotatedLines);
WhitespaceManager Whitespaces(
@ -1962,10 +1960,10 @@ class Formatter : public TokenAnalyzer {
deriveLocalStyle(const SmallVectorImpl<AnnotatedLine *> &AnnotatedLines) {
bool HasBinPackedFunction = false;
bool HasOnePerLineFunction = false;
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
if (!AnnotatedLines[i]->First->Next)
for (AnnotatedLine *Line : AnnotatedLines) {
if (!Line->First->Next)
continue;
FormatToken *Tok = AnnotatedLines[i]->First->Next;
FormatToken *Tok = Line->First->Next;
while (Tok->Next) {
if (Tok->is(PPK_BinPacked))
HasBinPackedFunction = true;
@ -2524,9 +2522,8 @@ static void sortCppIncludes(const FormatStyle &Style,
if (!affectsRange(Ranges, IncludesBeginOffset, IncludesEndOffset))
return;
SmallVector<unsigned, 16> Indices;
for (unsigned i = 0, e = Includes.size(); i != e; ++i) {
Indices.push_back(i);
}
Indices.resize(Includes.size());
std::iota(Indices.begin(), Indices.end(), 0);
if (Style.SortIncludes == FormatStyle::SI_CaseInsensitive) {
llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
@ -2678,6 +2675,15 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
if (!FormattingOff && !MergeWithNextLine) {
if (IncludeRegex.match(Line, &Matches)) {
StringRef IncludeName = Matches[2];
if (Line.contains("/*") && !Line.contains("*/")) {
// #include with a start of a block comment, but without the end.
// Need to keep all the lines until the end of the comment together.
// FIXME: This is somehow simplified check that probably does not work
// correctly if there are multiple comments on a line.
Pos = Code.find("*/", SearchFrom);
Line = Code.substr(
Prev, (Pos != StringRef::npos ? Pos + 2 : Code.size()) - Prev);
}
int Category = Categories.getIncludePriority(
IncludeName,
/*CheckMainHeader=*/!MainIncludeFound && FirstIncludeBlock);
@ -2718,7 +2724,7 @@ static unsigned findJavaImportGroup(const FormatStyle &Style,
unsigned LongestMatchIndex = UINT_MAX;
unsigned LongestMatchLength = 0;
for (unsigned I = 0; I < Style.JavaImportGroups.size(); I++) {
std::string GroupPrefix = Style.JavaImportGroups[I];
const std::string &GroupPrefix = Style.JavaImportGroups[I];
if (ImportIdentifier.startswith(GroupPrefix) &&
GroupPrefix.length() > LongestMatchLength) {
LongestMatchIndex = I;
@ -2743,13 +2749,16 @@ static void sortJavaImports(const FormatStyle &Style,
unsigned ImportsBlockSize = ImportsEndOffset - ImportsBeginOffset;
if (!affectsRange(Ranges, ImportsBeginOffset, ImportsEndOffset))
return;
SmallVector<unsigned, 16> Indices;
Indices.resize(Imports.size());
std::iota(Indices.begin(), Indices.end(), 0);
SmallVector<unsigned, 16> JavaImportGroups;
for (unsigned i = 0, e = Imports.size(); i != e; ++i) {
Indices.push_back(i);
JavaImportGroups.push_back(
findJavaImportGroup(Style, Imports[i].Identifier));
}
JavaImportGroups.reserve(Imports.size());
for (const JavaImportDirective &Import : Imports)
JavaImportGroups.push_back(findJavaImportGroup(Style, Import.Identifier));
bool StaticImportAfterNormalImport =
Style.SortJavaStaticImport == FormatStyle::SJSIO_After;
llvm::sort(Indices, [&](unsigned LHSI, unsigned RHSI) {

View File

@ -123,6 +123,34 @@ namespace format {
TYPE(CSharpGenericTypeConstraintComma) \
TYPE(Unknown)
/// Sorted operators that can follow a C variable.
static const std::vector<clang::tok::TokenKind> COperatorsFollowingVar = [] {
std::vector<clang::tok::TokenKind> ReturnVal = {
tok::l_square, tok::r_square,
tok::l_paren, tok::r_paren,
tok::r_brace, tok::period,
tok::ellipsis, tok::ampamp,
tok::ampequal, tok::star,
tok::starequal, tok::plus,
tok::plusplus, tok::plusequal,
tok::minus, tok::arrow,
tok::minusminus, tok::minusequal,
tok::exclaim, tok::exclaimequal,
tok::slash, tok::slashequal,
tok::percent, tok::percentequal,
tok::less, tok::lessless,
tok::lessequal, tok::lesslessequal,
tok::greater, tok::greatergreater,
tok::greaterequal, tok::greatergreaterequal,
tok::caret, tok::caretequal,
tok::pipe, tok::pipepipe,
tok::pipeequal, tok::question,
tok::semi, tok::equal,
tok::equalequal, tok::comma};
assert(std::is_sorted(ReturnVal.begin(), ReturnVal.end()));
return ReturnVal;
}();
/// Determines the semantic type of a syntactic token, e.g. whether "<" is a
/// template opener or binary operator.
enum TokenType : uint8_t {

View File

@ -210,8 +210,8 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
// Spin through the lines and ensure we have balanced braces.
int Braces = 0;
for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
FormatToken *Tok = AnnotatedLines[I]->First;
for (AnnotatedLine *Line : AnnotatedLines) {
FormatToken *Tok = Line->First;
while (Tok) {
Braces += Tok->is(tok::l_brace) ? 1 : Tok->is(tok::r_brace) ? -1 : 0;
Tok = Tok->Next;

View File

@ -133,7 +133,10 @@ class JavaScriptImportSorter : public TokenAnalyzer {
public:
JavaScriptImportSorter(const Environment &Env, const FormatStyle &Style)
: TokenAnalyzer(Env, Style),
FileContents(Env.getSourceManager().getBufferData(Env.getFileID())) {}
FileContents(Env.getSourceManager().getBufferData(Env.getFileID())) {
// FormatToken.Tok starts out in an uninitialized state.
invalidToken.Tok.startToken();
}
std::pair<tooling::Replacements, unsigned>
analyze(TokenAnnotator &Annotator,
@ -232,7 +235,6 @@ class JavaScriptImportSorter : public TokenAnalyzer {
if (!Current || Current == LineEnd->Next) {
// Set the current token to an invalid token, so that further parsing on
// this line fails.
invalidToken.Tok.setKind(tok::unknown);
Current = &invalidToken;
}
}
@ -510,7 +512,6 @@ class JavaScriptImportSorter : public TokenAnalyzer {
while (Current->is(tok::identifier)) {
nextToken();
if (Current->is(tok::semi)) {
nextToken();
return true;
}
if (!Current->is(tok::period))

View File

@ -113,12 +113,13 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
assert(UnwrappedLines.rbegin()->empty());
unsigned Penalty = 0;
for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE; ++Run) {
const auto &Lines = UnwrappedLines[Run];
LLVM_DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
SmallVector<AnnotatedLine *, 16> AnnotatedLines;
TokenAnnotator Annotator(Style, Lex.getKeywords());
for (unsigned i = 0, e = UnwrappedLines[Run].size(); i != e; ++i) {
AnnotatedLines.push_back(new AnnotatedLine(UnwrappedLines[Run][i]));
for (const UnwrappedLine &Line : Lines) {
AnnotatedLines.push_back(new AnnotatedLine(Line));
Annotator.annotate(*AnnotatedLines.back());
}
@ -130,9 +131,8 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
for (const tooling::Replacement &Fix : RunResult.first)
llvm::dbgs() << Fix.toString() << "\n";
});
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
delete AnnotatedLines[i];
}
for (AnnotatedLine *Line : AnnotatedLines)
delete Line;
Penalty += RunResult.second;
for (const auto &R : RunResult.first) {

View File

@ -66,9 +66,8 @@ class AnnotatedLine {
}
~AnnotatedLine() {
for (unsigned i = 0, e = Children.size(); i != e; ++i) {
delete Children[i];
}
for (AnnotatedLine *Child : Children)
delete Child;
FormatToken *Current = First;
while (Current) {
Current->Children.clear();

View File

@ -100,10 +100,27 @@ class LevelIndentTracker {
if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
Style.isCSharp())
return 0;
if (RootToken.isAccessSpecifier(false) ||
RootToken.isObjCAccessSpecifier() ||
(RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
RootToken.Next && RootToken.Next->is(tok::colon))) {
auto IsAccessModifier = [this, &RootToken]() {
if (RootToken.isAccessSpecifier(Style.isCpp()))
return true;
else if (RootToken.isObjCAccessSpecifier())
return true;
// Handle Qt signals.
else if ((RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
RootToken.Next && RootToken.Next->is(tok::colon)))
return true;
else if (RootToken.Next &&
RootToken.Next->isOneOf(Keywords.kw_slots, Keywords.kw_qslots) &&
RootToken.Next->Next && RootToken.Next->Next->is(tok::colon))
return true;
// Handle malformed access specifier e.g. 'private' without trailing ':'.
else if (!RootToken.Next && RootToken.isAccessSpecifier(false))
return true;
return false;
};
if (IsAccessModifier()) {
// The AccessModifierOffset may be overridden by IndentAccessModifiers,
// in which case we take a negative value of the IndentWidth to simulate
// the upper indent level.

View File

@ -687,9 +687,9 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
} while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty());
// Assume other blocks for all unclosed opening braces.
for (unsigned i = 0, e = LBraceStack.size(); i != e; ++i) {
if (LBraceStack[i]->is(BK_Unknown))
LBraceStack[i]->setBlockKind(BK_Block);
for (FormatToken *LBrace : LBraceStack) {
if (LBrace->is(BK_Unknown))
LBrace->setBlockKind(BK_Block);
}
FormatTok = Tokens->setPosition(StoredPosition);
@ -2708,14 +2708,25 @@ void UnwrappedLineParser::parseSwitch() {
}
void UnwrappedLineParser::parseAccessSpecifier() {
FormatToken *AccessSpecifierCandidate = FormatTok;
nextToken();
// Understand Qt's slots.
if (FormatTok->isOneOf(Keywords.kw_slots, Keywords.kw_qslots))
nextToken();
// Otherwise, we don't know what it is, and we'd better keep the next token.
if (FormatTok->Tok.is(tok::colon))
if (FormatTok->Tok.is(tok::colon)) {
nextToken();
addUnwrappedLine();
addUnwrappedLine();
} else if (!FormatTok->Tok.is(tok::coloncolon) &&
!std::binary_search(COperatorsFollowingVar.begin(),
COperatorsFollowingVar.end(),
FormatTok->Tok.getKind())) {
// Not a variable name nor namespace name.
addUnwrappedLine();
} else if (AccessSpecifierCandidate) {
// Consider the access specifier to be a C identifier.
AccessSpecifierCandidate->Tok.setKind(tok::identifier);
}
}
void UnwrappedLineParser::parseConcept() {

View File

@ -188,10 +188,10 @@ std::pair<tooling::Replacements, unsigned> UsingDeclarationsSorter::analyze(
AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
tooling::Replacements Fixes;
SmallVector<UsingDeclaration, 4> UsingDeclarations;
for (size_t I = 0, E = AnnotatedLines.size(); I != E; ++I) {
const auto *FirstTok = AnnotatedLines[I]->First;
if (AnnotatedLines[I]->InPPDirective ||
!AnnotatedLines[I]->startsWith(tok::kw_using) || FirstTok->Finalized) {
for (const AnnotatedLine *Line : AnnotatedLines) {
const auto *FirstTok = Line->First;
if (Line->InPPDirective || !Line->startsWith(tok::kw_using) ||
FirstTok->Finalized) {
endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes);
continue;
}
@ -204,7 +204,7 @@ std::pair<tooling::Replacements, unsigned> UsingDeclarationsSorter::analyze(
endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes);
continue;
}
UsingDeclarations.push_back(UsingDeclaration(AnnotatedLines[I], Label));
UsingDeclarations.push_back(UsingDeclaration(Line, Label));
}
endUsingDeclarationBlock(&UsingDeclarations, SourceMgr, &Fixes);
return {Fixes, 0};

View File

@ -344,6 +344,10 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
if (Changes[ScopeStart - 1].Tok->is(TT_FunctionDeclarationName))
return true;
// Lambda.
if (Changes[ScopeStart - 1].Tok->is(TT_LambdaLBrace))
return false;
// Continued function declaration
if (ScopeStart > Start + 1 &&
Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName))
@ -352,8 +356,13 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// Continued function call
if (ScopeStart > Start + 1 &&
Changes[ScopeStart - 2].Tok->is(tok::identifier) &&
Changes[ScopeStart - 1].Tok->is(tok::l_paren))
Changes[ScopeStart - 1].Tok->is(tok::l_paren) &&
Changes[ScopeStart].Tok->isNot(TT_LambdaLSquare)) {
if (Changes[i].Tok->MatchingParen &&
Changes[i].Tok->MatchingParen->is(TT_LambdaLBrace))
return false;
return Style.BinPackArguments;
}
// Ternary operator
if (Changes[i].Tok->is(TT_ConditionalExpr))
@ -372,8 +381,15 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
if (ScopeStart > Start + 1 &&
Changes[ScopeStart - 2].Tok->isNot(tok::identifier) &&
Changes[ScopeStart - 1].Tok->is(tok::l_brace) &&
Changes[i].Tok->isNot(tok::r_brace))
Changes[i].Tok->isNot(tok::r_brace)) {
for (unsigned OuterScopeStart : llvm::reverse(ScopeStack)) {
// Lambda.
if (OuterScopeStart > Start &&
Changes[OuterScopeStart - 1].Tok->is(TT_LambdaLBrace))
return false;
}
return true;
}
return false;
};
@ -1014,7 +1030,7 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
for (auto i = 0U; i < CellDescs.CellCount; i++, ++CellIter) {
for (auto i = 0U; i < CellDescs.CellCount; ++i, ++CellIter) {
unsigned NetWidth = 0U;
if (isSplitCell(*CellIter))
NetWidth = getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
@ -1331,8 +1347,13 @@ void WhitespaceManager::storeReplacement(SourceRange Range, StringRef Text) {
void WhitespaceManager::appendNewlineText(std::string &Text,
unsigned Newlines) {
for (unsigned i = 0; i < Newlines; ++i)
Text.append(UseCRLF ? "\r\n" : "\n");
if (UseCRLF) {
Text.reserve(Text.size() + 2 * Newlines);
for (unsigned i = 0; i < Newlines; ++i)
Text.append("\r\n");
} else {
Text.append(Newlines, '\n');
}
}
void WhitespaceManager::appendEscapedNewlineText(

View File

@ -3560,6 +3560,8 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
GenerateArg(Args, OPT_fclang_abi_compat_EQ, "11.0", SA);
else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver12)
GenerateArg(Args, OPT_fclang_abi_compat_EQ, "12.0", SA);
else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver13)
GenerateArg(Args, OPT_fclang_abi_compat_EQ, "13.0", SA);
if (Opts.getSignReturnAddressScope() ==
LangOptions::SignReturnAddressScopeKind::All)
@ -4062,6 +4064,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.setClangABICompat(LangOptions::ClangABI::Ver11);
else if (Major <= 12)
Opts.setClangABICompat(LangOptions::ClangABI::Ver12);
else if (Major <= 13)
Opts.setClangABICompat(LangOptions::ClangABI::Ver13);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();

View File

@ -25,6 +25,7 @@
#include "clang/Serialization/ASTReader.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
using namespace clang;
static bool MacroBodyEndsInBackslash(StringRef MacroBody) {
@ -914,6 +915,13 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__LONG_WIDTH__", Twine(TI.getLongWidth()));
Builder.defineMacro("__LLONG_WIDTH__", Twine(TI.getLongLongWidth()));
size_t BitIntMaxWidth = TI.getMaxBitIntWidth();
assert(BitIntMaxWidth <= llvm::IntegerType::MAX_INT_BITS &&
"Target defined a max bit width larger than LLVM can support!");
assert(BitIntMaxWidth >= TI.getLongLongWidth() &&
"Target defined a max bit width smaller than the C standard allows!");
Builder.defineMacro("__BITINT_MAXWIDTH__", Twine(BitIntMaxWidth));
DefineTypeSize("__SCHAR_MAX__", TargetInfo::SignedChar, TI, Builder);
DefineTypeSize("__SHRT_MAX__", TargetInfo::SignedShort, TI, Builder);
DefineTypeSize("__INT_MAX__", TargetInfo::SignedInt, TI, Builder);

View File

@ -730,6 +730,12 @@ __arm_st64bv0(void *__addr, data512_t __value) {
#define __arm_mte_ptrdiff(__ptra, __ptrb) __builtin_arm_subp(__ptra, __ptrb)
#endif
/* Memory Operations Intrinsics */
#if __ARM_FEATURE_MOPS && __ARM_FEATURE_MEMORY_TAGGING
#define __arm_mops_memset_tag(__tagged_address, __value, __size) \
__builtin_arm_mops_memset_tag(__tagged_address, __value, __size)
#endif
/* Transactional Memory Extension (TME) Intrinsics */
#if __ARM_FEATURE_TME

View File

@ -14,10 +14,11 @@
* additional definitions provided for Windows.
* For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
*
* Also fall back on Darwin to allow additional definitions and
* Also fall back on Darwin and AIX to allow additional definitions and
* implementation-defined values.
*/
#if (defined(__APPLE__) || (defined(__MINGW32__) || defined(_MSC_VER))) && \
#if (defined(__APPLE__) || defined(__MINGW32__) || defined(_MSC_VER) || \
defined(_AIX)) && \
__STDC_HOSTED__ && __has_include_next(<float.h>)
/* Prior to Apple's 10.7 SDK, float.h SDK header used to apply an extra level
@ -37,7 +38,9 @@
# undef FLT_MANT_DIG
# undef DBL_MANT_DIG
# undef LDBL_MANT_DIG
# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
# if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || \
__cplusplus >= 201103L || \
(__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# undef DECIMAL_DIG
# endif
# undef FLT_DIG
@ -64,7 +67,9 @@
# undef FLT_MIN
# undef DBL_MIN
# undef LDBL_MIN
# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || \
__cplusplus >= 201703L || \
(__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# undef FLT_TRUE_MIN
# undef DBL_TRUE_MIN
# undef LDBL_TRUE_MIN
@ -87,7 +92,9 @@
#define DBL_MANT_DIG __DBL_MANT_DIG__
#define LDBL_MANT_DIG __LDBL_MANT_DIG__
#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || __cplusplus >= 201103L
#if __STDC_VERSION__ >= 199901L || !defined(__STRICT_ANSI__) || \
__cplusplus >= 201103L || \
(__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# define DECIMAL_DIG __DECIMAL_DIG__
#endif
@ -123,7 +130,9 @@
#define DBL_MIN __DBL_MIN__
#define LDBL_MIN __LDBL_MIN__
#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || __cplusplus >= 201703L
#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__) || \
__cplusplus >= 201703L || \
(__STDC_HOSTED__ && defined(_AIX) && defined(_ALL_SOURCE))
# define FLT_TRUE_MIN __FLT_DENORM_MIN__
# define DBL_TRUE_MIN __DBL_DENORM_MIN__
# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__

View File

@ -78,6 +78,8 @@
#define LONG_WIDTH __LONG_WIDTH__
#define ULLONG_WIDTH __LLONG_WIDTH__
#define LLONG_WIDTH __LLONG_WIDTH__
#define BITINT_MAXWIDTH __BITINT_MAXWIDTH__
#endif
#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */

View File

@ -72,6 +72,12 @@
#endif // defined(__SPIR__)
#endif // (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
#if !defined(__opencl_c_generic_address_space)
// Internal feature macro to provide named (global, local, private) address
// space overloads for builtin functions that take a pointer argument.
#define __opencl_c_named_address_space_builtins 1
#endif // !defined(__opencl_c_generic_address_space)
// built-in scalar data types:
/**

View File

@ -7285,7 +7285,9 @@ half4 __ovld fract(half4 x, half4 *iptr);
half8 __ovld fract(half8 x, half8 *iptr);
half16 __ovld fract(half16 x, half16 *iptr);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float __ovld fract(float x, __global float *iptr);
float2 __ovld fract(float2 x, __global float2 *iptr);
float3 __ovld fract(float3 x, __global float3 *iptr);
@ -7344,7 +7346,7 @@ half4 __ovld fract(half4 x, __private half4 *iptr);
half8 __ovld fract(half8 x, __private half8 *iptr);
half16 __ovld fract(half16 x, __private half16 *iptr);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Extract mantissa and exponent from x. For each
@ -7375,7 +7377,9 @@ half4 __ovld frexp(half4 x, int4 *exp);
half8 __ovld frexp(half8 x, int8 *exp);
half16 __ovld frexp(half16 x, int16 *exp);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float __ovld frexp(float x, __global int *exp);
float2 __ovld frexp(float2 x, __global int2 *exp);
float3 __ovld frexp(float3 x, __global int3 *exp);
@ -7434,7 +7438,7 @@ half4 __ovld frexp(half4 x, __private int4 *exp);
half8 __ovld frexp(half8 x, __private int8 *exp);
half16 __ovld frexp(half16 x, __private int16 *exp);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Compute the value of the square root of x^2 + y^2
@ -7582,7 +7586,9 @@ half4 __ovld lgamma_r(half4 x, int4 *signp);
half8 __ovld lgamma_r(half8 x, int8 *signp);
half16 __ovld lgamma_r(half16 x, int16 *signp);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float __ovld lgamma_r(float x, __global int *signp);
float2 __ovld lgamma_r(float2 x, __global int2 *signp);
float3 __ovld lgamma_r(float3 x, __global int3 *signp);
@ -7641,7 +7647,7 @@ half4 __ovld lgamma_r(half4 x, __private int4 *signp);
half8 __ovld lgamma_r(half8 x, __private int8 *signp);
half16 __ovld lgamma_r(half16 x, __private int16 *signp);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Compute natural logarithm.
@ -7888,7 +7894,9 @@ half4 __ovld modf(half4 x, half4 *iptr);
half8 __ovld modf(half8 x, half8 *iptr);
half16 __ovld modf(half16 x, half16 *iptr);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float __ovld modf(float x, __global float *iptr);
float2 __ovld modf(float2 x, __global float2 *iptr);
float3 __ovld modf(float3 x, __global float3 *iptr);
@ -7947,7 +7955,7 @@ half4 __ovld modf(half4 x, __private half4 *iptr);
half8 __ovld modf(half8 x, __private half8 *iptr);
half16 __ovld modf(half16 x, __private half16 *iptr);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Returns a quiet NaN. The nancode may be placed
@ -8147,9 +8155,10 @@ half3 __ovld remquo(half3 x, half3 y, int3 *quo);
half4 __ovld remquo(half4 x, half4 y, int4 *quo);
half8 __ovld remquo(half8 x, half8 y, int8 *quo);
half16 __ovld remquo(half16 x, half16 y, int16 *quo);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float __ovld remquo(float x, float y, __global int *quo);
float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
@ -8208,7 +8217,7 @@ half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Round to integral value (using round to nearest
* even rounding mode) in floating-point format.
@ -8372,7 +8381,9 @@ half4 __ovld sincos(half4 x, half4 *cosval);
half8 __ovld sincos(half8 x, half8 *cosval);
half16 __ovld sincos(half16 x, half16 *cosval);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float __ovld sincos(float x, __global float *cosval);
float2 __ovld sincos(float2 x, __global float2 *cosval);
float3 __ovld sincos(float3 x, __global float3 *cosval);
@ -8431,7 +8442,7 @@ half4 __ovld sincos(half4 x, __private half4 *cosval);
half8 __ovld sincos(half8 x, __private half8 *cosval);
half16 __ovld sincos(half16 x, __private half16 *cosval);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Compute hyperbolic sine.
@ -11315,7 +11326,9 @@ half4 __ovld __purefn vload4(size_t offset, const half *p);
half8 __ovld __purefn vload8(size_t offset, const half *p);
half16 __ovld __purefn vload16(size_t offset, const half *p);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
char2 __ovld __purefn vload2(size_t offset, const __global char *p);
uchar2 __ovld __purefn vload2(size_t offset, const __global uchar *p);
short2 __ovld __purefn vload2(size_t offset, const __global short *p);
@ -11490,7 +11503,7 @@ half4 __ovld __purefn vload4(size_t offset, const __private half *p);
half8 __ovld __purefn vload8(size_t offset, const __private half *p);
half16 __ovld __purefn vload16(size_t offset, const __private half *p);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
#if defined(__opencl_c_generic_address_space)
void __ovld vstore2(char2 data, size_t offset, char *p);
@ -11553,7 +11566,9 @@ void __ovld vstore4(half4 data, size_t offset, half *p);
void __ovld vstore8(half8 data, size_t offset, half *p);
void __ovld vstore16(half16 data, size_t offset, half *p);
#endif //cl_khr_fp16
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
void __ovld vstore2(char2 data, size_t offset, __global char *p);
void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
void __ovld vstore2(short2 data, size_t offset, __global short *p);
@ -11726,7 +11741,7 @@ void __ovld vstore4(half4 data, size_t offset, __private half *p);
void __ovld vstore8(half8 data, size_t offset, __private half *p);
void __ovld vstore16(half16 data, size_t offset, __private half *p);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Read sizeof (half) bytes of data from address
@ -11739,11 +11754,13 @@ void __ovld vstore16(half16 data, size_t offset, __private half *p);
float __ovld __purefn vload_half(size_t offset, const __constant half *p);
#if defined(__opencl_c_generic_address_space)
float __ovld __purefn vload_half(size_t offset, const half *p);
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float __ovld __purefn vload_half(size_t offset, const __global half *p);
float __ovld __purefn vload_half(size_t offset, const __local half *p);
float __ovld __purefn vload_half(size_t offset, const __private half *p);
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* Read sizeof (halfn) bytes of data from address
@ -11764,7 +11781,9 @@ float3 __ovld __purefn vload_half3(size_t offset, const half *p);
float4 __ovld __purefn vload_half4(size_t offset, const half *p);
float8 __ovld __purefn vload_half8(size_t offset, const half *p);
float16 __ovld __purefn vload_half16(size_t offset, const half *p);
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float2 __ovld __purefn vload_half2(size_t offset, const __global half *p);
float3 __ovld __purefn vload_half3(size_t offset, const __global half *p);
float4 __ovld __purefn vload_half4(size_t offset, const __global half *p);
@ -11780,7 +11799,7 @@ float3 __ovld __purefn vload_half3(size_t offset, const __private half *p);
float4 __ovld __purefn vload_half4(size_t offset, const __private half *p);
float8 __ovld __purefn vload_half8(size_t offset, const __private half *p);
float16 __ovld __purefn vload_half16(size_t offset, const __private half *p);
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* The float value given by data is first
@ -11806,7 +11825,9 @@ void __ovld vstore_half_rtz(double data, size_t offset, half *p);
void __ovld vstore_half_rtp(double data, size_t offset, half *p);
void __ovld vstore_half_rtn(double data, size_t offset, half *p);
#endif //cl_khr_fp64
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
void __ovld vstore_half(float data, size_t offset, __global half *p);
void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
@ -11839,7 +11860,7 @@ void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* The floatn value given by data is converted to
@ -11905,7 +11926,9 @@ void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
#endif //cl_khr_fp64
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
@ -12058,7 +12081,7 @@ void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* For n = 1, 2, 4, 8 and 16 read sizeof (halfn)
@ -12084,7 +12107,9 @@ float3 __ovld __purefn vloada_half3(size_t offset, const half *p);
float4 __ovld __purefn vloada_half4(size_t offset, const half *p);
float8 __ovld __purefn vloada_half8(size_t offset, const half *p);
float16 __ovld __purefn vloada_half16(size_t offset, const half *p);
#else
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
float2 __ovld __purefn vloada_half2(size_t offset, const __global half *p);
float3 __ovld __purefn vloada_half3(size_t offset, const __global half *p);
float4 __ovld __purefn vloada_half4(size_t offset, const __global half *p);
@ -12100,7 +12125,7 @@ float3 __ovld __purefn vloada_half3(size_t offset, const __private half *p);
float4 __ovld __purefn vloada_half4(size_t offset, const __private half *p);
float8 __ovld __purefn vloada_half8(size_t offset, const __private half *p);
float16 __ovld __purefn vloada_half16(size_t offset, const __private half *p);
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
/**
* The floatn value given by data is converted to
@ -12180,8 +12205,9 @@ void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_generic_address_space)
#else
#if defined(__opencl_c_named_address_space_builtins)
void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
@ -12363,7 +12389,7 @@ void __ovld vstorea_half4_rtn(double4 data,size_t offset, __private half *p);
void __ovld vstorea_half8_rtn(double8 data,size_t offset, __private half *p);
void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_generic_address_space)
#endif //defined(__opencl_c_named_address_space_builtins)
// OpenCL v1.1 s6.11.8, v1.2 s6.12.8, v2.0 s6.13.8 - Synchronization Functions
@ -18513,6 +18539,8 @@ int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
// Disable any extensions we may have enabled previously.
#pragma OPENCL EXTENSION all : disable
#undef __opencl_c_named_address_space_builtins
#undef __cnfn
#undef __ovld
#endif //_OPENCL_H_

View File

@ -2378,8 +2378,9 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
bool &TokAtPhysicalStartOfLine) {
// If Line comments aren't explicitly enabled for this language, emit an
// extension warning.
if (!LangOpts.LineComment && !isLexingRawMode()) {
Diag(BufferPtr, diag::ext_line_comment);
if (!LangOpts.LineComment) {
if (!isLexingRawMode()) // There's no PP in raw mode, so can't emit diags.
Diag(BufferPtr, diag::ext_line_comment);
// Mark them enabled so we only emit one warning for this translation
// unit.

View File

@ -85,6 +85,8 @@ def FuncExtKhrMipmapImageWrites : FunctionExtension<"cl_khr_mipmap_imag
def FuncExtKhrGlMsaaSharing : FunctionExtension<"cl_khr_gl_msaa_sharing">;
def FuncExtKhrGlMsaaSharingReadWrite : FunctionExtension<"cl_khr_gl_msaa_sharing __opencl_c_read_write_images">;
def FuncExtOpenCLCGenericAddressSpace : FunctionExtension<"__opencl_c_generic_address_space">;
def FuncExtOpenCLCNamedAddressSpaceBuiltins : FunctionExtension<"__opencl_c_named_address_space_builtins">;
def FuncExtOpenCLCPipes : FunctionExtension<"__opencl_c_pipes">;
def FuncExtOpenCLCWGCollectiveFunctions : FunctionExtension<"__opencl_c_work_group_collective_functions">;
def FuncExtOpenCLCReadWriteImages : FunctionExtension<"__opencl_c_read_write_images">;
@ -591,10 +593,10 @@ multiclass MathWithPointer<list<AddressSpace> addrspaces> {
}
}
let MaxVersion = CL20 in {
let Extension = FuncExtOpenCLCNamedAddressSpaceBuiltins in {
defm : MathWithPointer<[GlobalAS, LocalAS, PrivateAS]>;
}
let MinVersion = CL20 in {
let Extension = FuncExtOpenCLCGenericAddressSpace in {
defm : MathWithPointer<[GenericAS]>;
}
@ -840,10 +842,10 @@ multiclass VloadVstore<list<AddressSpace> addrspaces, bit defStores> {
}
}
let MaxVersion = CL20 in {
let Extension = FuncExtOpenCLCNamedAddressSpaceBuiltins in {
defm : VloadVstore<[GlobalAS, LocalAS, PrivateAS], 1>;
}
let MinVersion = CL20 in {
let Extension = FuncExtOpenCLCGenericAddressSpace in {
defm : VloadVstore<[GenericAS], 1>;
}
// vload with constant address space is available regardless of version.
@ -874,10 +876,10 @@ multiclass VloadVstoreHalf<list<AddressSpace> addrspaces, bit defStores> {
}
}
let MaxVersion = CL20 in {
let Extension = FuncExtOpenCLCNamedAddressSpaceBuiltins in {
defm : VloadVstoreHalf<[GlobalAS, LocalAS, PrivateAS], 1>;
}
let MinVersion = CL20 in {
let Extension = FuncExtOpenCLCGenericAddressSpace in {
defm : VloadVstoreHalf<[GenericAS], 1>;
}
// vload_half and vloada_half with constant address space are available regardless of version.

View File

@ -2554,39 +2554,38 @@ static bool IsCPUDispatchCPUSpecificMultiVersion(const Expr *E) {
bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain,
bool (*IsPlausibleResult)(QualType)) {
if (isSFINAEContext()) {
// If this is a SFINAE context, don't try anything that might trigger ADL
// prematurely.
return false;
}
SourceLocation Loc = E.get()->getExprLoc();
SourceRange Range = E.get()->getSourceRange();
QualType ZeroArgCallTy;
UnresolvedSet<4> Overloads;
if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
!ZeroArgCallTy.isNull() &&
(!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
// At this point, we know E is potentially callable with 0
// arguments and that it returns something of a reasonable type,
// so we can emit a fixit and carry on pretending that E was
// actually a CallExpr.
SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
<< (IsCallableWithAppend(E.get())
? FixItHint::CreateInsertion(ParenInsertionLoc, "()")
: FixItHint());
if (!IsMV)
notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
// FIXME: Try this before emitting the fixit, and suppress diagnostics
// while doing so.
E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None,
Range.getEnd().getLocWithOffset(1));
return true;
// If this is a SFINAE context, don't try anything that might trigger ADL
// prematurely.
if (!isSFINAEContext()) {
QualType ZeroArgCallTy;
if (tryExprAsCall(*E.get(), ZeroArgCallTy, Overloads) &&
!ZeroArgCallTy.isNull() &&
(!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
// At this point, we know E is potentially callable with 0
// arguments and that it returns something of a reasonable type,
// so we can emit a fixit and carry on pretending that E was
// actually a CallExpr.
SourceLocation ParenInsertionLoc = getLocForEndOfToken(Range.getEnd());
bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());
Diag(Loc, PD) << /*zero-arg*/ 1 << IsMV << Range
<< (IsCallableWithAppend(E.get())
? FixItHint::CreateInsertion(ParenInsertionLoc,
"()")
: FixItHint());
if (!IsMV)
notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
// FIXME: Try this before emitting the fixit, and suppress diagnostics
// while doing so.
E = BuildCallExpr(nullptr, E.get(), Range.getEnd(), None,
Range.getEnd().getLocWithOffset(1));
return true;
}
}
if (!ForceComplain) return false;
bool IsMV = IsCPUDispatchCPUSpecificMultiVersion(E.get());

View File

@ -590,6 +590,8 @@ bool HasAllowedCUDADeviceStaticInitializer(Sema &S, VarDecl *VD,
};
auto IsConstantInit = [&](const Expr *Init) {
assert(Init);
ASTContext::CUDAConstantEvalContextRAII EvalCtx(S.Context,
/*NoWronSidedVars=*/true);
return Init->isConstantInitializer(S.Context,
VD->getType()->isReferenceType());
};

View File

@ -3987,7 +3987,7 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
}
if (!HasFeature) {
std::string FeatureStrs = "";
std::string FeatureStrs;
for (StringRef OF : ReqOpFeatures) {
// If the feature is 64bit, alter the string so it will print better in
// the diagnostic.

View File

@ -810,7 +810,7 @@ ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
checkSuspensionContext(*this, Loc, "co_await");
if (E->getType()->isPlaceholderType()) {
if (E->hasPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return ExprError();
E = R.get();
@ -828,7 +828,7 @@ ExprResult Sema::BuildUnresolvedCoawaitExpr(SourceLocation Loc, Expr *E,
if (!FSI)
return ExprError();
if (E->getType()->isPlaceholderType()) {
if (E->hasPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid())
return ExprError();
@ -866,7 +866,7 @@ ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *E,
if (!Coroutine)
return ExprError();
if (E->getType()->isPlaceholderType()) {
if (E->hasPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return ExprError();
E = R.get();
@ -927,7 +927,7 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
if (!Coroutine)
return ExprError();
if (E->getType()->isPlaceholderType()) {
if (E->hasPlaceholderType()) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return ExprError();
E = R.get();
@ -970,8 +970,8 @@ StmtResult Sema::BuildCoreturnStmt(SourceLocation Loc, Expr *E,
if (!FSI)
return StmtError();
if (E && E->getType()->isPlaceholderType() &&
!E->getType()->isSpecificPlaceholderType(BuiltinType::Overload)) {
if (E && E->hasPlaceholderType() &&
!E->hasPlaceholderType(BuiltinType::Overload)) {
ExprResult R = CheckPlaceholderExpr(E);
if (R.isInvalid()) return StmtError();
E = R.get();

View File

@ -5703,6 +5703,13 @@ static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
return false;
}
/// Returns true if the declaration is declared in a system header or from a
/// system macro.
static bool isFromSystemHeader(SourceManager &SM, const Decl *D) {
return SM.isInSystemHeader(D->getLocation()) ||
SM.isInSystemMacro(D->getLocation());
}
void Sema::warnOnReservedIdentifier(const NamedDecl *D) {
// Avoid warning twice on the same identifier, and don't warn on redeclaration
// of system decl.
@ -5710,9 +5717,10 @@ void Sema::warnOnReservedIdentifier(const NamedDecl *D) {
return;
ReservedIdentifierStatus Status = D->isReserved(getLangOpts());
if (Status != ReservedIdentifierStatus::NotReserved &&
!Context.getSourceManager().isInSystemHeader(D->getLocation()))
!isFromSystemHeader(Context.getSourceManager(), D)) {
Diag(D->getLocation(), diag::warn_reserved_extern_symbol)
<< D << static_cast<int>(Status);
}
}
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
@ -14188,6 +14196,9 @@ ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
if (!FD->isGlobal())
return false;
if (!FD->isExternallyVisible())
return false;
// Don't warn about C++ member functions.
if (isa<CXXMethodDecl>(FD))
return false;

View File

@ -3393,7 +3393,8 @@ bool Sema::checkTargetAttr(SourceLocation LiteralLoc, StringRef AttrStr) {
if (ParsedAttrs.BranchProtection.empty())
return false;
if (!Context.getTargetInfo().validateBranchProtection(
ParsedAttrs.BranchProtection, BPI, DiagMsg)) {
ParsedAttrs.BranchProtection, ParsedAttrs.Architecture, BPI,
DiagMsg)) {
if (DiagMsg.empty())
return Diag(LiteralLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << "branch-protection" << Target;

View File

@ -497,7 +497,7 @@ SourceRange Sema::getExprRange(Expr *E) const {
/// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4).
ExprResult Sema::DefaultFunctionArrayConversion(Expr *E, bool Diagnose) {
// Handle any placeholder expressions which made it here.
if (E->getType()->isPlaceholderType()) {
if (E->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
if (result.isInvalid()) return ExprError();
E = result.get();
@ -621,7 +621,7 @@ static void DiagnoseDirectIsaAccess(Sema &S, const ObjCIvarRefExpr *OIRE,
ExprResult Sema::DefaultLvalueConversion(Expr *E) {
// Handle any placeholder expressions which made it here.
if (E->getType()->isPlaceholderType()) {
if (E->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
if (result.isInvalid()) return ExprError();
E = result.get();
@ -4685,7 +4685,7 @@ ExprResult
Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
Expr *idx, SourceLocation rbLoc) {
if (base && !base->getType().isNull() &&
base->getType()->isSpecificPlaceholderType(BuiltinType::OMPArraySection))
base->hasPlaceholderType(BuiltinType::OMPArraySection))
return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(),
SourceLocation(), /*Length*/ nullptr,
/*Stride=*/nullptr, rbLoc);
@ -4711,8 +4711,7 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
};
// The matrix subscript operator ([][])is considered a single operator.
// Separating the index expressions by parenthesis is not allowed.
if (base->getType()->isSpecificPlaceholderType(
BuiltinType::IncompleteMatrixIdx) &&
if (base->hasPlaceholderType(BuiltinType::IncompleteMatrixIdx) &&
!isa<MatrixSubscriptExpr>(base)) {
Diag(base->getExprLoc(), diag::err_matrix_separate_incomplete_index)
<< SourceRange(base->getBeginLoc(), rbLoc);
@ -4944,9 +4943,8 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc) {
if (Base->getType()->isPlaceholderType() &&
!Base->getType()->isSpecificPlaceholderType(
BuiltinType::OMPArraySection)) {
if (Base->hasPlaceholderType() &&
!Base->hasPlaceholderType(BuiltinType::OMPArraySection)) {
ExprResult Result = CheckPlaceholderExpr(Base);
if (Result.isInvalid())
return ExprError();
@ -5114,8 +5112,7 @@ ExprResult Sema::ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
}
}
if (!Base->getType()->isSpecificPlaceholderType(
BuiltinType::OMPArraySection)) {
if (!Base->hasPlaceholderType(BuiltinType::OMPArraySection)) {
ExprResult Result = DefaultFunctionArrayLvalueConversion(Base);
if (Result.isInvalid())
return ExprError();
@ -5130,7 +5127,7 @@ ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets) {
if (Base->getType()->isPlaceholderType()) {
if (Base->hasPlaceholderType()) {
ExprResult Result = CheckPlaceholderExpr(Base);
if (Result.isInvalid())
return ExprError();
@ -5155,7 +5152,7 @@ ExprResult Sema::ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SmallVector<Expr *, 4> NewDims;
bool ErrorFound = false;
for (Expr *Dim : Dims) {
if (Dim->getType()->isPlaceholderType()) {
if (Dim->hasPlaceholderType()) {
ExprResult Result = CheckPlaceholderExpr(Dim);
if (Result.isInvalid()) {
ErrorFound = true;
@ -13653,7 +13650,7 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
if (OrigOp.get()->isTypeDependent())
return Context.DependentTy;
assert(!OrigOp.get()->getType()->isPlaceholderType());
assert(!OrigOp.get()->hasPlaceholderType());
// Make sure to ignore parentheses in subsequent checks
Expr *op = OrigOp.get()->IgnoreParens();

View File

@ -564,7 +564,7 @@ ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
SourceLocation RParenLoc) {
bool WasEvaluated = false;
if (E && !E->isTypeDependent()) {
if (E->getType()->isPlaceholderType()) {
if (E->hasPlaceholderType()) {
ExprResult result = CheckPlaceholderExpr(E);
if (result.isInvalid()) return ExprError();
E = result.get();
@ -5704,7 +5704,7 @@ ExprResult Sema::BuildExpressionTrait(ExpressionTrait ET,
SourceLocation RParen) {
if (Queried->isTypeDependent()) {
// Delay type-checking for type-dependent expressions.
} else if (Queried->getType()->isPlaceholderType()) {
} else if (Queried->hasPlaceholderType()) {
ExprResult PE = CheckPlaceholderExpr(Queried);
if (PE.isInvalid()) return ExprError();
return BuildExpressionTrait(ET, KWLoc, PE.get(), RParen);
@ -5720,8 +5720,7 @@ QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK,
SourceLocation Loc,
bool isIndirect) {
assert(!LHS.get()->getType()->isPlaceholderType() &&
!RHS.get()->getType()->isPlaceholderType() &&
assert(!LHS.get()->hasPlaceholderType() && !RHS.get()->hasPlaceholderType() &&
"placeholders should have been weeded out by now");
// The LHS undergoes lvalue conversions if this is ->*, and undergoes the

View File

@ -5327,6 +5327,8 @@ static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
IntegerLiteral *Zero = IntegerLiteral::Create(
Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 0), LogicalTy, {});
IntegerLiteral *One = IntegerLiteral::Create(
Ctx, llvm::APInt(Ctx.getIntWidth(LogicalTy), 1), LogicalTy, {});
Expr *Dist;
if (Rel == BO_NE) {
// When using a != comparison, the increment can be +1 or -1. This can be
@ -5381,18 +5383,25 @@ static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
if (Rel == BO_LE || Rel == BO_GE) {
// Add one to the range if the relational operator is inclusive.
Range = AssertSuccess(Actions.BuildBinOp(
nullptr, {}, BO_Add, Range,
Actions.ActOnIntegerConstant(SourceLocation(), 1).get()));
Range =
AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Add, Range, One));
}
// Divide by the absolute step amount.
// Divide by the absolute step amount. If the range is not a multiple of
// the step size, rounding-up the effective upper bound ensures that the
// last iteration is included.
// Note that the rounding-up may cause an overflow in a temporry that
// could be avoided, but would have occured in a C-style for-loop as well.
Expr *Divisor = BuildVarRef(NewStep);
if (Rel == BO_GE || Rel == BO_GT)
Divisor =
AssertSuccess(Actions.BuildUnaryOp(nullptr, {}, UO_Minus, Divisor));
Expr *DivisorMinusOne =
AssertSuccess(Actions.BuildBinOp(nullptr, {}, BO_Sub, Divisor, One));
Expr *RangeRoundUp = AssertSuccess(
Actions.BuildBinOp(nullptr, {}, BO_Add, Range, DivisorMinusOne));
Dist = AssertSuccess(
Actions.BuildBinOp(nullptr, {}, BO_Div, Range, Divisor));
Actions.BuildBinOp(nullptr, {}, BO_Div, RangeRoundUp, Divisor));
// If there is not at least one iteration, the range contains garbage. Fix
// to zero in this case.

View File

@ -14320,7 +14320,8 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
FoundDecl = MemExpr->getFoundDecl();
Qualifier = MemExpr->getQualifier();
UnbridgedCasts.restore();
} else if (auto *UnresExpr = dyn_cast<UnresolvedMemberExpr>(NakedMemExpr)) {
} else {
UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr);
Qualifier = UnresExpr->getQualifier();
QualType ObjectType = UnresExpr->getBaseType();
@ -14433,9 +14434,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
}
MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
} else
// Unimaged NakedMemExpr type.
return ExprError();
}
QualType ResultType = Method->getReturnType();
ExprValueKind VK = Expr::getValueKindForType(ResultType);

View File

@ -1943,6 +1943,9 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
if (ExprInst.isInvalid())
return nullptr;
ExprResult TransExprRes = TransformExpr(E);
if (!TransExprRes.isInvalid() && !Trap.hasErrorOccurred() &&
TransExprRes.get()->hasPlaceholderType())
TransExprRes = SemaRef.CheckPlaceholderExpr(TransExprRes.get());
if (TransExprRes.isInvalid() || Trap.hasErrorOccurred())
TransExpr = createSubstDiag(SemaRef, Info, [&](llvm::raw_ostream &OS) {
E->printPretty(OS, nullptr, SemaRef.getPrintingPolicy());

View File

@ -2257,7 +2257,7 @@ QualType Sema::BuildBitIntType(bool IsUnsigned, Expr *BitWidth,
if (ICE.isInvalid())
return QualType();
int64_t NumBits = Bits.getSExtValue();
size_t NumBits = Bits.getZExtValue();
if (!IsUnsigned && NumBits < 2) {
Diag(Loc, diag::err_bit_int_bad_size) << 0;
return QualType();
@ -2268,9 +2268,10 @@ QualType Sema::BuildBitIntType(bool IsUnsigned, Expr *BitWidth,
return QualType();
}
if (NumBits > llvm::IntegerType::MAX_INT_BITS) {
const TargetInfo &TI = getASTContext().getTargetInfo();
if (NumBits > TI.getMaxBitIntWidth()) {
Diag(Loc, diag::err_bit_int_max_size)
<< IsUnsigned << llvm::IntegerType::MAX_INT_BITS;
<< IsUnsigned << static_cast<uint64_t>(TI.getMaxBitIntWidth());
return QualType();
}

View File

@ -12494,6 +12494,8 @@ TreeTransform<Derived>::TransformExprRequirement(concepts::ExprRequirement *Req)
TransExpr = Req->getExprSubstitutionDiagnostic();
else {
ExprResult TransExprRes = getDerived().TransformExpr(Req->getExpr());
if (TransExprRes.isUsable() && TransExprRes.get()->hasPlaceholderType())
TransExprRes = SemaRef.CheckPlaceholderExpr(TransExprRes.get());
if (TransExprRes.isInvalid())
return nullptr;
TransExpr = TransExprRes.get();

View File

@ -660,6 +660,8 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
* generated profile, and 0 if this is a Clang FE generated profile.
* 1 in bit 57 indicates there are context-sensitive records in the profile.
* The 59th bit indicates whether to use debug info to correlate profiles.
* The 60th bit indicates single byte coverage instrumentation.
* The 61st bit indicates function entry instrumentation only.
*/
#define VARIANT_MASKS_ALL 0xff00000000000000ULL
#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
@ -667,6 +669,8 @@ serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
#define VARIANT_MASK_INSTR_ENTRY (0x1ULL << 58)
#define VARIANT_MASK_DBG_CORRELATE (0x1ULL << 59)
#define VARIANT_MASK_BYTE_COVERAGE (0x1ULL << 60)
#define VARIANT_MASK_FUNCTION_ENTRY_ONLY (0x1ULL << 61)
#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
#define INSTR_PROF_PROFILE_COUNTER_BIAS_VAR __llvm_profile_counter_bias

View File

@ -20,11 +20,10 @@
*
\*===----------------------------------------------------------------------===*/
#ifdef _MSC_VER
#define PACKED(__decl__) __pragma(pack(push,1)) __decl__ __pragma(pack(pop))
#define PACKED(...) __pragma(pack(push,1)) __VA_ARGS__ __pragma(pack(pop))
#else
#define PACKED(__decl__) __decl__ __attribute__((__packed__))
#define PACKED(...) __VA_ARGS__ __attribute__((__packed__))
#endif
// A 64-bit magic number to uniquely identify the raw binary memprof profile file.
@ -47,14 +46,106 @@ PACKED(struct Header {
uint64_t StackOffset;
});
// A struct describing the information necessary to describe a /proc/maps
// segment entry for a particular binary/library identified by its build id.
PACKED(struct SegmentEntry {
uint64_t Start;
uint64_t End;
uint64_t Offset;
uint8_t BuildId[32];
// This field is unused until sanitizer procmaps support for build ids for
// Linux-Elf is implemented.
uint8_t BuildId[32] = {0};
SegmentEntry(uint64_t S, uint64_t E, uint64_t O) :
Start(S), End(E), Offset(O) {}
SegmentEntry(const SegmentEntry& S) {
Start = S.Start;
End = S.End;
Offset = S.Offset;
}
SegmentEntry& operator=(const SegmentEntry& S) {
Start = S.Start;
End = S.End;
Offset = S.Offset;
return *this;
}
bool operator==(const SegmentEntry& S) const {
return Start == S.Start &&
End == S.End &&
Offset == S.Offset;
}
});
// A struct representing the heap allocation characteristics of a particular
// runtime context. This struct is shared between the compiler-rt runtime and
// the raw profile reader. The indexed format uses a separate, self-describing
// backwards compatible format.
PACKED(struct MemInfoBlock {
uint32_t alloc_count;
uint64_t total_access_count, min_access_count, max_access_count;
uint64_t total_size;
uint32_t min_size, max_size;
uint32_t alloc_timestamp, dealloc_timestamp;
uint64_t total_lifetime;
uint32_t min_lifetime, max_lifetime;
uint32_t alloc_cpu_id, dealloc_cpu_id;
uint32_t num_migrated_cpu;
// Only compared to prior deallocated object currently.
uint32_t num_lifetime_overlaps;
uint32_t num_same_alloc_cpu;
uint32_t num_same_dealloc_cpu;
uint64_t data_type_id; // TODO: hash of type name
MemInfoBlock() : alloc_count(0) {}
MemInfoBlock(uint32_t size, uint64_t access_count, uint32_t alloc_timestamp,
uint32_t dealloc_timestamp, uint32_t alloc_cpu, uint32_t dealloc_cpu)
: alloc_count(1), total_access_count(access_count),
min_access_count(access_count), max_access_count(access_count),
total_size(size), min_size(size), max_size(size),
alloc_timestamp(alloc_timestamp), dealloc_timestamp(dealloc_timestamp),
total_lifetime(dealloc_timestamp - alloc_timestamp),
min_lifetime(total_lifetime), max_lifetime(total_lifetime),
alloc_cpu_id(alloc_cpu), dealloc_cpu_id(dealloc_cpu),
num_lifetime_overlaps(0), num_same_alloc_cpu(0),
num_same_dealloc_cpu(0) {
num_migrated_cpu = alloc_cpu_id != dealloc_cpu_id;
}
void Merge(const MemInfoBlock &newMIB) {
alloc_count += newMIB.alloc_count;
total_access_count += newMIB.total_access_count;
min_access_count = newMIB.min_access_count < min_access_count ? newMIB.min_access_count : min_access_count;
max_access_count = newMIB.max_access_count < max_access_count ? newMIB.max_access_count : max_access_count;
total_size += newMIB.total_size;
min_size = newMIB.min_size < min_size ? newMIB.min_size : min_size;
max_size = newMIB.max_size < max_size ? newMIB.max_size : max_size;
total_lifetime += newMIB.total_lifetime;
min_lifetime = newMIB.min_lifetime < min_lifetime ? newMIB.min_lifetime : min_lifetime;
max_lifetime = newMIB.max_lifetime > max_lifetime ? newMIB.max_lifetime : max_lifetime;
// We know newMIB was deallocated later, so just need to check if it was
// allocated before last one deallocated.
num_lifetime_overlaps += newMIB.alloc_timestamp < dealloc_timestamp;
alloc_timestamp = newMIB.alloc_timestamp;
dealloc_timestamp = newMIB.dealloc_timestamp;
num_same_alloc_cpu += alloc_cpu_id == newMIB.alloc_cpu_id;
num_same_dealloc_cpu += dealloc_cpu_id == newMIB.dealloc_cpu_id;
alloc_cpu_id = newMIB.alloc_cpu_id;
dealloc_cpu_id = newMIB.dealloc_cpu_id;
}
});
} // namespace memprof
} // namespace llvm

View File

@ -211,6 +211,15 @@ void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
char *out_buf, size_t out_buf_size);
// Determine the return address.
#if !defined(_MSC_VER) || defined(__clang__)
#define __sanitizer_return_address() \
__builtin_extract_return_addr(__builtin_return_address(0))
#else
extern "C" void *_ReturnAddress(void);
#pragma intrinsic(_ReturnAddress)
#define __sanitizer_return_address() _ReturnAddress()
#endif
/// Sets the callback to be called immediately before death on error.
///

View File

@ -17,7 +17,7 @@
#include "int_lib.h"
COMPILER_RT_ABI fp_t __floatsisf(int a) {
COMPILER_RT_ABI fp_t __floatsisf(si_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@ -33,7 +33,7 @@ COMPILER_RT_ABI fp_t __floatsisf(int a) {
}
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
const int exponent = (aWidth - 1) - clzsi(a);
rep_t result;
// Shift a into the significand field, rounding if it is a right-shift

View File

@ -16,7 +16,7 @@
#include "fp_lib.h"
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t __floatsitf(int a) {
COMPILER_RT_ABI fp_t __floatsitf(si_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@ -26,14 +26,14 @@ COMPILER_RT_ABI fp_t __floatsitf(int a) {
// All other cases begin by extracting the sign and absolute value of a
rep_t sign = 0;
unsigned aAbs = (unsigned)a;
su_int aAbs = (su_int)a;
if (a < 0) {
sign = signBit;
aAbs = ~(unsigned)a + 1U;
aAbs = ~(su_int)a + (su_int)1U;
}
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(aAbs);
const int exponent = (aWidth - 1) - clzsi(aAbs);
rep_t result;
// Shift a into the significand field and clear the implicit bit.

View File

@ -17,7 +17,7 @@
#include "int_lib.h"
COMPILER_RT_ABI fp_t __floatunsisf(unsigned int a) {
COMPILER_RT_ABI fp_t __floatunsisf(su_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@ -26,7 +26,7 @@ COMPILER_RT_ABI fp_t __floatunsisf(unsigned int a) {
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
const int exponent = (aWidth - 1) - clzsi(a);
rep_t result;
// Shift a into the significand field, rounding if it is a right-shift

View File

@ -16,7 +16,7 @@
#include "fp_lib.h"
#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
COMPILER_RT_ABI fp_t __floatunsitf(unsigned int a) {
COMPILER_RT_ABI fp_t __floatunsitf(su_int a) {
const int aWidth = sizeof a * CHAR_BIT;
@ -25,7 +25,7 @@ COMPILER_RT_ABI fp_t __floatunsitf(unsigned int a) {
return fromRep(0);
// Exponent of (fp_t)a is the width of abs(a).
const int exponent = (aWidth - 1) - __builtin_clz(a);
const int exponent = (aWidth - 1) - clzsi(a);
rep_t result;
// Shift a into the significand field and clear the implicit bit.

View File

@ -33,9 +33,9 @@ static __inline int src_rep_t_clz(src_rep_t a) {
return __builtin_clzl(a);
#else
if (a & REP_C(0xffffffff00000000))
return __builtin_clz(a >> 32);
return clzsi(a >> 32);
else
return 32 + __builtin_clz(a & REP_C(0xffffffff));
return 32 + clzsi(a & REP_C(0xffffffff));
#endif
}

View File

@ -82,7 +82,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) {
r.s.high = n.s.high & (d.s.high - 1);
*rem = r.all;
}
return n.s.high >> __builtin_ctz(d.s.high);
return n.s.high >> ctzsi(d.s.high);
}
// K K
// ---
@ -112,7 +112,7 @@ COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) {
*rem = n.s.low & (d.s.low - 1);
if (d.s.low == 1)
return n.all;
sr = __builtin_ctz(d.s.low);
sr = ctzsi(d.s.low);
q.s.high = n.s.high >> sr;
q.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
return q.all;

Some files were not shown because too many files have changed in this diff Show More