Vendor import of clang trunk r301441:

https://llvm.org/svn/llvm-project/cfe/trunk@301441
This commit is contained in:
Dimitry Andric 2017-04-26 19:24:09 +00:00
parent 583e75cce4
commit f0c55418e2
160 changed files with 7053 additions and 768 deletions

View File

@ -1,7 +1,7 @@
# This file sets up a CMakeCache for the second stage of a Fuchsia toolchain
# build.
set(LLVM_TARGETS_TO_BUILD X86;AArch64 CACHE STRING "")
set(LLVM_TARGETS_TO_BUILD X86;ARM;AArch64 CACHE STRING "")
set(PACKAGE_VENDOR Fuchsia CACHE STRING "")
@ -36,6 +36,7 @@ set(BUILTINS_aarch64-fuchsia-none_CMAKE_SYSTEM_NAME Fuchsia CACHE STRING "")
# Setup toolchain.
set(LLVM_INSTALL_TOOLCHAIN_ONLY ON CACHE BOOL "")
set(LLVM_TOOLCHAIN_TOOLS
llc
llvm-ar
llvm-cov
llvm-cxxfilt
@ -49,6 +50,7 @@ set(LLVM_TOOLCHAIN_TOOLS
llvm-readobj
llvm-size
llvm-symbolizer
opt
CACHE STRING "")
set(LLVM_DISTRIBUTION_COMPONENTS

View File

@ -162,8 +162,9 @@ Download the latest Visual Studio extension from the `alpha build site
Script for patch reformatting
=============================
The python script `clang/tools/clang-format-diff.py` parses the output of
a unified diff and reformats all contained lines with :program:`clang-format`.
The python script `clang/tools/clang-format/clang-format-diff.py` parses the
output of a unified diff and reformats all contained lines with
:program:`clang-format`.
.. code-block:: console

View File

@ -5600,7 +5600,8 @@ typedef enum {
CXIdxEntityLang_None = 0,
CXIdxEntityLang_C = 1,
CXIdxEntityLang_ObjC = 2,
CXIdxEntityLang_CXX = 3
CXIdxEntityLang_CXX = 3,
CXIdxEntityLang_Swift = 4
} CXIdxEntityLanguage;
/**

View File

@ -208,6 +208,10 @@ public:
/// \returns \c true if declaration that this comment is attached to declares
/// a function pointer.
bool isFunctionPointerVarDecl();
/// \returns \c true if the declaration that this comment is attached to
/// declares a variable or a field whose type is a function or a block
/// pointer.
bool isFunctionOrBlockPointerVarLikeDecl();
bool isFunctionOrMethodVariadic();
bool isObjCMethodDecl();
bool isObjCPropertyDecl();

View File

@ -1406,7 +1406,7 @@ const internal::VariadicDynCastAllOfMatcher<
///
/// Example: Given
/// \code
/// struct T {void func()};
/// struct T {void func();};
/// T f();
/// void g(T);
/// \endcode

View File

@ -12,6 +12,8 @@
//
//===----------------------------------------------------------------------===//
// See the Internals Manual, section The Diagnostics Subsystem for an overview.
// Define the diagnostic severities.
class Severity<string N> {
string Name = N;
@ -100,10 +102,20 @@ class SuppressInSystemHeader {
class Error<string str> : Diagnostic<str, CLASS_ERROR, SEV_Error>, SFINAEFailure {
bit ShowInSystemHeader = 1;
}
// Warnings default to on (but can be default-off'd with DefaultIgnore).
// This is used for warnings about questionable code; warnings about
// accepted language extensions should use Extension or ExtWarn below instead.
class Warning<string str> : Diagnostic<str, CLASS_WARNING, SEV_Warning>;
// Remarks can be turned on with -R flags and provide commentary, e.g. on
// optimizer decisions.
class Remark<string str> : Diagnostic<str, CLASS_REMARK, SEV_Ignored>;
// Extensions are warnings about accepted language extensions.
// Extension warnings are default-off but enabled by -pedantic.
class Extension<string str> : Diagnostic<str, CLASS_EXTENSION, SEV_Ignored>;
// ExtWarns are warnings about accepted language extensions.
// ExtWarn warnings are default-on.
class ExtWarn<string str> : Diagnostic<str, CLASS_EXTENSION, SEV_Warning>;
// Notes can provide supplementary information on errors, warnings, and remarks.
class Note<string str> : Diagnostic<str, CLASS_NOTE, SEV_Fatal/*ignored*/>;

View File

@ -1114,14 +1114,12 @@ def err_pragma_cannot_end_force_cuda_host_device : Error<
} // end of Parse Issue category.
let CategoryName = "Modules Issue" in {
def err_expected_module_interface_decl : Error<
"expected module declaration at start of module interface">;
def err_unexpected_module_decl : Error<
"module declaration must be the first declaration in the translation unit">;
"module declaration can only appear at the top level">;
def err_module_expected_ident : Error<
"expected a module name after module%select{| import}0">;
def err_unexpected_module_kind : Error<
"unexpected module kind %0; expected 'implementation' or 'partition'">;
"expected a module name after '%select{module|import}0'">;
def err_module_implementation_partition : Error<
"module partition must be declared 'export'">;
def err_attribute_not_module_attr : Error<
"%0 attribute cannot be applied to a module">;
def err_attribute_not_import_attr : Error<

View File

@ -8801,9 +8801,11 @@ def err_invalid_type_for_program_scope_var : Error<
}
let CategoryName = "Modules Issue" in {
def err_module_decl_in_module_map_module : Error<
"'module' declaration found while building module from module map">;
def err_module_interface_implementation_mismatch : Error<
"%select{'module'|'module partition'|'module implementation'}0 declaration "
"found while %select{not |not |}0building module interface">;
"missing 'export' specifier in module declaration while "
"building module interface">;
def err_current_module_name_mismatch : Error<
"module name '%0' specified on command line does not match name of module">;
def err_module_redefinition : Error<
@ -8846,8 +8848,13 @@ def err_module_self_import : Error<
"import of module '%0' appears within same top-level module '%1'">;
def err_module_import_in_implementation : Error<
"@import of module '%0' in implementation of '%1'; use #import">;
// C++ Modules TS
def err_export_within_export : Error<
"export declaration appears within another export declaration">;
def err_export_not_in_module_interface : Error<
"export declaration can only be used within a module interface unit after "
"the module declaration">;
def ext_equivalent_internal_linkage_decl_in_modules : ExtWarn<
"ambiguous use of internal linkage declaration %0 defined in multiple modules">,

View File

@ -62,6 +62,18 @@ public:
/// \brief The location of the module definition.
SourceLocation DefinitionLoc;
enum ModuleKind {
/// \brief This is a module that was defined by a module map and built out
/// of header files.
ModuleMapModule,
/// \brief This is a C++ Modules TS module interface unit.
ModuleInterfaceUnit
};
/// \brief The kind of this module.
ModuleKind Kind = ModuleMapModule;
/// \brief The parent of this module. This will be NULL for the top-level
/// module.
Module *Parent;

View File

@ -47,6 +47,9 @@ SANITIZER("kernel-address", KernelAddress)
// MemorySanitizer
SANITIZER("memory", Memory)
// libFuzzer
SANITIZER("fuzzer", Fuzzer)
// ThreadSanitizer
SANITIZER("thread", Thread)

View File

@ -573,6 +573,8 @@ def fblocks_runtime_optional : Flag<["-"], "fblocks-runtime-optional">,
HelpText<"Weakly link in the blocks runtime">;
def fexternc_nounwind : Flag<["-"], "fexternc-nounwind">,
HelpText<"Assume all functions with C linkage do not unwind">;
def enable_split_dwarf : Flag<["-"], "enable-split-dwarf">,
HelpText<"Use split dwarf/Fission">;
def split_dwarf_file : Separate<["-"], "split-dwarf-file">,
HelpText<"File name to use for split dwarf debug info output">;
def fno_wchar : Flag<["-"], "fno-wchar">,

View File

@ -50,6 +50,7 @@ class SanitizerArgs {
bool needsSharedAsanRt() const { return AsanSharedRuntime; }
bool needsTsanRt() const { return Sanitizers.has(SanitizerKind::Thread); }
bool needsMsanRt() const { return Sanitizers.has(SanitizerKind::Memory); }
bool needsFuzzer() const { return Sanitizers.has(SanitizerKind::Fuzzer); }
bool needsLsanRt() const {
return Sanitizers.has(SanitizerKind::Leak) &&
!Sanitizers.has(SanitizerKind::Address);

View File

@ -1512,6 +1512,18 @@ llvm::Expected<tooling::Replacements>
cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style);
/// \brief Represents the status of a formatting attempt.
struct FormattingAttemptStatus {
  /// \brief ``false`` if any of the affected ranges was left unformatted
  /// because of a non-recoverable syntax error; ``true`` otherwise.
  bool FormatComplete{true};
  /// \brief When ``FormatComplete`` is ``false``, a one-based line number in
  /// the original code at which a syntax error probably occurred. Based on a
  /// best-effort analysis and possibly imprecise.
  unsigned Line{0};
};
/// \brief Reformats the given \p Ranges in \p Code.
///
/// Each range is extended on either end to its next bigger logic unit, i.e.
@ -1521,13 +1533,20 @@ cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
/// Returns the ``Replacements`` necessary to make all \p Ranges comply with
/// \p Style.
///
/// If ``IncompleteFormat`` is non-null, its value will be set to true if any
/// of the affected ranges were not formatted due to a non-recoverable syntax
/// error.
/// If ``Status`` is non-null, its value will be populated with the status of
/// this formatting attempt. See \c FormattingAttemptStatus.
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName = "<stdin>",
bool *IncompleteFormat = nullptr);
FormattingAttemptStatus *Status = nullptr);
/// \brief Same as above, except if ``IncompleteFormat`` is non-null, its value
/// will be set to true if any of the affected ranges were not formatted due to
/// a non-recoverable syntax error.
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName,
bool *IncompleteFormat);
/// \brief Clean up any erroneous/redundant code in the given \p Ranges in \p
/// Code.

View File

@ -199,6 +199,7 @@ CODEGENOPT(DebugTypeExtRefs, 1, 0) ///< Whether or not debug info should contain
CODEGENOPT(DebugExplicitImport, 1, 0) ///< Whether or not debug info should
///< contain explicit imports for
///< anonymous namespaces
CODEGENOPT(EnableSplitDwarf, 1, 0) ///< Whether to enable split DWARF
CODEGENOPT(SplitDwarfInlining, 1, 1) ///< Whether to include inlining info in the
///< skeleton CU to allow for symbolication
///< of inline stack frames without .dwo files.

View File

@ -59,6 +59,7 @@ enum class SymbolLanguage {
C,
ObjC,
CXX,
Swift,
};
/// Language specific sub-kinds.

View File

@ -30,10 +30,14 @@ static inline StringRef getUSRSpacePrefix() {
bool generateUSRForDecl(const Decl *D, SmallVectorImpl<char> &Buf);
/// \brief Generate a USR fragment for an Objective-C class.
void generateUSRForObjCClass(StringRef Cls, raw_ostream &OS);
void generateUSRForObjCClass(StringRef Cls, raw_ostream &OS,
StringRef ExtSymbolDefinedIn = "",
StringRef CategoryContextExtSymbolDefinedIn = "");
/// \brief Generate a USR fragment for an Objective-C class category.
void generateUSRForObjCCategory(StringRef Cls, StringRef Cat, raw_ostream &OS);
void generateUSRForObjCCategory(StringRef Cls, StringRef Cat, raw_ostream &OS,
StringRef ClsExtSymbolDefinedIn = "",
StringRef CatExtSymbolDefinedIn = "");
/// \brief Generate a USR fragment for an Objective-C instance variable. The
/// complete USR can be created by concatenating the USR for the
@ -48,7 +52,15 @@ void generateUSRForObjCMethod(StringRef Sel, bool IsInstanceMethod,
void generateUSRForObjCProperty(StringRef Prop, bool isClassProp, raw_ostream &OS);
/// \brief Generate a USR fragment for an Objective-C protocol.
void generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS);
void generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS,
StringRef ExtSymbolDefinedIn = "");
/// Generate USR fragment for a global (non-nested) enum.
void generateUSRForGlobalEnum(StringRef EnumName, raw_ostream &OS,
StringRef ExtSymbolDefinedIn = "");
/// Generate a USR fragment for an enum constant.
void generateUSRForEnumConstant(StringRef EnumConstantName, raw_ostream &OS);
/// \brief Generate a USR for a macro, including the USR prefix.
///

View File

@ -1934,7 +1934,8 @@ public:
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation ModuleLoc, ModuleDeclKind MDK,
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path);
/// \brief The parser has processed a module import declaration.
@ -8326,6 +8327,12 @@ private:
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.

View File

@ -116,6 +116,9 @@ bool ParagraphComment::isWhitespaceNoCache() const {
static TypeLoc lookThroughTypedefOrTypeAliasLocs(TypeLoc &SrcTL) {
TypeLoc TL = SrcTL.IgnoreParens();
// Look through attribute types.
if (AttributedTypeLoc AttributeTL = TL.getAs<AttributedTypeLoc>())
return AttributeTL.getModifiedLoc();
// Look through qualified types.
if (QualifiedTypeLoc QualifiedTL = TL.getAs<QualifiedTypeLoc>())
return QualifiedTL.getUnqualifiedLoc();
@ -280,8 +283,25 @@ void DeclInfo::fill() {
case Decl::EnumConstant:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
case Decl::ObjCProperty: {
const TypeSourceInfo *TSI;
if (const auto *VD = dyn_cast<DeclaratorDecl>(CommentDecl))
TSI = VD->getTypeSourceInfo();
else if (const auto *PD = dyn_cast<ObjCPropertyDecl>(CommentDecl))
TSI = PD->getTypeSourceInfo();
else
TSI = nullptr;
if (TSI) {
TypeLoc TL = TSI->getTypeLoc().getUnqualifiedLoc();
FunctionTypeLoc FTL;
if (getFunctionTypeLoc(TL, FTL)) {
ParamVars = FTL.getParams();
ReturnType = FTL.getReturnLoc().getType();
}
}
Kind = VariableKind;
break;
}
case Decl::Namespace:
Kind = NamespaceKind;
break;

View File

@ -86,7 +86,7 @@ ParamCommandComment *Sema::actOnParamCommandStart(
new (Allocator) ParamCommandComment(LocBegin, LocEnd, CommandID,
CommandMarker);
if (!isFunctionDecl())
if (!isFunctionDecl() && !isFunctionOrBlockPointerVarLikeDecl())
Diag(Command->getLocation(),
diag::warn_doc_param_not_attached_to_a_function_decl)
<< CommandMarker
@ -584,7 +584,11 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
assert(ThisDeclInfo && "should not call this check on a bare comment");
if (isFunctionDecl()) {
// We allow the return command for all @properties because it can be used
// to document the value that the property getter returns.
if (isObjCPropertyDecl())
return;
if (isFunctionDecl() || isFunctionOrBlockPointerVarLikeDecl()) {
if (ThisDeclInfo->ReturnType->isVoidType()) {
unsigned DiagKind;
switch (ThisDeclInfo->CommentDecl->getKind()) {
@ -610,8 +614,6 @@ void Sema::checkReturnsCommand(const BlockCommandComment *Command) {
}
return;
}
else if (isObjCPropertyDecl())
return;
Diag(Command->getLocation(),
diag::warn_doc_returns_not_attached_to_a_function_decl)
@ -844,6 +846,30 @@ bool Sema::isFunctionPointerVarDecl() {
return false;
}
/// Returns true if the declaration this comment is attached to is a
/// variable-like entity (variable, field, or Objective-C property) whose
/// declared type is a function pointer or a block pointer.
bool Sema::isFunctionOrBlockPointerVarLikeDecl() {
  // No declaration is attached to this comment at all.
  if (!ThisDeclInfo)
    return false;
  // Populate DeclInfo lazily before querying it.
  if (!ThisDeclInfo->IsFilled)
    inspectThisDecl();
  // Only variable-like declarations with a known underlying decl qualify.
  if (ThisDeclInfo->getKind() != DeclInfo::VariableKind ||
      !ThisDeclInfo->CurrentDecl)
    return false;
  // Fetch the declared type, either from a declarator (variable/field) or
  // from an Objective-C property.
  QualType QT;
  if (const auto *VD = dyn_cast<DeclaratorDecl>(ThisDeclInfo->CurrentDecl))
    QT = VD->getType();
  else if (const auto *PD =
               dyn_cast<ObjCPropertyDecl>(ThisDeclInfo->CurrentDecl))
    QT = PD->getType();
  else
    return false;
  // We would like to warn about the 'returns'/'param' commands only for
  // variables that directly specify the function type, so type aliases
  // can be ignored.
  if (QT->getAs<TypedefType>())
    return false;
  return QT->isFunctionPointerType() || QT->isBlockPointerType();
}
bool Sema::isObjCPropertyDecl() {
if (!ThisDeclInfo)
return false;

View File

@ -2251,6 +2251,14 @@ bool VarDecl::checkInitIsICE() const {
return Eval->IsICE;
}
/// Return the definition of \p D if one exists, otherwise \p D itself.
template <typename DeclT> static DeclT *getDefinitionOrSelf(DeclT *D) {
  assert(D);
  DeclT *Def = D->getDefinition();
  return Def ? Def : D;
}
VarDecl *VarDecl::getTemplateInstantiationPattern() const {
// If it's a variable template specialization, find the template or partial
// specialization from which it was instantiated.
@ -2262,7 +2270,7 @@ VarDecl *VarDecl::getTemplateInstantiationPattern() const {
break;
VTD = NewVTD;
}
return VTD->getTemplatedDecl()->getDefinition();
return getDefinitionOrSelf(VTD->getTemplatedDecl());
}
if (auto *VTPSD =
From.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
@ -2271,7 +2279,7 @@ VarDecl *VarDecl::getTemplateInstantiationPattern() const {
break;
VTPSD = NewVTPSD;
}
return VTPSD->getDefinition();
return getDefinitionOrSelf<VarDecl>(VTPSD);
}
}
@ -2280,23 +2288,18 @@ VarDecl *VarDecl::getTemplateInstantiationPattern() const {
VarDecl *VD = getInstantiatedFromStaticDataMember();
while (auto *NewVD = VD->getInstantiatedFromStaticDataMember())
VD = NewVD;
return VD->getDefinition();
return getDefinitionOrSelf(VD);
}
}
if (VarTemplateDecl *VarTemplate = getDescribedVarTemplate()) {
while (VarTemplate->getInstantiatedFromMemberTemplate()) {
if (VarTemplate->isMemberSpecialization())
break;
VarTemplate = VarTemplate->getInstantiatedFromMemberTemplate();
}
assert((!VarTemplate->getTemplatedDecl() ||
!isTemplateInstantiation(getTemplateSpecializationKind())) &&
"couldn't find pattern for variable instantiation");
return VarTemplate->getTemplatedDecl();
return getDefinitionOrSelf(VarTemplate->getTemplatedDecl());
}
return nullptr;
}
@ -3198,8 +3201,11 @@ bool FunctionDecl::isTemplateInstantiation() const {
FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
// Handle class scope explicit specialization special case.
if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
return getClassScopeSpecializationPattern();
if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization) {
if (auto *Spec = getClassScopeSpecializationPattern())
return getDefinitionOrSelf(Spec);
return nullptr;
}
// If this is a generic lambda call operator specialization, its
// instantiation pattern is always its primary template's pattern
@ -3212,14 +3218,8 @@ FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
if (isGenericLambdaCallOperatorSpecialization(
dyn_cast<CXXMethodDecl>(this))) {
assert(getPrimaryTemplate() && "A generic lambda specialization must be "
"generated from a primary call operator "
"template");
assert(getPrimaryTemplate()->getTemplatedDecl()->getBody() &&
"A generic lambda call operator template must always have a body - "
"even if instantiated from a prototype (i.e. as written) member "
"template");
return getPrimaryTemplate()->getTemplatedDecl();
assert(getPrimaryTemplate() && "not a generic lambda call operator?");
return getDefinitionOrSelf(getPrimaryTemplate()->getTemplatedDecl());
}
if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
@ -3231,10 +3231,13 @@ FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
Primary = Primary->getInstantiatedFromMemberTemplate();
}
return Primary->getTemplatedDecl();
return getDefinitionOrSelf(Primary->getTemplatedDecl());
}
return getInstantiatedFromMemberFunction();
if (auto *MFD = getInstantiatedFromMemberFunction())
return getDefinitionOrSelf(MFD);
return nullptr;
}
FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const {
@ -3778,7 +3781,7 @@ EnumDecl *EnumDecl::getTemplateInstantiationPattern() const {
EnumDecl *ED = getInstantiatedFromMemberEnum();
while (auto *NewED = ED->getInstantiatedFromMemberEnum())
ED = NewED;
return ED;
return getDefinitionOrSelf(ED);
}
}

View File

@ -1364,6 +1364,13 @@ CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
}
const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
auto GetDefinitionOrSelf =
[](const CXXRecordDecl *D) -> const CXXRecordDecl * {
if (auto *Def = D->getDefinition())
return Def;
return D;
};
// If it's a class template specialization, find the template or partial
// specialization from which it was instantiated.
if (auto *TD = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
@ -1374,7 +1381,7 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
break;
CTD = NewCTD;
}
return CTD->getTemplatedDecl()->getDefinition();
return GetDefinitionOrSelf(CTD->getTemplatedDecl());
}
if (auto *CTPSD =
From.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
@ -1383,7 +1390,7 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
break;
CTPSD = NewCTPSD;
}
return CTPSD->getDefinition();
return GetDefinitionOrSelf(CTPSD);
}
}
@ -1392,7 +1399,7 @@ const CXXRecordDecl *CXXRecordDecl::getTemplateInstantiationPattern() const {
const CXXRecordDecl *RD = this;
while (auto *NewRD = RD->getInstantiatedFromMemberClass())
RD = NewRD;
return RD->getDefinition();
return GetDefinitionOrSelf(RD);
}
}

View File

@ -539,9 +539,18 @@ void ObjCInterfaceDecl::getDesignatedInitializers(
bool ObjCInterfaceDecl::isDesignatedInitializer(Selector Sel,
const ObjCMethodDecl **InitMethod) const {
bool HasCompleteDef = isThisDeclarationADefinition();
// During deserialization the data record for the ObjCInterfaceDecl could
// be made invariant by reusing the canonical decl. Take this into account
// when checking for the complete definition.
if (!HasCompleteDef && getCanonicalDecl()->hasDefinition() &&
getCanonicalDecl()->getDefinition() == getDefinition())
HasCompleteDef = true;
// Check for a complete definition and recover if not so.
if (!isThisDeclarationADefinition())
if (!HasCompleteDef)
return false;
if (data().ExternallyCompleted)
LoadExternalDefinition();

View File

@ -129,16 +129,20 @@ public:
// that we add to the PassManagerBuilder.
class PassManagerBuilderWrapper : public PassManagerBuilder {
public:
PassManagerBuilderWrapper(const CodeGenOptions &CGOpts,
PassManagerBuilderWrapper(const Triple &TargetTriple,
const CodeGenOptions &CGOpts,
const LangOptions &LangOpts)
: PassManagerBuilder(), CGOpts(CGOpts), LangOpts(LangOpts) {}
: PassManagerBuilder(), TargetTriple(TargetTriple), CGOpts(CGOpts),
LangOpts(LangOpts) {}
const Triple &getTargetTriple() const { return TargetTriple; }
const CodeGenOptions &getCGOpts() const { return CGOpts; }
const LangOptions &getLangOpts() const { return LangOpts; }
private:
const Triple &TargetTriple;
const CodeGenOptions &CGOpts;
const LangOptions &LangOpts;
};
}
static void addObjCARCAPElimPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
@ -185,16 +189,35 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
PM.add(createSanitizerCoverageModulePass(Opts));
}
// Decide whether ASan should use GC-friendly instrumentation for globals.
// Without -fdata-sections there is no point (except on MachO, where sections
// are not a factor). On ELF the feature additionally requires an assembler
// extension that currently only works with -integrated-as.
static bool asanUseGlobalsGC(const Triple &T, const CodeGenOptions &CGOpts) {
  const auto ObjFormat = T.getObjectFormat();
  if (ObjFormat == Triple::MachO || ObjFormat == Triple::COFF)
    return true;
  if (ObjFormat == Triple::ELF)
    return CGOpts.DataSections && !CGOpts.DisableIntegratedAS;
  return false;
}
static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
const Triple &T = BuilderWrapper.getTargetTriple();
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address);
bool UseAfterScope = CGOpts.SanitizeAddressUseAfterScope;
bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts);
PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover,
UseAfterScope));
PM.add(createAddressSanitizerModulePass(/*CompileKernel*/false, Recover));
PM.add(createAddressSanitizerModulePass(/*CompileKernel*/ false, Recover,
UseGlobalsGC));
}
static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
@ -407,6 +430,8 @@ static void initTargetOptions(llvm::TargetOptions &Options,
Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
if (CodeGenOpts.EnableSplitDwarf)
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
@ -434,8 +459,6 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (CodeGenOpts.DisableLLVMPasses)
return;
PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts);
// Figure out TargetLibraryInfo. This needs to be added to MPM and FPM
// manually (and not via PMBuilder), since some passes (eg. InstrProfiling)
// are inserted before PMBuilder ones - they'd get the default-constructed
@ -444,6 +467,8 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
std::unique_ptr<TargetLibraryInfoImpl> TLII(
createTLII(TargetTriple, CodeGenOpts));
PassManagerBuilderWrapper PMBuilder(TargetTriple, CodeGenOpts, LangOpts);
// At O0 and O1 we only run the always inliner which is more efficient. At
// higher optimization levels we run the normal inliner.
if (CodeGenOpts.OptimizationLevel <= 1) {

View File

@ -528,12 +528,14 @@ void CGDebugInfo::CreateCompileUnit() {
// Create new compile unit.
// FIXME - Eliminate TheCU.
TheCU = DBuilder.createCompileUnit(
LangTag, DBuilder.createFile(remapDIPath(MainFileName),
remapDIPath(getCurrentDirname()), CSKind,
Checksum),
LangTag,
DBuilder.createFile(remapDIPath(MainFileName),
remapDIPath(getCurrentDirname()), CSKind, Checksum),
Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers,
CGM.getCodeGenOpts().SplitDwarfFile, EmissionKind, 0 /* DWOid */,
CGM.getCodeGenOpts().SplitDwarfInlining,
CGM.getCodeGenOpts().EnableSplitDwarf
? ""
: CGM.getCodeGenOpts().SplitDwarfFile,
EmissionKind, 0 /* DWOid */, CGM.getCodeGenOpts().SplitDwarfInlining,
CGM.getCodeGenOpts().DebugInfoForProfiling);
}

View File

@ -533,15 +533,6 @@ bool CodeGenFunction::sanitizePerformTypeCheck() const {
SanOpts.has(SanitizerKind::Vptr);
}
/// Check if a runtime null check for \p Ptr can be omitted.
static bool canOmitPointerNullCheck(llvm::Value *Ptr) {
// Note: do not perform any constant-folding in this function. That is best
// left to the IR builder.
// Pointers to alloca'd memory are non-null.
return isa<llvm::AllocaInst>(Ptr->stripPointerCastsNoFollowAliases());
}
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *Ptr, QualType Ty,
CharUnits Alignment,
@ -560,11 +551,16 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
SmallVector<std::pair<llvm::Value *, SanitizerMask>, 3> Checks;
llvm::BasicBlock *Done = nullptr;
// Quickly determine whether we have a pointer to an alloca. It's possible
// to skip null checks, and some alignment checks, for these pointers. This
// can reduce compile-time significantly.
auto PtrToAlloca =
dyn_cast<llvm::AllocaInst>(Ptr->stripPointerCastsNoFollowAliases());
bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
TCK == TCK_UpcastToVirtualBase;
if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
!SkippedChecks.has(SanitizerKind::Null) &&
!canOmitPointerNullCheck(Ptr)) {
!SkippedChecks.has(SanitizerKind::Null) && !PtrToAlloca) {
// The glvalue must not be an empty glvalue.
llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);
@ -617,7 +613,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
// The glvalue must be suitably aligned.
if (AlignVal > 1) {
if (AlignVal > 1 &&
(!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
llvm::Value *Align =
Builder.CreateAnd(Builder.CreatePtrToInt(Ptr, IntPtrTy),
llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));

View File

@ -2466,16 +2466,14 @@ static int addMonoNonMonoModifier(OpenMPSchedType Schedule,
return Schedule | Modifier;
}
void CGOpenMPRuntime::emitForDispatchInit(CodeGenFunction &CGF,
SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned,
bool Ordered, llvm::Value *UB,
llvm::Value *Chunk) {
void CGOpenMPRuntime::emitForDispatchInit(
CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned,
bool Ordered, const DispatchRTInput &DispatchValues) {
if (!CGF.HaveInsertPoint())
return;
OpenMPSchedType Schedule =
getRuntimeSchedule(ScheduleKind.Schedule, Chunk != nullptr, Ordered);
OpenMPSchedType Schedule = getRuntimeSchedule(
ScheduleKind.Schedule, DispatchValues.Chunk != nullptr, Ordered);
assert(Ordered ||
(Schedule != OMP_sch_static && Schedule != OMP_sch_static_chunked &&
Schedule != OMP_ord_static && Schedule != OMP_ord_static_chunked &&
@ -2486,14 +2484,14 @@ void CGOpenMPRuntime::emitForDispatchInit(CodeGenFunction &CGF,
// kmp_int[32|64] stride, kmp_int[32|64] chunk);
// If the Chunk was not specified in the clause - use default value 1.
if (Chunk == nullptr)
Chunk = CGF.Builder.getIntN(IVSize, 1);
llvm::Value *Chunk = DispatchValues.Chunk ? DispatchValues.Chunk
: CGF.Builder.getIntN(IVSize, 1);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
CGF.Builder.getInt32(addMonoNonMonoModifier(
Schedule, ScheduleKind.M1, ScheduleKind.M2)), // Schedule type
CGF.Builder.getIntN(IVSize, 0), // Lower
UB, // Upper
DispatchValues.LB, // Lower
DispatchValues.UB, // Upper
CGF.Builder.getIntN(IVSize, 1), // Stride
Chunk // Chunk
};

View File

@ -672,16 +672,50 @@ public:
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// Struct with the values to be passed to the dispatch runtime function.
struct DispatchRTInput {
  /// Loop lower bound.
  llvm::Value *LB = nullptr;
  /// Loop upper bound.
  llvm::Value *UB = nullptr;
  /// Chunk size specified using the 'schedule' clause (nullptr if the chunk
  /// was not specified).
  llvm::Value *Chunk = nullptr;
  /// Default-constructed input; all values stay null until filled in.
  DispatchRTInput() = default;
  /// Convenience constructor taking the bounds and the chunk directly.
  DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
      : LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
/// This is used for non static scheduled types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, the chunk 1 will be used.
///
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
llvm::Value *UB,
llvm::Value *Chunk = nullptr);
const DispatchRTInput &DispatchValues);
/// \brief Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// Depending on the loop schedule, it is nesessary to call some runtime
/// This is used only in case of static schedule, when the user did not
/// specify a ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///

View File

@ -87,7 +87,8 @@ public:
class OMPParallelScope final : public OMPLexicalScope {
bool EmitPreInitStmt(const OMPExecutableDirective &S) {
OpenMPDirectiveKind Kind = S.getDirectiveKind();
return !isOpenMPTargetExecutionDirective(Kind) &&
return !(isOpenMPTargetExecutionDirective(Kind) ||
isOpenMPLoopBoundSharingDirective(Kind)) &&
isOpenMPParallelDirective(Kind);
}
@ -1249,10 +1250,20 @@ static void emitPostUpdateForReductionClause(
CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
}
static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
const OMPExecutableDirective &S,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) {
namespace {
/// Codegen callback for appending the 'distribute' chunk lower and upper
/// bounds to the captured-variable list of an outlined parallel function.
/// This is necessary for combined constructs such as
/// 'distribute parallel for', where the inner 'for' must iterate over the
/// chunk assigned by the enclosing 'distribute'.
typedef llvm::function_ref<void(CodeGenFunction &,
                                const OMPExecutableDirective &,
                                llvm::SmallVectorImpl<llvm::Value *> &)>
    CodeGenBoundParametersTy;
} // anonymous namespace
static void emitCommonOMPParallelDirective(
CodeGenFunction &CGF, const OMPExecutableDirective &S,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
const CodeGenBoundParametersTy &CodeGenBoundParameters) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
auto OutlinedFn = CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
S, *CS->getCapturedDecl()->param_begin(), InnermostKind, CodeGen);
@ -1279,11 +1290,20 @@ static void emitCommonOMPParallelDirective(CodeGenFunction &CGF,
OMPParallelScope Scope(CGF, S);
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
// Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
// lower and upper bounds with the pragma 'for' chunking mechanism.
// The following lambda takes care of appending the lower and upper bound
// parameters when necessary
CodeGenBoundParameters(CGF, S, CapturedVars);
CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getLocStart(), OutlinedFn,
CapturedVars, IfCond);
}
/// No-op CodeGenBoundParametersTy callback: used for parallel regions that
/// are not combined with 'distribute' and therefore have no chunk bounds to
/// append to the outlined function's captured variables.
static void emitEmptyBoundParameters(CodeGenFunction &,
                                     const OMPExecutableDirective &,
                                     llvm::SmallVectorImpl<llvm::Value *> &) {}
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
// Emit parallel region as a standalone region.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
@ -1304,7 +1324,8 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
CGF.EmitStmt(cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
};
emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen);
emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
emitEmptyBoundParameters);
emitPostUpdateForReductionClause(
*this, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
}
@ -1649,6 +1670,13 @@ void CodeGenFunction::EmitOMPSimdFinal(
EmitBlock(DoneBB, /*IsFinished=*/true);
}
/// Emit the loop body of \p S followed by a stop point for the directive.
/// This is the default CodeGenLoopTy callback, used when 'distribute' is not
/// combined with a worksharing 'for' loop (combined constructs substitute
/// emitInnerParallelForWhenCombined instead).
static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
                                         const OMPLoopDirective &S,
                                         CodeGenFunction::JumpDest LoopExit) {
  CGF.EmitOMPLoopBody(S, LoopExit);
  CGF.EmitStopPoint(&S);
}
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
OMPLoopScope PreInitScope(CGF, S);
@ -1731,9 +1759,12 @@ void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
}
void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
void CodeGenFunction::EmitOMPOuterLoop(
bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
CodeGenFunction::OMPPrivateScope &LoopScope,
const CodeGenFunction::OMPLoopArguments &LoopArgs,
const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
auto &RT = CGM.getOpenMPRuntime();
const Expr *IVExpr = S.getIterationVariable();
@ -1751,15 +1782,18 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
llvm::Value *BoolCondVal = nullptr;
if (!DynamicOrOrdered) {
// UB = min(UB, GlobalUB)
EmitIgnoredExpr(S.getEnsureUpperBound());
// UB = min(UB, GlobalUB) or
// UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
// 'distribute parallel for')
EmitIgnoredExpr(LoopArgs.EUB);
// IV = LB
EmitIgnoredExpr(S.getInit());
EmitIgnoredExpr(LoopArgs.Init);
// IV < UB
BoolCondVal = EvaluateExprAsBool(S.getCond());
BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
} else {
BoolCondVal = RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, IL,
LB, UB, ST);
BoolCondVal =
RT.emitForNext(*this, S.getLocStart(), IVSize, IVSigned, LoopArgs.IL,
LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
}
// If there are any cleanups between here and the loop-exit scope,
@ -1779,7 +1813,7 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
// Emit "IV = LB" (in case of static schedule, we have already calculated new
// LB for loop condition and emitted it above).
if (DynamicOrOrdered)
EmitIgnoredExpr(S.getInit());
EmitIgnoredExpr(LoopArgs.Init);
// Create a block for the increment.
auto Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
@ -1793,24 +1827,27 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
EmitOMPSimdInit(S, IsMonotonic);
SourceLocation Loc = S.getLocStart();
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
[&S, LoopExit](CodeGenFunction &CGF) {
CGF.EmitOMPLoopBody(S, LoopExit);
CGF.EmitStopPoint(&S);
},
[Ordered, IVSize, IVSigned, Loc](CodeGenFunction &CGF) {
if (Ordered) {
CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(
CGF, Loc, IVSize, IVSigned);
}
});
// when 'distribute' is not combined with a 'for':
// while (idx <= UB) { BODY; ++idx; }
// when 'distribute' is combined with a 'for'
// (e.g. 'distribute parallel for')
// while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
EmitOMPInnerLoop(
S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
[&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
CodeGenLoop(CGF, S, LoopExit);
},
[IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
});
EmitBlock(Continue.getBlock());
BreakContinueStack.pop_back();
if (!DynamicOrOrdered) {
// Emit "LB = LB + Stride", "UB = UB + Stride".
EmitIgnoredExpr(S.getNextLowerBound());
EmitIgnoredExpr(S.getNextUpperBound());
EmitIgnoredExpr(LoopArgs.NextLB);
EmitIgnoredExpr(LoopArgs.NextUB);
}
EmitBranch(CondBlock);
@ -1829,7 +1866,8 @@ void CodeGenFunction::EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
void CodeGenFunction::EmitOMPForOuterLoop(
const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
const OMPLoopArguments &LoopArgs,
const CodeGenDispatchBoundsTy &CGDispatchBounds) {
auto &RT = CGM.getOpenMPRuntime();
// Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
@ -1838,7 +1876,7 @@ void CodeGenFunction::EmitOMPForOuterLoop(
assert((Ordered ||
!RT.isStaticNonchunked(ScheduleKind.Schedule,
/*Chunked=*/Chunk != nullptr)) &&
LoopArgs.Chunk != nullptr)) &&
"static non-chunked schedule does not need outer loop");
// Emit outer loop.
@ -1896,22 +1934,46 @@ void CodeGenFunction::EmitOMPForOuterLoop(
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
if (DynamicOrOrdered) {
llvm::Value *UBVal = EmitScalarExpr(S.getLastIteration());
auto DispatchBounds = CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
llvm::Value *LBVal = DispatchBounds.first;
llvm::Value *UBVal = DispatchBounds.second;
CGOpenMPRuntime::DispatchRTInput DipatchRTInputValues = {LBVal, UBVal,
LoopArgs.Chunk};
RT.emitForDispatchInit(*this, S.getLocStart(), ScheduleKind, IVSize,
IVSigned, Ordered, UBVal, Chunk);
IVSigned, Ordered, DipatchRTInputValues);
} else {
RT.emitForStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize, IVSigned,
Ordered, IL, LB, UB, ST, Chunk);
Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
LoopArgs.ST, LoopArgs.Chunk);
}
EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, Ordered, LB, UB,
ST, IL, Chunk);
auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
const unsigned IVSize,
const bool IVSigned) {
if (Ordered) {
CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
IVSigned);
}
};
OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
OuterLoopArgs.IncExpr = S.getInc();
OuterLoopArgs.Init = S.getInit();
OuterLoopArgs.Cond = S.getCond();
OuterLoopArgs.NextLB = S.getNextLowerBound();
OuterLoopArgs.NextUB = S.getNextUpperBound();
EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
}
/// No-op CodeGenOrderedTy callback: passed to EmitOMPOuterLoop by
/// EmitOMPDistributeOuterLoop, which emits no ordered-iteration handling.
static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
                             const unsigned IVSize, const bool IVSigned) {}
void CodeGenFunction::EmitOMPDistributeOuterLoop(
OpenMPDistScheduleClauseKind ScheduleKind,
const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk) {
OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
const CodeGenLoopTy &CodeGenLoopContent) {
auto &RT = CGM.getOpenMPRuntime();
@ -1924,26 +1986,159 @@ void CodeGenFunction::EmitOMPDistributeOuterLoop(
const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind,
IVSize, IVSigned, /* Ordered = */ false,
IL, LB, UB, ST, Chunk);
RT.emitDistributeStaticInit(*this, S.getLocStart(), ScheduleKind, IVSize,
IVSigned, /* Ordered = */ false, LoopArgs.IL,
LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
LoopArgs.Chunk);
EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false,
S, LoopScope, /* Ordered = */ false, LB, UB, ST, IL, Chunk);
// for combined 'distribute' and 'for' the increment expression of distribute
// is store in DistInc. For 'distribute' alone, it is in Inc.
Expr *IncExpr;
if (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind()))
IncExpr = S.getDistInc();
else
IncExpr = S.getInc();
// this routine is shared by 'omp distribute parallel for' and
// 'omp distribute': select the right EUB expression depending on the
// directive
OMPLoopArguments OuterLoopArgs;
OuterLoopArgs.LB = LoopArgs.LB;
OuterLoopArgs.UB = LoopArgs.UB;
OuterLoopArgs.ST = LoopArgs.ST;
OuterLoopArgs.IL = LoopArgs.IL;
OuterLoopArgs.Chunk = LoopArgs.Chunk;
OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedEnsureUpperBound()
: S.getEnsureUpperBound();
OuterLoopArgs.IncExpr = IncExpr;
OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedInit()
: S.getInit();
OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedCond()
: S.getCond();
OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedNextLowerBound()
: S.getNextLowerBound();
OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedNextUpperBound()
: S.getNextUpperBound();
EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
LoopScope, OuterLoopArgs, CodeGenLoopContent,
emitEmptyOrdered);
}
/// Emit the local variable declared by \p Helper and return an lvalue
/// through which callers can load/store it.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
                               const DeclRefExpr *Helper) {
  // Materialize the declaration backing the helper reference before
  // forming an lvalue for the DeclRefExpr itself.
  auto *VD = cast<VarDecl>(Helper->getDecl());
  CGF.EmitVarDecl(*VD);
  return CGF.EmitLValue(Helper);
}
/// Compute the lower/upper bound lvalues for the inner worksharing loop of a
/// combined construct (e.g. 'distribute parallel for'): the inner 'for'
/// iterates over the chunk assigned by the enclosing 'distribute', so LB/UB
/// are seeded from the previous (distribute) bounds rather than from the
/// whole iteration space.
static std::pair<LValue, LValue>
emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
                                     const OMPExecutableDirective &S) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  LValue LB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
  LValue UB =
      EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));

  // When composing 'distribute' with 'for' (e.g. as in 'distribute
  // parallel for') we need to use the 'distribute'
  // chunk lower and upper bounds rather than the whole loop iteration
  // space. These are parameters to the outlined function for 'parallel'
  // and we copy the bounds of the previous schedule into the
  // the current ones.
  LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
  LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
  // The previous bounds may have a different type than the iteration
  // variable, so convert each loaded value before storing it.
  llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(PrevLB, SourceLocation());
  PrevLBVal = CGF.EmitScalarConversion(
      PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
      LS.getIterationVariable()->getType(), SourceLocation());
  llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(PrevUB, SourceLocation());
  PrevUBVal = CGF.EmitScalarConversion(
      PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
      LS.getIterationVariable()->getType(), SourceLocation());
  CGF.EmitStoreOfScalar(PrevLBVal, LB);
  CGF.EmitStoreOfScalar(PrevUBVal, UB);

  return {LB, UB};
}
/// if the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
/// we need to use the LB and UB expressions generated by the worksharing
/// code generation support, whereas in non combined situations we would
/// just emit 0 and the LastIteration expression
/// This function is necessary due to the difference of the LB and UB
/// types for the RT emission routines for 'for_static_init' and
/// 'for_dispatch_init'
/// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided) then
/// we need to use the LB and UB expressions generated by the worksharing
/// loop support, whereas in non-combined situations we would
/// just emit 0 and the LastIteration expression.
/// This function is necessary due to the difference of the LB and UB
/// types for the RT emission routines for 'for_static_init' and
/// 'for_dispatch_init'.
static std::pair<llvm::Value *, llvm::Value *>
emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
                                        const OMPExecutableDirective &S,
                                        Address LB, Address UB) {
  const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
  const Expr *IVExpr = LS.getIterationVariable();
  // When implementing a dynamic schedule for a 'for' combined with a
  // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
  // is not normalized as each team only executes its own assigned
  // distribute chunk.
  QualType IteratorTy = IVExpr->getType();
  llvm::Value *LBVal = CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy,
                                            SourceLocation());
  llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy,
                                            SourceLocation());
  return {LBVal, UBVal};
}
/// CodeGenBoundParametersTy callback for combined 'distribute parallel for':
/// loads the combined lower/upper bound variables computed by 'distribute',
/// zero-extends/truncates them to size_t, and appends them to \p CapturedVars
/// so they become parameters of the outlined parallel function.
static void emitDistributeParallelForDistributeInnerBoundParams(
    CodeGenFunction &CGF, const OMPExecutableDirective &S,
    llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const auto &Dir = cast<OMPLoopDirective>(S);
  LValue LB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
  // The outlined function expects the bounds as size_t values; the bounds
  // themselves are non-negative, hence the unsigned cast.
  auto LBCast = CGF.Builder.CreateIntCast(
      CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(LBCast);
  LValue UB =
      CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
  auto UBCast = CGF.Builder.CreateIntCast(
      CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
  CapturedVars.push_back(UBCast);
}
/// CodeGenLoopTy callback used for combined 'distribute parallel for':
/// instead of emitting the plain loop body, emit the inner 'for'
/// worksharing loop, seeded with the previous (distribute) upper bound and
/// the combined-construct bounds/dispatch callbacks.
/// NOTE(review): the LoopExit parameter is unused here; it exists to match
/// the CodeGenLoopTy signature.
static void
emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
                                 const OMPLoopDirective &S,
                                 CodeGenFunction::JumpDest LoopExit) {
  // Worksharing-loop codegen to run inside the outlined parallel region.
  auto &&CGInlinedWorksharingLoop = [&S](CodeGenFunction &CGF,
                                         PrePostActionTy &) {
    CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
                               emitDistributeParallelForInnerBounds,
                               emitDistributeParallelForDispatchBounds);
  };
  emitCommonOMPParallelDirective(
      CGF, S, OMPD_for, CGInlinedWorksharingLoop,
      emitDistributeParallelForDistributeInnerBoundParams);
}
void CodeGenFunction::EmitOMPDistributeParallelForDirective(
const OMPDistributeParallelForDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
S.getDistInc());
};
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
CGM.getOpenMPRuntime().emitInlinedDirective(
*this, OMPD_distribute_parallel_for,
[&S](CodeGenFunction &CGF, PrePostActionTy &) {
OMPLoopScope PreInitScope(CGF, S);
OMPCancelStackRAII CancelRegion(CGF, OMPD_distribute_parallel_for,
/*HasCancel=*/false);
CGF.EmitStmt(
cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt());
});
OMPCancelStackRAII CancelRegion(*this, OMPD_distribute_parallel_for,
/*HasCancel=*/false);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
/*HasCancel=*/false);
}
void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
@ -2081,14 +2276,6 @@ void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
});
}
/// \brief Emit a helper variable and return corresponding lvalue.
static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
const DeclRefExpr *Helper) {
auto VDecl = cast<VarDecl>(Helper->getDecl());
CGF.EmitVarDecl(*VDecl);
return CGF.EmitLValue(Helper);
}
namespace {
struct ScheduleKindModifiersTy {
OpenMPScheduleClauseKind Kind;
@ -2101,7 +2288,10 @@ namespace {
};
} // namespace
bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
bool CodeGenFunction::EmitOMPWorksharingLoop(
const OMPLoopDirective &S, Expr *EUB,
const CodeGenLoopBoundsTy &CodeGenLoopBounds,
const CodeGenDispatchBoundsTy &CGDispatchBounds) {
// Emit the loop iteration variable.
auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
@ -2151,10 +2341,10 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
emitAlignedClause(*this, S);
EmitOMPLinearClauseInit(S);
// Emit helper vars inits.
LValue LB =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
LValue UB =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
LValue LB = Bounds.first;
LValue UB = Bounds.second;
LValue ST =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
LValue IL =
@ -2240,9 +2430,11 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
const OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
ST.getAddress(), IL.getAddress(),
Chunk, EUB);
EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
LB.getAddress(), UB.getAddress(), ST.getAddress(),
IL.getAddress(), Chunk);
LoopArguments, CGDispatchBounds);
}
if (isOpenMPSimdDirective(S.getDirectiveKind())) {
EmitOMPSimdFinal(S,
@ -2280,12 +2472,42 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(const OMPLoopDirective &S) {
return HasLastprivateClause;
}
/// The following two functions generate expressions for the loop lower
/// and upper bounds in case of static and dynamic (dispatch) schedule
/// of the associated 'for' or 'distribute' loop.
static std::pair<LValue, LValue>
emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
  const auto &LoopDir = cast<OMPLoopDirective>(S);
  // Emit the helper variables backing the LB/UB references; braced-init
  // evaluation is left-to-right, so LB is emitted before UB.
  return {EmitOMPHelperVar(CGF,
                           cast<DeclRefExpr>(LoopDir.getLowerBoundVariable())),
          EmitOMPHelperVar(CGF,
                           cast<DeclRefExpr>(LoopDir.getUpperBoundVariable()))};
}
/// When dealing with dispatch schedules (e.g. dynamic, guided) we do not
/// consider the lower and upper bound expressions generated by the
/// worksharing loop support, but we use 0 and the iteration space size as
/// constants.
/// NOTE(review): LB/UB are unused here; the signature matches
/// CodeGenDispatchBoundsTy.
static std::pair<llvm::Value *, llvm::Value *>
emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
                          Address LB, Address UB) {
  const auto &LoopDir = cast<OMPLoopDirective>(S);
  const Expr *IterVar = LoopDir.getIterationVariable();
  const unsigned IterVarBits = CGF.getContext().getTypeSize(IterVar->getType());
  // A non-combined worksharing loop is normalized: it always starts at zero
  // and runs up to the directive's last-iteration expression.
  llvm::Value *LowerVal = CGF.Builder.getIntN(IterVarBits, 0);
  llvm::Value *UpperVal = CGF.EmitScalarExpr(LoopDir.getLastIteration());
  return {LowerVal, UpperVal};
}
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
OMPCancelStackRAII CancelRegion(CGF, OMPD_for, S.hasCancel());
HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
emitForLoopBounds,
emitDispatchForLoopBounds);
};
{
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
@ -2303,7 +2525,9 @@ void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
bool HasLastprivates = false;
auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
PrePostActionTy &) {
HasLastprivates = CGF.EmitOMPWorksharingLoop(S);
HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
emitForLoopBounds,
emitDispatchForLoopBounds);
};
{
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
@ -2554,9 +2778,11 @@ void CodeGenFunction::EmitOMPParallelForDirective(
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
OMPCancelStackRAII CancelRegion(CGF, OMPD_parallel_for, S.hasCancel());
CGF.EmitOMPWorksharingLoop(S);
CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
emitDispatchForLoopBounds);
};
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen);
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
emitEmptyBoundParameters);
}
void CodeGenFunction::EmitOMPParallelForSimdDirective(
@ -2564,9 +2790,11 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
// Emit directive as a combined directive that consists of two implicit
// directives: 'parallel' with 'for' directive.
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitOMPWorksharingLoop(S);
CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
emitDispatchForLoopBounds);
};
emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen);
emitCommonOMPParallelDirective(*this, S, OMPD_simd, CodeGen,
emitEmptyBoundParameters);
}
void CodeGenFunction::EmitOMPParallelSectionsDirective(
@ -2576,7 +2804,8 @@ void CodeGenFunction::EmitOMPParallelSectionsDirective(
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitSections(S);
};
emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen);
emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
emitEmptyBoundParameters);
}
void CodeGenFunction::EmitOMPTaskBasedDirective(const OMPExecutableDirective &S,
@ -2794,7 +3023,9 @@ void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
}(), S.getLocStart());
}
void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
const CodeGenLoopTy &CodeGenLoop,
Expr *IncExpr) {
// Emit the loop iteration variable.
auto IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
auto IVDecl = cast<VarDecl>(IVExpr->getDecl());
@ -2835,10 +3066,17 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
// Emit 'then' code.
{
// Emit helper vars inits.
LValue LB =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getLowerBoundVariable()));
LValue UB =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getUpperBoundVariable()));
LValue LB = EmitOMPHelperVar(
*this, cast<DeclRefExpr>(
(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedLowerBoundVariable()
: S.getLowerBoundVariable())));
LValue UB = EmitOMPHelperVar(
*this, cast<DeclRefExpr>(
(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedUpperBoundVariable()
: S.getUpperBoundVariable())));
LValue ST =
EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
LValue IL =
@ -2890,15 +3128,25 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
auto LoopExit =
getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
// UB = min(UB, GlobalUB);
EmitIgnoredExpr(S.getEnsureUpperBound());
EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedEnsureUpperBound()
: S.getEnsureUpperBound());
// IV = LB;
EmitIgnoredExpr(S.getInit());
EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedInit()
: S.getInit());
Expr *Cond = isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
? S.getCombinedCond()
: S.getCond();
// for distribute alone, codegen
// while (idx <= UB) { BODY; ++idx; }
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), S.getCond(),
S.getInc(),
[&S, LoopExit](CodeGenFunction &CGF) {
CGF.EmitOMPLoopBody(S, LoopExit);
CGF.EmitStopPoint(&S);
// when combined with 'for' (e.g. as in 'distribute parallel for')
// while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
EmitOMPInnerLoop(S, LoopScope.requiresCleanups(), Cond, IncExpr,
[&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
CodeGenLoop(CGF, S, LoopExit);
},
[](CodeGenFunction &) {});
EmitBlock(LoopExit.getBlock());
@ -2907,9 +3155,11 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope,
LB.getAddress(), UB.getAddress(), ST.getAddress(),
IL.getAddress(), Chunk);
const OMPLoopArguments LoopArguments = {
LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
Chunk};
EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
CodeGenLoop);
}
// Emit final copy of the lastprivate variables if IsLastIter != 0.
@ -2931,7 +3181,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPDistributeDirective &S) {
void CodeGenFunction::EmitOMPDistributeDirective(
const OMPDistributeDirective &S) {
auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
CGF.EmitOMPDistributeLoop(S);
CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
};
OMPLexicalScope Scope(*this, S, /*AsInlined=*/true);
CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen,
@ -3840,7 +4091,8 @@ static void emitTargetParallelRegion(CodeGenFunction &CGF,
CGF.EmitStmt(CS->getCapturedStmt());
CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
};
emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen);
emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
emitEmptyBoundParameters);
emitPostUpdateForReductionClause(
CGF, S, [](CodeGenFunction &) -> llvm::Value * { return nullptr; });
}

View File

@ -175,6 +175,25 @@ public:
// because of jumps.
VarBypassDetector Bypasses;
// CodeGen callback for emitting a loop body (or, for combined constructs,
// an inner worksharing loop) and, separately, for ordered-clause support.
typedef llvm::function_ref<void(CodeGenFunction &, const OMPLoopDirective &,
                                JumpDest)>
    CodeGenLoopTy;
typedef llvm::function_ref<void(CodeGenFunction &, SourceLocation,
                                const unsigned, const bool)>
    CodeGenOrderedTy;

// Codegen callback producing the lower/upper bound lvalues used by
// worksharing loop constructs.
typedef llvm::function_ref<std::pair<LValue, LValue>(
    CodeGenFunction &, const OMPExecutableDirective &S)>
    CodeGenLoopBoundsTy;

// Codegen callback producing the lower/upper bound values used by the
// dispatch-based (dynamic/guided) loop implementation.
typedef llvm::function_ref<std::pair<llvm::Value *, llvm::Value *>(
    CodeGenFunction &, const OMPExecutableDirective &S, Address LB,
    Address UB)>
    CodeGenDispatchBoundsTy;
/// \brief CGBuilder insert helper. This function is called after an
/// instruction is created using Builder.
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
@ -2756,7 +2775,6 @@ public:
void EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S);
void EmitOMPTaskLoopSimdDirective(const OMPTaskLoopSimdDirective &S);
void EmitOMPDistributeDirective(const OMPDistributeDirective &S);
void EmitOMPDistributeLoop(const OMPDistributeDirective &S);
void EmitOMPDistributeParallelForDirective(
const OMPDistributeParallelForDirective &S);
void EmitOMPDistributeParallelForSimdDirective(
@ -2813,32 +2831,78 @@ public:
void EmitOMPPrivateLoopCounters(const OMPLoopDirective &S,
OMPPrivateScope &LoopScope);
/// Helper for the OpenMP loop directives.
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
/// \brief Emit code for the worksharing loop-based directive.
/// \return true, if this construct has any lastprivate clause, false -
/// otherwise.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S, Expr *EUB,
const CodeGenLoopBoundsTy &CodeGenLoopBounds,
const CodeGenDispatchBoundsTy &CGDispatchBounds);
private:
/// Helpers for blocks
llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
/// Helpers for the OpenMP loop directives.
void EmitOMPLoopBody(const OMPLoopDirective &D, JumpDest LoopExit);
void EmitOMPSimdInit(const OMPLoopDirective &D, bool IsMonotonic = false);
void EmitOMPSimdFinal(
const OMPLoopDirective &D,
const llvm::function_ref<llvm::Value *(CodeGenFunction &)> &CondGen);
/// \brief Emit code for the worksharing loop-based directive.
/// \return true, if this construct has any lastprivate clause, false -
/// otherwise.
bool EmitOMPWorksharingLoop(const OMPLoopDirective &S);
void EmitOMPOuterLoop(bool IsMonotonic, bool DynamicOrOrdered,
const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk);
void EmitOMPDistributeLoop(const OMPLoopDirective &S,
const CodeGenLoopTy &CodeGenLoop, Expr *IncExpr);
/// Struct with the values to be passed to the OpenMP loop-related functions
/// (EmitOMPOuterLoop and friends); all members are optional so callers fill
/// in only what a given loop emission needs.
struct OMPLoopArguments {
  /// Loop lower bound.
  Address LB = Address::invalid();
  /// Loop upper bound.
  Address UB = Address::invalid();
  /// Loop stride.
  Address ST = Address::invalid();
  /// isLastIteration argument for runtime functions.
  Address IL = Address::invalid();
  /// Chunk value generated by sema.
  llvm::Value *Chunk = nullptr;
  /// EnsureUpperBound expression, i.e. UB = min(UB, GlobalUB) (or PrevUB
  /// for combined loop-sharing constructs).
  Expr *EUB = nullptr;
  /// Increment expression.
  Expr *IncExpr = nullptr;
  /// Loop initialization (IV = LB).
  Expr *Init = nullptr;
  /// Loop exit condition.
  Expr *Cond = nullptr;
  /// Update of LB after a whole chunk has been executed.
  Expr *NextLB = nullptr;
  /// Update of UB after a whole chunk has been executed.
  Expr *NextUB = nullptr;
  OMPLoopArguments() = default;
  OMPLoopArguments(Address LB, Address UB, Address ST, Address IL,
                   llvm::Value *Chunk = nullptr, Expr *EUB = nullptr,
                   Expr *IncExpr = nullptr, Expr *Init = nullptr,
                   Expr *Cond = nullptr, Expr *NextLB = nullptr,
                   Expr *NextUB = nullptr)
      : LB(LB), UB(UB), ST(ST), IL(IL), Chunk(Chunk), EUB(EUB),
        IncExpr(IncExpr), Init(Init), Cond(Cond), NextLB(NextLB),
        NextUB(NextUB) {}
};
void EmitOMPOuterLoop(bool DynamicOrOrdered, bool IsMonotonic,
const OMPLoopDirective &S, OMPPrivateScope &LoopScope,
const OMPLoopArguments &LoopArgs,
const CodeGenLoopTy &CodeGenLoop,
const CodeGenOrderedTy &CodeGenOrdered);
void EmitOMPForOuterLoop(const OpenMPScheduleTy &ScheduleKind,
bool IsMonotonic, const OMPLoopDirective &S,
OMPPrivateScope &LoopScope, bool Ordered, Address LB,
Address UB, Address ST, Address IL,
llvm::Value *Chunk);
void EmitOMPDistributeOuterLoop(
OpenMPDistScheduleClauseKind ScheduleKind,
const OMPDistributeDirective &S, OMPPrivateScope &LoopScope,
Address LB, Address UB, Address ST, Address IL, llvm::Value *Chunk);
OMPPrivateScope &LoopScope, bool Ordered,
const OMPLoopArguments &LoopArgs,
const CodeGenDispatchBoundsTy &CGDispatchBounds);
void EmitOMPDistributeOuterLoop(OpenMPDistScheduleClauseKind ScheduleKind,
const OMPLoopDirective &S,
OMPPrivateScope &LoopScope,
const OMPLoopArguments &LoopArgs,
const CodeGenLoopTy &CodeGenLoopContent);
/// \brief Emit code for sections directive.
void EmitSections(const OMPExecutableDirective &S);

View File

@ -565,12 +565,8 @@ void CodeGenModule::DecorateInstructionWithTBAA(llvm::Instruction *Inst,
void CodeGenModule::DecorateInstructionWithInvariantGroup(
llvm::Instruction *I, const CXXRecordDecl *RD) {
llvm::Metadata *MD = CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
auto *MetaDataNode = dyn_cast<llvm::MDNode>(MD);
// Check if we have to wrap MDString in MDNode.
if (!MetaDataNode)
MetaDataNode = llvm::MDNode::get(getLLVMContext(), MD);
I->setMetadata(llvm::LLVMContext::MD_invariant_group, MetaDataNode);
I->setMetadata(llvm::LLVMContext::MD_invariant_group,
llvm::MDNode::get(getLLVMContext(), {}));
}
void CodeGenModule::Error(SourceLocation loc, StringRef message) {

View File

@ -666,7 +666,7 @@ void CodeGenPGO::mapRegionCounters(const Decl *D) {
}
bool CodeGenPGO::skipRegionMappingForDecl(const Decl *D) {
if (SkipCoverageMapping)
if (!D->getBody())
return true;
// Don't map the functions in system headers.

View File

@ -40,14 +40,11 @@ private:
std::unique_ptr<llvm::InstrProfRecord> ProfRecord;
std::vector<uint64_t> RegionCounts;
uint64_t CurrentRegionCount;
/// \brief A flag that is set to true when this function doesn't need
/// to have coverage mapping data.
bool SkipCoverageMapping;
public:
CodeGenPGO(CodeGenModule &CGM)
: CGM(CGM), NumValueSites({{0}}), NumRegionCounters(0),
FunctionHash(0), CurrentRegionCount(0), SkipCoverageMapping(false) {}
: CGM(CGM), NumValueSites({{0}}), NumRegionCounters(0), FunctionHash(0),
CurrentRegionCount(0) {}
/// Whether or not we have PGO region data for the current function. This is
/// false both when we have no data at all and when our data has been

View File

@ -265,6 +265,10 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Add &= ~InvalidTrappingKinds;
Add &= Supported;
// Enable coverage if the fuzzing flag is set.
if (Add & Fuzzer)
CoverageFeatures |= CoverageTracePCGuard | CoverageIndirCall | CoverageTraceCmp;
Kinds |= Add;
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_EQ)) {
Arg->claim();

View File

@ -2778,8 +2778,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-split-dwarf-inlining");
if (DebugInfoKind == codegenoptions::NoDebugInfo)
DebugInfoKind = codegenoptions::LimitedDebugInfo;
CmdArgs.push_back("-backend-option");
CmdArgs.push_back("-split-dwarf=Enable");
CmdArgs.push_back("-enable-split-dwarf");
}
// After we've dealt with all combinations of things that could

View File

@ -577,6 +577,17 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
StaticRuntimes.push_back("esan");
}
static void addLibFuzzerRuntime(const ToolChain &TC,
const ArgList &Args,
ArgStringList &CmdArgs) {
StringRef ParentDir = llvm::sys::path::parent_path(TC.getDriver().InstalledDir);
SmallString<128> P(ParentDir);
llvm::sys::path::append(P, "lib", "libLLVMFuzzer.a");
CmdArgs.push_back(Args.MakeArgString(P));
TC.AddCXXStdlibLibArgs(Args, CmdArgs);
}
// Should be called before we add system libraries (C++ ABI, libstdc++/libc++,
// C runtime, etc). Returns true if sanitizer system deps need to be linked in.
bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
@ -586,6 +597,11 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
collectSanitizerRuntimes(TC, Args, SharedRuntimes, StaticRuntimes,
NonWholeStaticRuntimes, HelperStaticRuntimes,
RequiredSymbols);
// Inject libfuzzer dependencies.
if (TC.getSanitizerArgs().needsFuzzer()) {
addLibFuzzerRuntime(TC, Args, CmdArgs);
}
for (auto RT : SharedRuntimes)
addSanitizerRuntime(TC, Args, CmdArgs, RT, true, false);
for (auto RT : HelperStaticRuntimes)

View File

@ -930,6 +930,18 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
}
}
void MachO::AddFuzzerLinkArgs(const ArgList &Args, ArgStringList &CmdArgs) const {
// Go up one directory from Clang to find the libfuzzer archive file.
StringRef ParentDir = llvm::sys::path::parent_path(getDriver().InstalledDir);
SmallString<128> P(ParentDir);
llvm::sys::path::append(P, "lib", "libLLVMFuzzer.a");
CmdArgs.push_back(Args.MakeArgString(P));
// Libfuzzer is written in C++ and requires libcxx.
AddCXXStdlibLibArgs(Args, CmdArgs);
}
StringRef Darwin::getPlatformFamily() const {
switch (TargetPlatform) {
case DarwinPlatformKind::MacOS:
@ -1035,10 +1047,14 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
const SanitizerArgs &Sanitize = getSanitizerArgs();
if (Sanitize.needsAsanRt())
AddLinkSanitizerLibArgs(Args, CmdArgs, "asan");
if (Sanitize.needsLsanRt())
AddLinkSanitizerLibArgs(Args, CmdArgs, "lsan");
if (Sanitize.needsUbsanRt())
AddLinkSanitizerLibArgs(Args, CmdArgs, "ubsan");
if (Sanitize.needsTsanRt())
AddLinkSanitizerLibArgs(Args, CmdArgs, "tsan");
if (Sanitize.needsFuzzer())
AddFuzzerLinkArgs(Args, CmdArgs);
if (Sanitize.needsStatsRt()) {
StringRef OS = isTargetMacOS() ? "osx" : "iossim";
AddLinkRuntimeLib(Args, CmdArgs,
@ -1892,6 +1908,8 @@ SanitizerMask Darwin::getSupportedSanitizers() const {
const bool IsX86_64 = getTriple().getArch() == llvm::Triple::x86_64;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::Leak;
Res |= SanitizerKind::Fuzzer;
if (isTargetMacOS()) {
if (!isMacosxVersionLT(10, 9))
Res |= SanitizerKind::Vptr;

View File

@ -154,6 +154,8 @@ public:
/// Add the linker arguments to link the compiler runtime library.
virtual void AddLinkRuntimeLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
virtual void AddFuzzerLinkArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const;
virtual void addStartObjectFileArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {

View File

@ -1747,7 +1747,9 @@ bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
static const char *const ARMTriples[] = {"arm-linux-gnueabi",
"arm-linux-androideabi"};
static const char *const ARMHFTriples[] = {"arm-linux-gnueabihf",
"armv7hl-redhat-linux-gnueabi"};
"armv7hl-redhat-linux-gnueabi",
"armv6hl-suse-linux-gnueabi",
"armv7hl-suse-linux-gnueabi"};
static const char *const ARMebLibDirs[] = {"/lib"};
static const char *const ARMebTriples[] = {"armeb-linux-gnueabi",
"armeb-linux-androideabi"};

View File

@ -402,6 +402,40 @@ Tool *HexagonToolChain::buildLinker() const {
return new tools::hexagon::Linker(*this);
}
unsigned HexagonToolChain::getOptimizationLevel(
const llvm::opt::ArgList &DriverArgs) const {
// Copied in large part from lib/Frontend/CompilerInvocation.cpp.
Arg *A = DriverArgs.getLastArg(options::OPT_O_Group);
if (!A)
return 0;
if (A->getOption().matches(options::OPT_O0))
return 0;
if (A->getOption().matches(options::OPT_Ofast) ||
A->getOption().matches(options::OPT_O4))
return 3;
assert(A->getNumValues() != 0);
StringRef S(A->getValue());
if (S == "s" || S == "z" || S.empty())
return 2;
if (S == "g")
return 1;
unsigned OptLevel;
if (S.getAsInteger(10, OptLevel))
return 0;
return OptLevel;
}
void HexagonToolChain::addClangTargetOptions(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_ffp_contract))
return;
unsigned OptLevel = getOptimizationLevel(DriverArgs);
if (OptLevel >= 3)
CC1Args.push_back("-ffp-contract=fast");
}
void HexagonToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (DriverArgs.hasArg(options::OPT_nostdinc) ||

View File

@ -61,11 +61,15 @@ protected:
Tool *buildAssembler() const override;
Tool *buildLinker() const override;
unsigned getOptimizationLevel(const llvm::opt::ArgList &DriverArgs) const;
public:
HexagonToolChain(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
~HexagonToolChain() override;
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;

View File

@ -869,6 +869,7 @@ SanitizerMask Linux::getSupportedSanitizers() const {
llvm::Triple::thumbeb;
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::Fuzzer;
Res |= SanitizerKind::KernelAddress;
Res |= SanitizerKind::Vptr;
Res |= SanitizerKind::SafeStack;

View File

@ -792,7 +792,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Previous && Previous->is(tok::question))
State.Stack.back().QuestionColumn = State.Column;
}
if (!Current.opensScope() && !Current.closesScope())
if (!Current.opensScope() && !Current.closesScope() &&
!Current.is(TT_PointerOrReference))
State.LowestLevelOnLine =
std::min(State.LowestLevelOnLine, Current.NestingLevel);
if (Current.isMemberAccess())

View File

@ -908,8 +908,8 @@ private:
class Formatter : public TokenAnalyzer {
public:
Formatter(const Environment &Env, const FormatStyle &Style,
bool *IncompleteFormat)
: TokenAnalyzer(Env, Style), IncompleteFormat(IncompleteFormat) {}
FormattingAttemptStatus *Status)
: TokenAnalyzer(Env, Style), Status(Status) {}
tooling::Replacements
analyze(TokenAnnotator &Annotator,
@ -931,7 +931,7 @@ public:
Env.getSourceManager(), Whitespaces, Encoding,
BinPackInconclusiveFunctions);
UnwrappedLineFormatter(&Indenter, &Whitespaces, Style, Tokens.getKeywords(),
IncompleteFormat)
Env.getSourceManager(), Status)
.format(AnnotatedLines);
for (const auto &R : Whitespaces.generateReplacements())
if (Result.add(R))
@ -1013,7 +1013,7 @@ private:
}
bool BinPackInconclusiveFunctions;
bool *IncompleteFormat;
FormattingAttemptStatus *Status;
};
// This class clean up the erroneous/redundant code around the given ranges in
@ -1830,7 +1830,8 @@ cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName, bool *IncompleteFormat) {
StringRef FileName,
FormattingAttemptStatus *Status) {
FormatStyle Expanded = expandPresets(Style);
if (Expanded.DisableFormat)
return tooling::Replacements();
@ -1846,11 +1847,11 @@ tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
auto NewEnv = Environment::CreateVirtualEnvironment(
*NewCode, FileName,
tooling::calculateRangesAfterReplacements(Fixes, Ranges));
Formatter Format(*NewEnv, Expanded, IncompleteFormat);
Formatter Format(*NewEnv, Expanded, Status);
return Fixes.merge(Format.process());
}
}
Formatter Format(*Env, Expanded, IncompleteFormat);
Formatter Format(*Env, Expanded, Status);
return Format.process();
};
@ -1866,7 +1867,7 @@ tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
return reformatAfterApplying(Requoter);
}
Formatter Format(*Env, Expanded, IncompleteFormat);
Formatter Format(*Env, Expanded, Status);
return Format.process();
}
@ -1879,6 +1880,16 @@ tooling::Replacements cleanup(const FormatStyle &Style, StringRef Code,
return Clean.process();
}
tooling::Replacements reformat(const FormatStyle &Style, StringRef Code,
ArrayRef<tooling::Range> Ranges,
StringRef FileName, bool *IncompleteFormat) {
FormattingAttemptStatus Status;
auto Result = reformat(Style, Code, Ranges, FileName, &Status);
if (!Status.FormatComplete)
*IncompleteFormat = true;
return Result;
}
tooling::Replacements fixNamespaceEndComments(const FormatStyle &Style,
StringRef Code,
ArrayRef<tooling::Range> Ranges,

View File

@ -617,10 +617,12 @@ struct AdditionalKeywords {
kw_finally = &IdentTable.get("finally");
kw_from = &IdentTable.get("from");
kw_function = &IdentTable.get("function");
kw_get = &IdentTable.get("get");
kw_import = &IdentTable.get("import");
kw_is = &IdentTable.get("is");
kw_let = &IdentTable.get("let");
kw_module = &IdentTable.get("module");
kw_set = &IdentTable.get("set");
kw_type = &IdentTable.get("type");
kw_var = &IdentTable.get("var");
kw_yield = &IdentTable.get("yield");
@ -675,10 +677,12 @@ struct AdditionalKeywords {
IdentifierInfo *kw_finally;
IdentifierInfo *kw_from;
IdentifierInfo *kw_function;
IdentifierInfo *kw_get;
IdentifierInfo *kw_import;
IdentifierInfo *kw_is;
IdentifierInfo *kw_let;
IdentifierInfo *kw_module;
IdentifierInfo *kw_set;
IdentifierInfo *kw_type;
IdentifierInfo *kw_var;
IdentifierInfo *kw_yield;

View File

@ -1120,7 +1120,11 @@ private:
Current.Type = TT_FunctionAnnotationRParen;
}
}
} else if (Current.is(tok::at) && Current.Next) {
} else if (Current.is(tok::at) && Current.Next &&
Style.Language != FormatStyle::LK_JavaScript &&
Style.Language != FormatStyle::LK_Java) {
// In Java & JavaScript, "@..." is a decorator or annotation. In ObjC, it
// marks declarations and properties that need special formatting.
switch (Current.Next->Tok.getObjCKeywordID()) {
case tok::objc_interface:
case tok::objc_implementation:
@ -2541,9 +2545,11 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
} else if (Style.Language == FormatStyle::LK_JavaScript) {
const FormatToken *NonComment = Right.getPreviousNonComment();
if (NonComment &&
NonComment->isOneOf(tok::kw_return, tok::kw_continue, tok::kw_break,
tok::kw_throw, Keywords.kw_interface,
Keywords.kw_type))
NonComment->isOneOf(
tok::kw_return, tok::kw_continue, tok::kw_break, tok::kw_throw,
Keywords.kw_interface, Keywords.kw_type, tok::kw_static,
tok::kw_public, tok::kw_private, tok::kw_protected,
Keywords.kw_abstract, Keywords.kw_get, Keywords.kw_set))
return false; // Otherwise a semicolon is inserted.
if (Left.is(TT_JsFatArrow) && Right.is(tok::l_brace))
return false;

View File

@ -835,8 +835,11 @@ UnwrappedLineFormatter::format(const SmallVectorImpl<AnnotatedLine *> &Lines,
bool ShouldFormat = TheLine.Affected || FixIndentation;
// We cannot format this line; if the reason is that the line had a
// parsing error, remember that.
if (ShouldFormat && TheLine.Type == LT_Invalid && IncompleteFormat)
*IncompleteFormat = true;
if (ShouldFormat && TheLine.Type == LT_Invalid && Status) {
Status->FormatComplete = false;
Status->Line =
SourceMgr.getSpellingLineNumber(TheLine.First->Tok.getLocation());
}
if (ShouldFormat && TheLine.Type != LT_Invalid) {
if (!DryRun)

View File

@ -32,9 +32,11 @@ public:
WhitespaceManager *Whitespaces,
const FormatStyle &Style,
const AdditionalKeywords &Keywords,
bool *IncompleteFormat)
const SourceManager &SourceMgr,
FormattingAttemptStatus *Status)
: Indenter(Indenter), Whitespaces(Whitespaces), Style(Style),
Keywords(Keywords), IncompleteFormat(IncompleteFormat) {}
Keywords(Keywords), SourceMgr(SourceMgr),
Status(Status) {}
/// \brief Format the current block and return the penalty.
unsigned format(const SmallVectorImpl<AnnotatedLine *> &Lines,
@ -63,7 +65,8 @@ private:
WhitespaceManager *Whitespaces;
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
bool *IncompleteFormat;
const SourceManager &SourceMgr;
FormattingAttemptStatus *Status;
};
} // end namespace format
} // end namespace clang

View File

@ -519,6 +519,7 @@ static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
Opts.MacroDebugInfo = Args.hasArg(OPT_debug_info_macro);
Opts.WholeProgramVTables = Args.hasArg(OPT_fwhole_program_vtables);
Opts.LTOVisibilityPublicStd = Args.hasArg(OPT_flto_visibility_public_std);
Opts.EnableSplitDwarf = Args.hasArg(OPT_enable_split_dwarf);
Opts.SplitDwarfFile = Args.getLastArgValue(OPT_split_dwarf_file);
Opts.SplitDwarfInlining = !Args.hasArg(OPT_fno_split_dwarf_inlining);
Opts.DebugTypeExtRefs = Args.hasArg(OPT_dwarf_ext_refs);

View File

@ -882,14 +882,16 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
// The value written by __atomic_test_and_set.
// FIXME: This is target-dependent.
Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
}
auto addLockFreeMacros = [&](const llvm::Twine &Prefix) {
// Used by libc++ and libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
getLockFreeValue(TI.get##Type##Width(), \
TI.get##Type##Align(), \
InlineWidthBits));
#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
Builder.defineMacro(Prefix + #TYPE "_LOCK_FREE", \
getLockFreeValue(TI.get##Type##Width(), \
TI.get##Type##Align(), \
InlineWidthBits));
DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
DEFINE_LOCK_FREE_MACRO(CHAR, Char);
DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
@ -899,12 +901,15 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
DEFINE_LOCK_FREE_MACRO(INT, Int);
DEFINE_LOCK_FREE_MACRO(LONG, Long);
DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
Builder.defineMacro(Prefix + "POINTER_LOCK_FREE",
getLockFreeValue(TI.getPointerWidth(0),
TI.getPointerAlign(0),
InlineWidthBits));
#undef DEFINE_LOCK_FREE_MACRO
}
};
addLockFreeMacros("__CLANG_ATOMIC_");
if (!LangOpts.MSVCCompat)
addLockFreeMacros("__GCC_ATOMIC_");
if (LangOpts.NoInlineDefine)
Builder.defineMacro("__NO_INLINE__");

View File

@ -40,16 +40,16 @@ extern "C" {
/* 7.17.1 Introduction */
#define ATOMIC_BOOL_LOCK_FREE __GCC_ATOMIC_BOOL_LOCK_FREE
#define ATOMIC_CHAR_LOCK_FREE __GCC_ATOMIC_CHAR_LOCK_FREE
#define ATOMIC_CHAR16_T_LOCK_FREE __GCC_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE __GCC_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE __GCC_ATOMIC_WCHAR_T_LOCK_FREE
#define ATOMIC_SHORT_LOCK_FREE __GCC_ATOMIC_SHORT_LOCK_FREE
#define ATOMIC_INT_LOCK_FREE __GCC_ATOMIC_INT_LOCK_FREE
#define ATOMIC_LONG_LOCK_FREE __GCC_ATOMIC_LONG_LOCK_FREE
#define ATOMIC_LLONG_LOCK_FREE __GCC_ATOMIC_LLONG_LOCK_FREE
#define ATOMIC_POINTER_LOCK_FREE __GCC_ATOMIC_POINTER_LOCK_FREE
#define ATOMIC_BOOL_LOCK_FREE __CLANG_ATOMIC_BOOL_LOCK_FREE
#define ATOMIC_CHAR_LOCK_FREE __CLANG_ATOMIC_CHAR_LOCK_FREE
#define ATOMIC_CHAR16_T_LOCK_FREE __CLANG_ATOMIC_CHAR16_T_LOCK_FREE
#define ATOMIC_CHAR32_T_LOCK_FREE __CLANG_ATOMIC_CHAR32_T_LOCK_FREE
#define ATOMIC_WCHAR_T_LOCK_FREE __CLANG_ATOMIC_WCHAR_T_LOCK_FREE
#define ATOMIC_SHORT_LOCK_FREE __CLANG_ATOMIC_SHORT_LOCK_FREE
#define ATOMIC_INT_LOCK_FREE __CLANG_ATOMIC_INT_LOCK_FREE
#define ATOMIC_LONG_LOCK_FREE __CLANG_ATOMIC_LONG_LOCK_FREE
#define ATOMIC_LLONG_LOCK_FREE __CLANG_ATOMIC_LLONG_LOCK_FREE
#define ATOMIC_POINTER_LOCK_FREE __CLANG_ATOMIC_POINTER_LOCK_FREE
/* 7.17.2 Initialization */

View File

@ -14,6 +14,13 @@
using namespace clang;
using namespace index;
#define TRY_DECL(D,CALL_EXPR) \
do { \
if (!IndexCtx.shouldIndex(D)) return true; \
if (!CALL_EXPR) \
return false; \
} while (0)
#define TRY_TO(CALL_EXPR) \
do { \
if (!CALL_EXPR) \
@ -120,8 +127,7 @@ public:
D->getDeclContext(), 0);
}
if (!IndexCtx.handleDecl(D, MethodLoc, Roles, Relations))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D, MethodLoc, Roles, Relations));
IndexCtx.indexTypeSourceInfo(D->getReturnTypeSourceInfo(), D);
bool hasIBActionAndFirst = D->hasAttr<IBActionAttr>();
for (const auto *I : D->parameters()) {
@ -138,6 +144,53 @@ public:
return true;
}
/// Gather the declarations which the given declaration \D overrides in a
/// pseudo-override manner.
///
/// Pseudo-overrides occur when a class template specialization declares
/// a declaration that has the same name as a similar declaration in the
/// non-specialized template.
void
gatherTemplatePseudoOverrides(const NamedDecl *D,
SmallVectorImpl<SymbolRelation> &Relations) {
if (!IndexCtx.getLangOpts().CPlusPlus)
return;
const auto *CTSD =
dyn_cast<ClassTemplateSpecializationDecl>(D->getLexicalDeclContext());
if (!CTSD)
return;
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
Template = CTSD->getSpecializedTemplateOrPartial();
if (const auto *CTD = Template.dyn_cast<ClassTemplateDecl *>()) {
const CXXRecordDecl *Pattern = CTD->getTemplatedDecl();
bool TypeOverride = isa<TypeDecl>(D);
for (const NamedDecl *ND : Pattern->lookup(D->getDeclName())) {
if (const auto *CTD = dyn_cast<ClassTemplateDecl>(ND))
ND = CTD->getTemplatedDecl();
if (ND->isImplicit())
continue;
// Types can override other types.
if (!TypeOverride) {
if (ND->getKind() != D->getKind())
continue;
} else if (!isa<TypeDecl>(ND))
continue;
if (const auto *FD = dyn_cast<FunctionDecl>(ND)) {
const auto *DFD = cast<FunctionDecl>(D);
// Function overrides are approximated using the number of parameters.
if (FD->getStorageClass() != DFD->getStorageClass() ||
FD->getNumParams() != DFD->getNumParams())
continue;
}
Relations.emplace_back(
SymbolRoleSet(SymbolRole::RelationOverrideOf) |
SymbolRoleSet(SymbolRole::RelationSpecializationOf),
ND);
}
}
}
bool VisitFunctionDecl(const FunctionDecl *D) {
if (D->isDeleted())
return true;
@ -152,9 +205,13 @@ public:
Relations.emplace_back((unsigned)SymbolRole::RelationOverrideOf, *I);
}
}
gatherTemplatePseudoOverrides(D, Relations);
if (const auto *Base = D->getPrimaryTemplate())
Relations.push_back(
SymbolRelation(SymbolRoleSet(SymbolRole::RelationSpecializationOf),
Base->getTemplatedDecl()));
if (!IndexCtx.handleDecl(D, Roles, Relations))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D, Roles, Relations));
handleDeclarator(D);
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
@ -189,16 +246,18 @@ public:
}
bool VisitVarDecl(const VarDecl *D) {
if (!IndexCtx.handleDecl(D))
return false;
SmallVector<SymbolRelation, 4> Relations;
gatherTemplatePseudoOverrides(D, Relations);
TRY_DECL(D, IndexCtx.handleDecl(D, SymbolRoleSet(), Relations));
handleDeclarator(D);
IndexCtx.indexBody(D->getInit(), D);
return true;
}
bool VisitFieldDecl(const FieldDecl *D) {
if (!IndexCtx.handleDecl(D))
return false;
SmallVector<SymbolRelation, 4> Relations;
gatherTemplatePseudoOverrides(D, Relations);
TRY_DECL(D, IndexCtx.handleDecl(D, SymbolRoleSet(), Relations));
handleDeclarator(D);
if (D->isBitField())
IndexCtx.indexBody(D->getBitWidth(), D);
@ -212,8 +271,7 @@ public:
// handled in VisitObjCPropertyImplDecl
return true;
}
if (!IndexCtx.handleDecl(D))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D));
handleDeclarator(D);
return true;
}
@ -224,17 +282,18 @@ public:
}
bool VisitEnumConstantDecl(const EnumConstantDecl *D) {
if (!IndexCtx.handleDecl(D))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D));
IndexCtx.indexBody(D->getInitExpr(), D);
return true;
}
bool VisitTypedefNameDecl(const TypedefNameDecl *D) {
if (!D->isTransparentTag())
if (!IndexCtx.handleDecl(D))
return false;
IndexCtx.indexTypeSourceInfo(D->getTypeSourceInfo(), D);
if (!D->isTransparentTag()) {
SmallVector<SymbolRelation, 4> Relations;
gatherTemplatePseudoOverrides(D, Relations);
TRY_DECL(D, IndexCtx.handleDecl(D, SymbolRoleSet(), Relations));
IndexCtx.indexTypeSourceInfo(D->getTypeSourceInfo(), D);
}
return true;
}
@ -242,7 +301,9 @@ public:
// Non-free standing tags are handled in indexTypeSourceInfo.
if (D->isFreeStanding()) {
if (D->isThisDeclarationADefinition()) {
IndexCtx.indexTagDecl(D);
SmallVector<SymbolRelation, 4> Relations;
gatherTemplatePseudoOverrides(D, Relations);
IndexCtx.indexTagDecl(D, Relations);
} else {
auto *Parent = dyn_cast<NamedDecl>(D->getDeclContext());
return IndexCtx.handleReference(D, D->getLocation(), Parent,
@ -272,7 +333,7 @@ public:
bool VisitObjCInterfaceDecl(const ObjCInterfaceDecl *D) {
if (D->isThisDeclarationADefinition()) {
TRY_TO(IndexCtx.handleDecl(D));
TRY_DECL(D, IndexCtx.handleDecl(D));
SourceLocation SuperLoc = D->getSuperClassLoc();
if (auto *SuperD = D->getSuperClass()) {
bool hasSuperTypedef = false;
@ -303,7 +364,7 @@ public:
bool VisitObjCProtocolDecl(const ObjCProtocolDecl *D) {
if (D->isThisDeclarationADefinition()) {
TRY_TO(IndexCtx.handleDecl(D));
TRY_DECL(D, IndexCtx.handleDecl(D));
TRY_TO(handleReferencedProtocols(D->getReferencedProtocols(), D,
/*superLoc=*/SourceLocation()));
TRY_TO(IndexCtx.indexDeclContext(D));
@ -322,8 +383,7 @@ public:
if (Class->isImplicitInterfaceDecl())
IndexCtx.handleDecl(Class);
if (!IndexCtx.handleDecl(D))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D));
// Visit implicit @synthesize property implementations first as their
// location is reported at the name of the @implementation block. This
@ -342,6 +402,8 @@ public:
}
bool VisitObjCCategoryDecl(const ObjCCategoryDecl *D) {
if (!IndexCtx.shouldIndex(D))
return true;
const ObjCInterfaceDecl *C = D->getClassInterface();
if (!C)
return true;
@ -370,8 +432,7 @@ public:
SourceLocation CategoryLoc = D->getCategoryNameLoc();
if (!CategoryLoc.isValid())
CategoryLoc = D->getLocation();
if (!IndexCtx.handleDecl(D, CategoryLoc))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D, CategoryLoc));
IndexCtx.indexDeclContext(D);
return true;
}
@ -393,8 +454,7 @@ public:
if (ObjCMethodDecl *MD = D->getSetterMethodDecl())
if (MD->getLexicalDeclContext() == D->getLexicalDeclContext())
handleObjCMethod(MD, D);
if (!IndexCtx.handleDecl(D))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D));
if (IBOutletCollectionAttr *attr = D->getAttr<IBOutletCollectionAttr>())
IndexCtx.indexTypeSourceInfo(attr->getInterfaceLoc(), D,
D->getLexicalDeclContext(), false, true);
@ -415,8 +475,7 @@ public:
Loc = Container->getLocation();
Roles |= (SymbolRoleSet)SymbolRole::Implicit;
}
if (!IndexCtx.handleDecl(D, Loc, Roles, Relations))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D, Loc, Roles, Relations));
if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
return true;
@ -450,8 +509,7 @@ public:
} else if (D->getLocation() == IvarLoc) {
IvarRoles = (SymbolRoleSet)SymbolRole::Implicit;
}
if(!IndexCtx.handleDecl(IvarD, IvarLoc, IvarRoles))
return false;
TRY_DECL(IvarD, IndexCtx.handleDecl(IvarD, IvarLoc, IvarRoles));
} else {
IndexCtx.handleReference(IvarD, D->getPropertyIvarDeclLoc(), nullptr,
D->getDeclContext(), SymbolRoleSet());
@ -461,8 +519,7 @@ public:
}
bool VisitNamespaceDecl(const NamespaceDecl *D) {
if (!IndexCtx.handleDecl(D))
return false;
TRY_DECL(D, IndexCtx.handleDecl(D));
IndexCtx.indexDeclContext(D);
return true;
}
@ -507,6 +564,9 @@ public:
D, SymbolRelation(SymbolRoleSet(SymbolRole::RelationSpecializationOf),
SpecializationOf));
}
if (TypeSourceInfo *TSI = D->getTypeAsWritten())
IndexCtx.indexTypeSourceInfo(TSI, /*Parent=*/nullptr,
D->getLexicalDeclContext());
return true;
}

View File

@ -318,6 +318,20 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
if (Info.Properties & (unsigned)SymbolProperty::Generic)
Info.Lang = SymbolLanguage::CXX;
auto getExternalSymAttr = [](const Decl *D) -> ExternalSourceSymbolAttr* {
if (auto *attr = D->getAttr<ExternalSourceSymbolAttr>())
return attr;
if (auto *dcd = dyn_cast<Decl>(D->getDeclContext())) {
if (auto *attr = dcd->getAttr<ExternalSourceSymbolAttr>())
return attr;
}
return nullptr;
};
if (auto *attr = getExternalSymAttr(D)) {
if (attr->getLanguage() == "Swift")
Info.Lang = SymbolLanguage::Swift;
}
return Info;
}
@ -458,6 +472,7 @@ StringRef index::getSymbolLanguageString(SymbolLanguage K) {
case SymbolLanguage::C: return "C";
case SymbolLanguage::ObjC: return "ObjC";
case SymbolLanguage::CXX: return "C++";
case SymbolLanguage::Swift: return "Swift";
}
llvm_unreachable("invalid symbol language kind");
}

View File

@ -210,6 +210,8 @@ void IndexingContext::indexNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
void IndexingContext::indexTagDecl(const TagDecl *D,
ArrayRef<SymbolRelation> Relations) {
if (!shouldIndex(D))
return;
if (!shouldIndexFunctionLocalSymbols() && isFunctionLocalSymbol(D))
return;

View File

@ -17,6 +17,21 @@
using namespace clang;
using namespace index;
static bool isGeneratedDecl(const Decl *D) {
if (auto *attr = D->getAttr<ExternalSourceSymbolAttr>()) {
return attr->getGeneratedDeclaration();
}
return false;
}
bool IndexingContext::shouldIndex(const Decl *D) {
return !isGeneratedDecl(D);
}
const LangOptions &IndexingContext::getLangOpts() const {
return Ctx->getLangOpts();
}
bool IndexingContext::shouldIndexFunctionLocalSymbols() const {
return IndexOpts.IndexFunctionLocals;
}

View File

@ -48,6 +48,10 @@ public:
void setASTContext(ASTContext &ctx) { Ctx = &ctx; }
bool shouldIndex(const Decl *D);
const LangOptions &getLangOpts() const;
bool shouldSuppressRefs() const {
return false;
}

View File

@ -46,6 +46,15 @@ static bool printLoc(llvm::raw_ostream &OS, SourceLocation Loc,
return false;
}
static StringRef GetExternalSourceContainer(const NamedDecl *D) {
if (!D)
return StringRef();
if (auto *attr = D->getAttr<ExternalSourceSymbolAttr>()) {
return attr->getDefinedIn();
}
return StringRef();
}
namespace {
class USRGenerator : public ConstDeclVisitor<USRGenerator> {
SmallVectorImpl<char> &Buf;
@ -79,7 +88,8 @@ public:
void VisitNamespaceAliasDecl(const NamespaceAliasDecl *D);
void VisitFunctionTemplateDecl(const FunctionTemplateDecl *D);
void VisitClassTemplateDecl(const ClassTemplateDecl *D);
void VisitObjCContainerDecl(const ObjCContainerDecl *CD);
void VisitObjCContainerDecl(const ObjCContainerDecl *CD,
const ObjCCategoryDecl *CatD = nullptr);
void VisitObjCMethodDecl(const ObjCMethodDecl *MD);
void VisitObjCPropertyDecl(const ObjCPropertyDecl *D);
void VisitObjCPropertyImplDecl(const ObjCPropertyImplDecl *D);
@ -116,6 +126,8 @@ public:
return D->getParentFunctionOrMethod() != nullptr;
}
void GenExtSymbolContainer(const NamedDecl *D);
/// Generate the string component containing the location of the
/// declaration.
bool GenLoc(const Decl *D, bool IncludeOffset);
@ -127,13 +139,16 @@ public:
/// itself.
/// Generate a USR for an Objective-C class.
void GenObjCClass(StringRef cls) {
generateUSRForObjCClass(cls, Out);
void GenObjCClass(StringRef cls, StringRef ExtSymDefinedIn,
StringRef CategoryContextExtSymbolDefinedIn) {
generateUSRForObjCClass(cls, Out, ExtSymDefinedIn,
CategoryContextExtSymbolDefinedIn);
}
/// Generate a USR for an Objective-C class category.
void GenObjCCategory(StringRef cls, StringRef cat) {
generateUSRForObjCCategory(cls, cat, Out);
void GenObjCCategory(StringRef cls, StringRef cat,
StringRef clsExt, StringRef catExt) {
generateUSRForObjCCategory(cls, cat, Out, clsExt, catExt);
}
/// Generate a USR fragment for an Objective-C property.
@ -142,8 +157,8 @@ public:
}
/// Generate a USR for an Objective-C protocol.
void GenObjCProtocol(StringRef prot) {
generateUSRForObjCProtocol(prot, Out);
void GenObjCProtocol(StringRef prot, StringRef ext) {
generateUSRForObjCProtocol(prot, Out, ext);
}
void VisitType(QualType T);
@ -204,7 +219,11 @@ void USRGenerator::VisitFunctionDecl(const FunctionDecl *D) {
if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
return;
const unsigned StartSize = Buf.size();
VisitDeclContext(D->getDeclContext());
if (Buf.size() == StartSize)
GenExtSymbolContainer(D);
bool IsTemplate = false;
if (FunctionTemplateDecl *FunTmpl = D->getDescribedFunctionTemplate()) {
IsTemplate = true;
@ -367,7 +386,16 @@ void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
IgnoreResults = true;
return;
}
Visit(ID);
auto getCategoryContext = [](const ObjCMethodDecl *D) ->
const ObjCCategoryDecl * {
if (auto *CD = dyn_cast<ObjCCategoryDecl>(D->getDeclContext()))
return CD;
if (auto *ICD = dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
return ICD->getCategoryDecl();
return nullptr;
};
auto *CD = getCategoryContext(D);
VisitObjCContainerDecl(ID, CD);
}
// Ideally we would use 'GenObjCMethod', but this is such a hot path
// for Objective-C code that we don't want to use
@ -376,13 +404,15 @@ void USRGenerator::VisitObjCMethodDecl(const ObjCMethodDecl *D) {
<< DeclarationName(D->getSelector());
}
void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) {
void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D,
const ObjCCategoryDecl *CatD) {
switch (D->getKind()) {
default:
llvm_unreachable("Invalid ObjC container.");
case Decl::ObjCInterface:
case Decl::ObjCImplementation:
GenObjCClass(D->getName());
GenObjCClass(D->getName(), GetExternalSourceContainer(D),
GetExternalSourceContainer(CatD));
break;
case Decl::ObjCCategory: {
const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
@ -402,7 +432,9 @@ void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) {
GenLoc(CD, /*IncludeOffset=*/true);
}
else
GenObjCCategory(ID->getName(), CD->getName());
GenObjCCategory(ID->getName(), CD->getName(),
GetExternalSourceContainer(ID),
GetExternalSourceContainer(CD));
break;
}
@ -417,12 +449,16 @@ void USRGenerator::VisitObjCContainerDecl(const ObjCContainerDecl *D) {
IgnoreResults = true;
return;
}
GenObjCCategory(ID->getName(), CD->getName());
GenObjCCategory(ID->getName(), CD->getName(),
GetExternalSourceContainer(ID),
GetExternalSourceContainer(CD));
break;
}
case Decl::ObjCProtocol:
GenObjCProtocol(cast<ObjCProtocolDecl>(D)->getName());
case Decl::ObjCProtocol: {
const ObjCProtocolDecl *PD = cast<ObjCProtocolDecl>(D);
GenObjCProtocol(PD->getName(), GetExternalSourceContainer(PD));
break;
}
}
}
@ -452,6 +488,8 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
return;
GenExtSymbolContainer(D);
D = D->getCanonicalDecl();
VisitDeclContext(D->getDeclContext());
@ -544,6 +582,12 @@ void USRGenerator::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
GenLoc(D, /*IncludeOffset=*/true);
}
void USRGenerator::GenExtSymbolContainer(const NamedDecl *D) {
StringRef Container = GetExternalSourceContainer(D);
if (!Container.empty())
Out << "@M@" << Container;
}
bool USRGenerator::GenLoc(const Decl *D, bool IncludeOffset) {
if (generatedLoc)
return IgnoreResults;
@ -866,12 +910,34 @@ void USRGenerator::VisitTemplateArgument(const TemplateArgument &Arg) {
// USR generation functions.
//===----------------------------------------------------------------------===//
void clang::index::generateUSRForObjCClass(StringRef Cls, raw_ostream &OS) {
/// Emit the combined external-container prefix for a class/category pair.
///
/// \param ClsSymDefinedIn container of the class symbol (may be empty).
/// \param CatSymDefinedIn container of the category symbol (may be empty).
static void combineClassAndCategoryExtContainers(StringRef ClsSymDefinedIn,
                                                 StringRef CatSymDefinedIn,
                                                 raw_ostream &OS) {
  // Nothing to emit when neither symbol lives in an external container.
  if (ClsSymDefinedIn.empty() && CatSymDefinedIn.empty())
    return;
  if (CatSymDefinedIn.empty()) {
    // Only the class has an external container: plain module prefix.
    OS << "@M@" << ClsSymDefinedIn << '@';
  } else {
    // Category container first; the class container follows only when the
    // two differ (note: an empty class container still appends its '@').
    OS << "@CM@" << CatSymDefinedIn << '@';
    if (ClsSymDefinedIn != CatSymDefinedIn)
      OS << ClsSymDefinedIn << '@';
  }
}
}
/// Write the USR fragment for an Objective-C class.
///
/// If the class — or the category context it is referenced from — is
/// declared in an external source container, the container prefix is
/// emitted first (see combineClassAndCategoryExtContainers).
void clang::index::generateUSRForObjCClass(StringRef Cls, raw_ostream &OS,
                                           StringRef ExtSymDefinedIn,
                                           StringRef CategoryContextExtSymbolDefinedIn) {
  combineClassAndCategoryExtContainers(ExtSymDefinedIn,
                                       CategoryContextExtSymbolDefinedIn, OS);
  OS << "objc(cs)" << Cls;
}
void clang::index::generateUSRForObjCCategory(StringRef Cls, StringRef Cat,
raw_ostream &OS) {
raw_ostream &OS,
StringRef ClsSymDefinedIn,
StringRef CatSymDefinedIn) {
combineClassAndCategoryExtContainers(ClsSymDefinedIn, CatSymDefinedIn, OS);
OS << "objc(cy)" << Cls << '@' << Cat;
}
@ -890,10 +956,25 @@ void clang::index::generateUSRForObjCProperty(StringRef Prop, bool isClassProp,
OS << (isClassProp ? "(cpy)" : "(py)") << Prop;
}
void clang::index::generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS) {
void clang::index::generateUSRForObjCProtocol(StringRef Prot, raw_ostream &OS,
StringRef ExtSymDefinedIn) {
if (!ExtSymDefinedIn.empty())
OS << "@M@" << ExtSymDefinedIn << '@';
OS << "objc(pl)" << Prot;
}
/// Write the USR fragment for a global enum.
///
/// When the enum is declared in an external source container, a "@M@"
/// module prefix is emitted first; no separator is needed before "@E@",
/// which itself starts with '@'.
void clang::index::generateUSRForGlobalEnum(StringRef EnumName, raw_ostream &OS,
                                            StringRef ExtSymDefinedIn) {
  if (!ExtSymDefinedIn.empty())
    OS << "@M@" << ExtSymDefinedIn;
  OS << "@E@" << EnumName;
}
/// Write the USR fragment for an enum constant: just '@' plus the constant
/// name. NOTE(review): presumably the caller has already emitted the USR of
/// the enclosing enum context — confirm against callers.
void clang::index::generateUSRForEnumConstant(StringRef EnumConstantName,
                                              raw_ostream &OS) {
  OS << '@' << EnumConstantName;
}
bool clang::index::generateUSRForDecl(const Decl *D,
SmallVectorImpl<char> &Buf) {
if (!D)

View File

@ -581,6 +581,7 @@ Module *ModuleMap::createModuleForInterfaceUnit(SourceLocation Loc,
auto *Result =
new Module(Name, Loc, nullptr, /*IsFramework*/ false,
/*IsExplicit*/ false, NumCreatedModules++);
Result->Kind = Module::ModuleInterfaceUnit;
Modules[Name] = SourceModule = Result;
// Mark the main source file as being within the newly-created module so that

View File

@ -989,9 +989,9 @@ struct PragmaDebugHandler : public PragmaHandler {
#ifdef _MSC_VER
#pragma warning(disable : 4717)
#endif
static void DebugOverflowStack() {
void (*volatile Self)() = DebugOverflowStack;
Self();
// Intentionally recurses without bound to overflow the stack (used by the
// surrounding '#pragma clang __debug' handler). The call goes through a
// 'volatile' function pointer so the compiler cannot prove the call target
// and turn the self-recursion into a loop or elide it; the parameter exists
// only to keep each frame non-trivial. The MSVC pragmas around this
// function silence warning C4717 ("recursive on all control paths").
static void DebugOverflowStack(void (*P)() = nullptr) {
  void (*volatile Self)(void(*P)()) = DebugOverflowStack;
  Self(reinterpret_cast<void(*)()>(Self));
}
#ifdef _MSC_VER
#pragma warning(default : 4717)

View File

@ -456,6 +456,7 @@ Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
if (!getLangOpts().CPlusPlus)
continue;
}
// Ensure potential typos aren't left undiagnosed.
if (LHS.isInvalid()) {
Actions.CorrectDelayedTyposInExpr(OrigLHS);

View File

@ -534,23 +534,6 @@ void Parser::LateTemplateParserCleanupCallback(void *P) {
}
bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
// C++ Modules TS: module-declaration must be the first declaration in the
// file. (There can be no preceding preprocessor directives, but we expect
// the lexer to check that.)
if (Tok.is(tok::kw_module)) {
Result = ParseModuleDecl();
return false;
} else if (getLangOpts().getCompilingModule() ==
LangOptions::CMK_ModuleInterface) {
// FIXME: We avoid providing this diagnostic when generating an object file
// from an existing PCM file. This is not a good way to detect this
// condition; we should provide a mechanism to indicate whether we've
// already parsed a declaration in this translation unit and avoid calling
// ParseFirstTopLevelDecl in that case.
if (Actions.TUKind == TU_Module)
Diag(Tok, diag::err_expected_module_interface_decl);
}
// C11 6.9p1 says translation units must have at least one top-level
// declaration. C++ doesn't have this restriction. We also don't want to
// complain if we have a precompiled header, although technically if the PCH
@ -583,6 +566,14 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
Result = ParseModuleImport(SourceLocation());
return false;
case tok::kw_export:
if (NextToken().isNot(tok::kw_module))
break;
LLVM_FALLTHROUGH;
case tok::kw_module:
Result = ParseModuleDecl();
return false;
case tok::annot_module_include:
Actions.ActOnModuleInclude(Tok.getLocation(),
reinterpret_cast<Module *>(
@ -2049,30 +2040,28 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
/// Parse a C++ Modules TS module declaration, which appears at the beginning
/// of a module interface, module partition, or module implementation file.
///
/// module-declaration: [Modules TS + P0273R0]
/// 'module' module-kind[opt] module-name attribute-specifier-seq[opt] ';'
/// module-kind:
/// 'implementation'
/// 'partition'
/// module-declaration: [Modules TS + P0273R0 + P0629R0]
/// 'export'[opt] 'module' 'partition'[opt]
/// module-name attribute-specifier-seq[opt] ';'
///
/// Note that the module-kind values are context-sensitive keywords.
/// Note that 'partition' is a context-sensitive keyword.
Parser::DeclGroupPtrTy Parser::ParseModuleDecl() {
assert(Tok.is(tok::kw_module) && getLangOpts().ModulesTS &&
"should not be parsing a module declaration");
SourceLocation StartLoc = Tok.getLocation();
Sema::ModuleDeclKind MDK = TryConsumeToken(tok::kw_export)
? Sema::ModuleDeclKind::Module
: Sema::ModuleDeclKind::Implementation;
assert(Tok.is(tok::kw_module) && "not a module declaration");
SourceLocation ModuleLoc = ConsumeToken();
// Check for a module-kind.
Sema::ModuleDeclKind MDK = Sema::ModuleDeclKind::Module;
if (Tok.is(tok::identifier) && NextToken().is(tok::identifier)) {
if (Tok.getIdentifierInfo()->isStr("implementation"))
MDK = Sema::ModuleDeclKind::Implementation;
else if (Tok.getIdentifierInfo()->isStr("partition"))
MDK = Sema::ModuleDeclKind::Partition;
else {
Diag(Tok, diag::err_unexpected_module_kind) << Tok.getIdentifierInfo();
SkipUntil(tok::semi);
return nullptr;
}
if (Tok.is(tok::identifier) && NextToken().is(tok::identifier) &&
Tok.getIdentifierInfo()->isStr("partition")) {
// If 'partition' is present, this must be a module interface unit.
if (MDK != Sema::ModuleDeclKind::Module)
Diag(Tok.getLocation(), diag::err_module_implementation_partition)
<< FixItHint::CreateInsertion(ModuleLoc, "export ");
MDK = Sema::ModuleDeclKind::Partition;
ConsumeToken();
}
@ -2080,14 +2069,14 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl() {
if (ParseModuleName(ModuleLoc, Path, /*IsImport*/false))
return nullptr;
// We don't support any module attributes yet; just parse them and diagnose.
ParsedAttributesWithRange Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
// We don't support any module attributes yet.
ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr);
ExpectAndConsumeSemi(diag::err_module_expected_semi);
return Actions.ActOnModuleDecl(ModuleLoc, MDK, Path);
return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path);
}
/// Parse a module import declaration. This is essentially the same for

View File

@ -1161,10 +1161,14 @@ void Sema::PushFunctionScope() {
// memory for a new scope.
FunctionScopes.back()->Clear();
FunctionScopes.push_back(FunctionScopes.back());
if (LangOpts.OpenMP)
pushOpenMPFunctionRegion();
return;
}
FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
if (LangOpts.OpenMP)
pushOpenMPFunctionRegion();
}
void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
@ -1192,6 +1196,9 @@ void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
assert(!FunctionScopes.empty() && "mismatched push/pop!");
if (LangOpts.OpenMP)
popOpenMPFunctionRegion(Scope);
// Issue any analysis-based warnings.
if (WP && D)
AnalysisWarnings.IssueWarnings(*WP, Scope, D, blkExpr);

View File

@ -408,7 +408,7 @@ static bool SemaOpenCLBuiltinEnqueueKernel(Sema &S, CallExpr *TheCall) {
}
// Third argument is always an ndrange_t type.
if (Arg2->getType().getAsString() != "ndrange_t") {
if (Arg2->getType().getUnqualifiedType().getAsString() != "ndrange_t") {
S.Diag(TheCall->getArg(2)->getLocStart(),
diag::err_opencl_enqueue_kernel_expected_type)
<< "'ndrange_t'";

View File

@ -615,7 +615,7 @@ bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
for (const auto &Base : RD->bases())
if (Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
if (Ty && Context.hasSameUnqualifiedType(QualType(Ty, 1), Base.getType()))
return true;
return S->isFunctionPrototypeScope();
}
@ -13523,7 +13523,8 @@ CreateNewDecl:
// If this is an undefined enum, warn.
if (TUK != TUK_Definition && !Invalid) {
TagDecl *Def;
if ((getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
if (!EnumUnderlyingIsImplicit &&
(getLangOpts().CPlusPlus11 || getLangOpts().ObjC2) &&
cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
// Conflicts are diagnosed above. Do nothing.
@ -15687,30 +15688,41 @@ static void checkModuleImportContext(Sema &S, Module *M,
}
}
Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation ModuleLoc,
Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc,
ModuleDeclKind MDK,
ModuleIdPath Path) {
// 'module implementation' requires that we are not compiling a module of any
// kind. 'module' and 'module partition' require that we are compiling a
// module inteface (not a module map).
auto CMK = getLangOpts().getCompilingModule();
if (MDK == ModuleDeclKind::Implementation
? CMK != LangOptions::CMK_None
: CMK != LangOptions::CMK_ModuleInterface) {
// A module implementation unit requires that we are not compiling a module
// of any kind. A module interface unit requires that we are not compiling a
// module map.
switch (getLangOpts().getCompilingModule()) {
case LangOptions::CMK_None:
// It's OK to compile a module interface as a normal translation unit.
break;
case LangOptions::CMK_ModuleInterface:
if (MDK != ModuleDeclKind::Implementation)
break;
// We were asked to compile a module interface unit but this is a module
// implementation unit. That indicates the 'export' is missing.
Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch)
<< (unsigned)MDK;
<< FixItHint::CreateInsertion(ModuleLoc, "export ");
break;
case LangOptions::CMK_ModuleMap:
Diag(ModuleLoc, diag::err_module_decl_in_module_map_module);
return nullptr;
}
// FIXME: Create a ModuleDecl and return it.
// FIXME: Most of this work should be done by the preprocessor rather than
// here, in case we look ahead across something where the current
// module matters (eg a #include).
// here, in order to support macro import.
// The dots in a module name in the Modules TS are a lie. Unlike Clang's
// hierarchical module map modules, the dots here are just another character
// that can appear in a module name. Flatten down to the actual module name.
// Flatten the dots in a module name. Unlike Clang's hierarchical module map
// modules, the dots here are just another character that can appear in a
// module name.
std::string ModuleName;
for (auto &Piece : Path) {
if (!ModuleName.empty())
@ -15735,8 +15747,8 @@ Sema::DeclGroupPtrTy Sema::ActOnModuleDecl(SourceLocation ModuleLoc,
case ModuleDeclKind::Module: {
// FIXME: Check we're not in a submodule.
// We can't have imported a definition of this module or parsed a module
// map defining it already.
// We can't have parsed or imported a definition of this module or parsed a
// module map defining it already.
if (auto *M = Map.findModule(ModuleName)) {
Diag(Path[0].second, diag::err_module_redefinition) << ModuleName;
if (M->DefinitionLoc.isValid())
@ -15910,6 +15922,12 @@ Decl *Sema::ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
ExportDecl *D = ExportDecl::Create(Context, CurContext, ExportLoc);
// C++ Modules TS draft:
// An export-declaration shall appear in the purview of a module other than
// the global module.
if (ModuleScopes.empty() || !ModuleScopes.back().Module ||
ModuleScopes.back().Module->Kind != Module::ModuleInterfaceUnit)
Diag(ExportLoc, diag::err_export_not_in_module_interface);
// An export-declaration [...] shall not contain more than one
// export keyword.
//

View File

@ -7220,6 +7220,8 @@ void Sema::DiagnoseUnguardedAvailabilityViolations(Decl *D) {
Body = FD->getBody();
} else if (auto *MD = dyn_cast<ObjCMethodDecl>(D))
Body = MD->getBody();
else if (auto *BD = dyn_cast<BlockDecl>(D))
Body = BD->getBody();
assert(Body && "Need a body here!");

View File

@ -171,9 +171,14 @@ DiagnoseAvailabilityOfDecl(Sema &S, NamedDecl *D, SourceLocation Loc,
if (AvailabilityResult Result =
S.ShouldDiagnoseAvailabilityOfDecl(D, &Message)) {
if (Result == AR_NotYetIntroduced && S.getCurFunctionOrMethodDecl()) {
S.getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
return;
if (Result == AR_NotYetIntroduced) {
if (S.getCurFunctionOrMethodDecl()) {
S.getEnclosingFunction()->HasPotentialAvailabilityViolations = true;
return;
} else if (S.getCurBlock() || S.getCurLambda()) {
S.getCurFunction()->HasPotentialAvailabilityViolations = true;
return;
}
}
const ObjCPropertyDecl *ObjCPDecl = nullptr;
@ -12498,6 +12503,9 @@ ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
if (Body && getCurFunction()->HasPotentialAvailabilityViolations)
DiagnoseUnguardedAvailabilityViolations(BSI->TheDecl);
// Try to apply the named return value optimization. We have to check again
// if we can do this, though, because blocks keep return statements around
// to deduce an implicit return type.

View File

@ -1326,12 +1326,6 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) {
return !R.empty();
}
/// \brief Find the declaration that a class temploid member specialization was
/// instantiated from, or the member itself if it is an explicit specialization.
static Decl *getInstantiatedFrom(Decl *D, MemberSpecializationInfo *MSInfo) {
return MSInfo->isExplicitSpecialization() ? D : MSInfo->getInstantiatedFrom();
}
Module *Sema::getOwningModule(Decl *Entity) {
// If it's imported, grab its owning module.
Module *M = Entity->getImportedOwningModule();
@ -1413,12 +1407,11 @@ static Module *getDefiningModule(Sema &S, Decl *Entity) {
if (CXXRecordDecl *Pattern = RD->getTemplateInstantiationPattern())
Entity = Pattern;
} else if (EnumDecl *ED = dyn_cast<EnumDecl>(Entity)) {
if (MemberSpecializationInfo *MSInfo = ED->getMemberSpecializationInfo())
Entity = getInstantiatedFrom(ED, MSInfo);
if (auto *Pattern = ED->getTemplateInstantiationPattern())
Entity = Pattern;
} else if (VarDecl *VD = dyn_cast<VarDecl>(Entity)) {
// FIXME: Map from variable template specializations back to the template.
if (MemberSpecializationInfo *MSInfo = VD->getMemberSpecializationInfo())
Entity = getInstantiatedFrom(VD, MSInfo);
if (VarDecl *Pattern = VD->getTemplateInstantiationPattern())
Entity = Pattern;
}
// Walk up to the containing context. That might also have been instantiated

View File

@ -118,7 +118,9 @@ private:
typedef SmallVector<SharingMapTy, 4> StackTy;
/// \brief Stack of used declaration and their data-sharing attributes.
StackTy Stack;
DeclSAMapTy Threadprivates;
const FunctionScopeInfo *CurrentNonCapturingFunctionScope = nullptr;
SmallVector<std::pair<StackTy, const FunctionScopeInfo *>, 4> Stack;
/// \brief true, if check for DSA must be from parent directive, false, if
/// from current directive.
OpenMPClauseKind ClauseKindMode = OMPC_unknown;
@ -133,8 +135,14 @@ private:
/// \brief Checks if the variable is a local for OpenMP region.
bool isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter);
// True when there is no OpenMP region for the current non-capturing
// function scope: either the outer stack is empty, its top entry was
// pushed for a different function scope, or that entry holds no regions.
bool isStackEmpty() const {
  return Stack.empty() ||
         Stack.back().second != CurrentNonCapturingFunctionScope ||
         Stack.back().first.empty();
}
public:
explicit DSAStackTy(Sema &S) : Stack(1), SemaRef(S) {}
explicit DSAStackTy(Sema &S) : SemaRef(S) {}
bool isClauseParsingMode() const { return ClauseKindMode != OMPC_unknown; }
void setClauseParsingMode(OpenMPClauseKind K) { ClauseKindMode = K; }
@ -144,13 +152,38 @@ public:
void push(OpenMPDirectiveKind DKind, const DeclarationNameInfo &DirName,
Scope *CurScope, SourceLocation Loc) {
Stack.push_back(SharingMapTy(DKind, DirName, CurScope, Loc));
Stack.back().DefaultAttrLoc = Loc;
if (Stack.empty() ||
Stack.back().second != CurrentNonCapturingFunctionScope)
Stack.emplace_back(StackTy(), CurrentNonCapturingFunctionScope);
Stack.back().first.emplace_back(DKind, DirName, CurScope, Loc);
Stack.back().first.back().DefaultAttrLoc = Loc;
}
void pop() {
assert(Stack.size() > 1 && "Data-sharing attributes stack is empty!");
Stack.pop_back();
assert(!Stack.back().first.empty() &&
"Data-sharing attributes stack is empty!");
Stack.back().first.pop_back();
}
/// Start a new OpenMP region stack for a new non-capturing function scope;
/// only non-capturing scopes may become the current function scope here
/// (enforced by the assert below).
void pushFunction() {
  const FunctionScopeInfo *CurFnScope = SemaRef.getCurFunction();
  assert(!isa<CapturingScopeInfo>(CurFnScope));
  CurrentNonCapturingFunctionScope = CurFnScope;
}
/// Pop the region stack for a non-capturing function scope.
///
/// \param OldFSI the function scope being popped by Sema.
void popFunction(const FunctionScopeInfo *OldFSI) {
  // Drop the stack entry that belongs to the scope being popped; all of its
  // regions must already have been popped (hence the empty() assert).
  if (!Stack.empty() && Stack.back().second == OldFSI) {
    assert(Stack.back().first.empty());
    Stack.pop_back();
  }
  // Re-establish the innermost remaining non-capturing function scope (if
  // any) as the current one by scanning Sema's scope list innermost-first.
  CurrentNonCapturingFunctionScope = nullptr;
  for (const FunctionScopeInfo *FSI : llvm::reverse(SemaRef.FunctionScopes)) {
    if (!isa<CapturingScopeInfo>(FSI)) {
      CurrentNonCapturingFunctionScope = FSI;
      break;
    }
  }
}
void addCriticalWithHint(OMPCriticalDirective *D, llvm::APSInt Hint) {
@ -229,31 +262,35 @@ public:
/// \brief Returns currently analyzed directive.
OpenMPDirectiveKind getCurrentDirective() const {
return Stack.back().Directive;
return isStackEmpty() ? OMPD_unknown : Stack.back().first.back().Directive;
}
/// \brief Returns parent directive.
OpenMPDirectiveKind getParentDirective() const {
if (Stack.size() > 2)
return Stack[Stack.size() - 2].Directive;
return OMPD_unknown;
if (isStackEmpty() || Stack.back().first.size() == 1)
return OMPD_unknown;
return std::next(Stack.back().first.rbegin())->Directive;
}
/// \brief Set default data sharing attribute to none.
void setDefaultDSANone(SourceLocation Loc) {
Stack.back().DefaultAttr = DSA_none;
Stack.back().DefaultAttrLoc = Loc;
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_none;
Stack.back().first.back().DefaultAttrLoc = Loc;
}
/// \brief Set default data sharing attribute to shared.
void setDefaultDSAShared(SourceLocation Loc) {
Stack.back().DefaultAttr = DSA_shared;
Stack.back().DefaultAttrLoc = Loc;
assert(!isStackEmpty());
Stack.back().first.back().DefaultAttr = DSA_shared;
Stack.back().first.back().DefaultAttrLoc = Loc;
}
DefaultDataSharingAttributes getDefaultDSA() const {
return Stack.back().DefaultAttr;
return isStackEmpty() ? DSA_unspecified
: Stack.back().first.back().DefaultAttr;
}
SourceLocation getDefaultDSALocation() const {
return Stack.back().DefaultAttrLoc;
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().DefaultAttrLoc;
}
/// \brief Checks if the specified variable is a threadprivate.
@ -264,52 +301,64 @@ public:
/// \brief Marks current region as ordered (it has an 'ordered' clause).
void setOrderedRegion(bool IsOrdered, Expr *Param) {
Stack.back().OrderedRegion.setInt(IsOrdered);
Stack.back().OrderedRegion.setPointer(Param);
assert(!isStackEmpty());
Stack.back().first.back().OrderedRegion.setInt(IsOrdered);
Stack.back().first.back().OrderedRegion.setPointer(Param);
}
/// \brief Returns true, if parent region is ordered (has associated
/// 'ordered' clause), false - otherwise.
bool isParentOrderedRegion() const {
if (Stack.size() > 2)
return Stack[Stack.size() - 2].OrderedRegion.getInt();
return false;
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getInt();
}
/// \brief Returns optional parameter for the ordered region.
Expr *getParentOrderedRegionParam() const {
if (Stack.size() > 2)
return Stack[Stack.size() - 2].OrderedRegion.getPointer();
return nullptr;
if (isStackEmpty() || Stack.back().first.size() == 1)
return nullptr;
return std::next(Stack.back().first.rbegin())->OrderedRegion.getPointer();
}
/// \brief Marks current region as nowait (it has a 'nowait' clause).
void setNowaitRegion(bool IsNowait = true) {
Stack.back().NowaitRegion = IsNowait;
assert(!isStackEmpty());
Stack.back().first.back().NowaitRegion = IsNowait;
}
/// \brief Returns true, if parent region is nowait (has associated
/// 'nowait' clause), false - otherwise.
bool isParentNowaitRegion() const {
if (Stack.size() > 2)
return Stack[Stack.size() - 2].NowaitRegion;
return false;
if (isStackEmpty() || Stack.back().first.size() == 1)
return false;
return std::next(Stack.back().first.rbegin())->NowaitRegion;
}
/// \brief Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
if (Stack.size() > 2)
Stack[Stack.size() - 2].CancelRegion =
Stack[Stack.size() - 2].CancelRegion || Cancel;
if (!isStackEmpty() && Stack.back().first.size() > 1) {
auto &StackElemRef = *std::next(Stack.back().first.rbegin());
StackElemRef.CancelRegion |= StackElemRef.CancelRegion || Cancel;
}
}
/// \brief Return true if current region has inner cancel construct.
bool isCancelRegion() const { return Stack.back().CancelRegion; }
bool isCancelRegion() const {
return isStackEmpty() ? false : Stack.back().first.back().CancelRegion;
}
/// \brief Set collapse value for the region.
void setAssociatedLoops(unsigned Val) { Stack.back().AssociatedLoops = Val; }
void setAssociatedLoops(unsigned Val) {
assert(!isStackEmpty());
Stack.back().first.back().AssociatedLoops = Val;
}
/// \brief Return collapse value for region.
unsigned getAssociatedLoops() const { return Stack.back().AssociatedLoops; }
unsigned getAssociatedLoops() const {
return isStackEmpty() ? 0 : Stack.back().first.back().AssociatedLoops;
}
/// \brief Marks current target region as one with closely nested teams
/// region.
void setParentTeamsRegionLoc(SourceLocation TeamsRegionLoc) {
if (Stack.size() > 2)
Stack[Stack.size() - 2].InnerTeamsRegionLoc = TeamsRegionLoc;
if (!isStackEmpty() && Stack.back().first.size() > 1) {
std::next(Stack.back().first.rbegin())->InnerTeamsRegionLoc =
TeamsRegionLoc;
}
}
/// \brief Returns true, if current region has closely nested teams region.
bool hasInnerTeamsRegion() const {
@ -317,14 +366,20 @@ public:
}
/// \brief Returns location of the nested teams region (if any).
SourceLocation getInnerTeamsRegionLoc() const {
if (Stack.size() > 1)
return Stack.back().InnerTeamsRegionLoc;
return SourceLocation();
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().InnerTeamsRegionLoc;
}
Scope *getCurScope() const { return Stack.back().CurScope; }
Scope *getCurScope() { return Stack.back().CurScope; }
SourceLocation getConstructLoc() { return Stack.back().ConstructLoc; }
Scope *getCurScope() const {
return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
}
Scope *getCurScope() {
return isStackEmpty() ? nullptr : Stack.back().first.back().CurScope;
}
SourceLocation getConstructLoc() {
return isStackEmpty() ? SourceLocation()
: Stack.back().first.back().ConstructLoc;
}
/// Do the check specified in \a Check to all component lists and return true
/// if any issue is found.
@ -333,8 +388,10 @@ public:
const llvm::function_ref<
bool(OMPClauseMappableExprCommon::MappableExprComponentListRef,
OpenMPClauseKind)> &Check) {
auto SI = Stack.rbegin();
auto SE = Stack.rend();
if (isStackEmpty())
return false;
auto SI = Stack.back().first.rbegin();
auto SE = Stack.back().first.rend();
if (SI == SE)
return false;
@ -361,9 +418,9 @@ public:
ValueDecl *VD,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
OpenMPClauseKind WhereFoundClauseKind) {
assert(Stack.size() > 1 &&
assert(!isStackEmpty() &&
"Not expecting to retrieve components from a empty stack!");
auto &MEC = Stack.back().MappedExprComponents[VD];
auto &MEC = Stack.back().first.back().MappedExprComponents[VD];
// Create new entry and append the new components there.
MEC.Components.resize(MEC.Components.size() + 1);
MEC.Components.back().append(Components.begin(), Components.end());
@ -371,23 +428,25 @@ public:
}
unsigned getNestingLevel() const {
assert(Stack.size() > 1);
return Stack.size() - 2;
assert(!isStackEmpty());
return Stack.back().first.size() - 1;
}
void addDoacrossDependClause(OMPDependClause *C, OperatorOffsetTy &OpsOffs) {
assert(Stack.size() > 2);
assert(isOpenMPWorksharingDirective(Stack[Stack.size() - 2].Directive));
Stack[Stack.size() - 2].DoacrossDepends.insert({C, OpsOffs});
assert(!isStackEmpty() && Stack.back().first.size() > 1);
auto &StackElem = *std::next(Stack.back().first.rbegin());
assert(isOpenMPWorksharingDirective(StackElem.Directive));
StackElem.DoacrossDepends.insert({C, OpsOffs});
}
llvm::iterator_range<DoacrossDependMapTy::const_iterator>
getDoacrossDependClauses() const {
assert(Stack.size() > 1);
if (isOpenMPWorksharingDirective(Stack[Stack.size() - 1].Directive)) {
auto &Ref = Stack[Stack.size() - 1].DoacrossDepends;
assert(!isStackEmpty());
auto &StackElem = Stack.back().first.back();
if (isOpenMPWorksharingDirective(StackElem.Directive)) {
auto &Ref = StackElem.DoacrossDepends;
return llvm::make_range(Ref.begin(), Ref.end());
}
return llvm::make_range(Stack[0].DoacrossDepends.end(),
Stack[0].DoacrossDepends.end());
return llvm::make_range(StackElem.DoacrossDepends.end(),
StackElem.DoacrossDepends.end());
}
};
bool isParallelOrTaskRegion(OpenMPDirectiveKind DKind) {
@ -416,7 +475,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
auto *VD = dyn_cast<VarDecl>(D);
auto *FD = dyn_cast<FieldDecl>(D);
DSAVarData DVar;
if (Iter == std::prev(Stack.rend())) {
if (isStackEmpty() || Iter == Stack.back().first.rend()) {
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a region but not in construct]
// File-scope or namespace-scope variables referenced in called routines
@ -490,8 +549,9 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
// bound to the current team is shared.
if (isOpenMPTaskingDirective(DVar.DKind)) {
DSAVarData DVarTemp;
for (StackTy::reverse_iterator I = std::next(Iter), EE = Stack.rend();
I != EE; ++I) {
auto I = Iter, E = Stack.back().first.rend();
do {
++I;
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables
// Referenced in a Construct, implicitly determined, p.6]
// In a task construct, if no default clause is present, a variable
@ -503,9 +563,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
DVar.CKind = OMPC_firstprivate;
return DVar;
}
if (isParallelOrTaskRegion(I->Directive))
break;
}
} while (I != E && !isParallelOrTaskRegion(I->Directive));
DVar.CKind =
(DVarTemp.CKind == OMPC_unknown) ? OMPC_firstprivate : OMPC_shared;
return DVar;
@ -520,12 +578,13 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(StackTy::reverse_iterator &Iter,
}
Expr *DSAStackTy::addUniqueAligned(ValueDecl *D, Expr *NewDE) {
assert(Stack.size() > 1 && "Data sharing attributes stack is empty");
assert(!isStackEmpty() && "Data sharing attributes stack is empty");
D = getCanonicalDecl(D);
auto It = Stack.back().AlignedMap.find(D);
if (It == Stack.back().AlignedMap.end()) {
auto &StackElem = Stack.back().first.back();
auto It = StackElem.AlignedMap.find(D);
if (It == StackElem.AlignedMap.end()) {
assert(NewDE && "Unexpected nullptr expr to be added into aligned map");
Stack.back().AlignedMap[D] = NewDE;
StackElem.AlignedMap[D] = NewDE;
return nullptr;
} else {
assert(It->second && "Unexpected nullptr expr in the aligned map");
@ -535,35 +594,43 @@ Expr *DSAStackTy::addUniqueAligned(ValueDecl *D, Expr *NewDE) {
}
void DSAStackTy::addLoopControlVariable(ValueDecl *D, VarDecl *Capture) {
assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
Stack.back().LCVMap.insert(
std::make_pair(D, LCDeclInfo(Stack.back().LCVMap.size() + 1, Capture)));
auto &StackElem = Stack.back().first.back();
StackElem.LCVMap.insert(
{D, LCDeclInfo(StackElem.LCVMap.size() + 1, Capture)});
}
DSAStackTy::LCDeclInfo DSAStackTy::isLoopControlVariable(ValueDecl *D) {
assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
return Stack.back().LCVMap.count(D) > 0 ? Stack.back().LCVMap[D]
: LCDeclInfo(0, nullptr);
auto &StackElem = Stack.back().first.back();
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
DSAStackTy::LCDeclInfo DSAStackTy::isParentLoopControlVariable(ValueDecl *D) {
assert(Stack.size() > 2 && "Data-sharing attributes stack is empty");
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
D = getCanonicalDecl(D);
return Stack[Stack.size() - 2].LCVMap.count(D) > 0
? Stack[Stack.size() - 2].LCVMap[D]
: LCDeclInfo(0, nullptr);
auto &StackElem = *std::next(Stack.back().first.rbegin());
auto It = StackElem.LCVMap.find(D);
if (It != StackElem.LCVMap.end())
return It->second;
return {0, nullptr};
}
ValueDecl *DSAStackTy::getParentLoopControlVariable(unsigned I) {
assert(Stack.size() > 2 && "Data-sharing attributes stack is empty");
if (Stack[Stack.size() - 2].LCVMap.size() < I)
assert(!isStackEmpty() && Stack.back().first.size() > 1 &&
"Data-sharing attributes stack is empty");
auto &StackElem = *std::next(Stack.back().first.rbegin());
if (StackElem.LCVMap.size() < I)
return nullptr;
for (auto &Pair : Stack[Stack.size() - 2].LCVMap) {
for (auto &Pair : StackElem.LCVMap)
if (Pair.second.first == I)
return Pair.first;
}
return nullptr;
}
@ -571,13 +638,13 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
DeclRefExpr *PrivateCopy) {
D = getCanonicalDecl(D);
if (A == OMPC_threadprivate) {
auto &Data = Stack[0].SharingMap[D];
auto &Data = Threadprivates[D];
Data.Attributes = A;
Data.RefExpr.setPointer(E);
Data.PrivateCopy = nullptr;
} else {
assert(Stack.size() > 1 && "Data-sharing attributes stack is empty");
auto &Data = Stack.back().SharingMap[D];
assert(!isStackEmpty() && "Data-sharing attributes stack is empty");
auto &Data = Stack.back().first.back().SharingMap[D];
assert(Data.Attributes == OMPC_unknown || (A == Data.Attributes) ||
(A == OMPC_firstprivate && Data.Attributes == OMPC_lastprivate) ||
(A == OMPC_lastprivate && Data.Attributes == OMPC_firstprivate) ||
@ -592,7 +659,7 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
Data.RefExpr.setPointerAndInt(E, IsLastprivate);
Data.PrivateCopy = PrivateCopy;
if (PrivateCopy) {
auto &Data = Stack.back().SharingMap[PrivateCopy->getDecl()];
auto &Data = Stack.back().first.back().SharingMap[PrivateCopy->getDecl()];
Data.Attributes = A;
Data.RefExpr.setPointerAndInt(PrivateCopy, IsLastprivate);
Data.PrivateCopy = nullptr;
@ -602,19 +669,17 @@ void DSAStackTy::addDSA(ValueDecl *D, Expr *E, OpenMPClauseKind A,
bool DSAStackTy::isOpenMPLocal(VarDecl *D, StackTy::reverse_iterator Iter) {
D = D->getCanonicalDecl();
if (Stack.size() > 2) {
reverse_iterator I = Iter, E = std::prev(Stack.rend());
if (!isStackEmpty() && Stack.back().first.size() > 1) {
reverse_iterator I = Iter, E = Stack.back().first.rend();
Scope *TopScope = nullptr;
while (I != E && !isParallelOrTaskRegion(I->Directive)) {
while (I != E && !isParallelOrTaskRegion(I->Directive))
++I;
}
if (I == E)
return false;
TopScope = I->CurScope ? I->CurScope->getParent() : nullptr;
Scope *CurScope = getCurScope();
while (CurScope != TopScope && !CurScope->isDeclScope(D)) {
while (CurScope != TopScope && !CurScope->isDeclScope(D))
CurScope = CurScope->getParent();
}
return CurScope != TopScope;
}
return false;
@ -665,16 +730,16 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
D->getLocation()),
OMPC_threadprivate);
}
if (Stack[0].SharingMap.count(D)) {
DVar.RefExpr = Stack[0].SharingMap[D].RefExpr.getPointer();
auto TI = Threadprivates.find(D);
if (TI != Threadprivates.end()) {
DVar.RefExpr = TI->getSecond().RefExpr.getPointer();
DVar.CKind = OMPC_threadprivate;
return DVar;
}
if (Stack.size() == 1) {
if (isStackEmpty())
// Not in OpenMP execution region and top scope was already checked.
return DVar;
}
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, C/C++, predetermined, p.4]
@ -722,11 +787,10 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
// Explicitly specified attributes and local variables with predetermined
// attributes.
auto StartI = std::next(Stack.rbegin());
auto EndI = std::prev(Stack.rend());
if (FromParent && StartI != EndI) {
auto StartI = std::next(Stack.back().first.rbegin());
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
StartI = std::next(StartI);
}
auto I = std::prev(StartI);
if (I->SharingMap.count(D)) {
DVar.RefExpr = I->SharingMap[D].RefExpr.getPointer();
@ -740,12 +804,15 @@ DSAStackTy::DSAVarData DSAStackTy::getTopDSA(ValueDecl *D, bool FromParent) {
DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
bool FromParent) {
D = getCanonicalDecl(D);
auto StartI = Stack.rbegin();
auto EndI = std::prev(Stack.rend());
if (FromParent && StartI != EndI) {
StartI = std::next(StartI);
if (isStackEmpty()) {
StackTy::reverse_iterator I;
return getDSA(I, D);
}
D = getCanonicalDecl(D);
auto StartI = Stack.back().first.rbegin();
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
StartI = std::next(StartI);
return getDSA(StartI, D);
}
@ -754,33 +821,40 @@ DSAStackTy::hasDSA(ValueDecl *D,
const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
bool FromParent) {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
auto StartI = std::next(Stack.rbegin());
auto EndI = Stack.rend();
if (FromParent && StartI != EndI) {
auto StartI = std::next(Stack.back().first.rbegin());
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
StartI = std::next(StartI);
}
for (auto I = StartI, EE = EndI; I != EE; ++I) {
if (StartI == EndI)
return {};
auto I = std::prev(StartI);
do {
++I;
if (!DPred(I->Directive) && !isParallelOrTaskRegion(I->Directive))
continue;
DSAVarData DVar = getDSA(I, D);
if (CPred(DVar.CKind))
return DVar;
}
return DSAVarData();
} while (I != EndI);
return {};
}
DSAStackTy::DSAVarData DSAStackTy::hasInnermostDSA(
ValueDecl *D, const llvm::function_ref<bool(OpenMPClauseKind)> &CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
bool FromParent) {
if (isStackEmpty())
return {};
D = getCanonicalDecl(D);
auto StartI = std::next(Stack.rbegin());
auto EndI = Stack.rend();
auto StartI = std::next(Stack.back().first.rbegin());
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
StartI = std::next(StartI);
if (StartI == EndI || !DPred(StartI->Directive))
return DSAVarData();
return {};
DSAVarData DVar = getDSA(StartI, D);
return CPred(DVar.CKind) ? DVar : DSAVarData();
}
@ -790,9 +864,11 @@ bool DSAStackTy::hasExplicitDSA(
unsigned Level, bool NotLastprivate) {
if (CPred(ClauseKindMode))
return true;
if (isStackEmpty())
return false;
D = getCanonicalDecl(D);
auto StartI = std::next(Stack.begin());
auto EndI = Stack.end();
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
@ -805,8 +881,10 @@ bool DSAStackTy::hasExplicitDSA(
bool DSAStackTy::hasExplicitDirective(
const llvm::function_ref<bool(OpenMPDirectiveKind)> &DPred,
unsigned Level) {
auto StartI = std::next(Stack.begin());
auto EndI = Stack.end();
if (isStackEmpty())
return false;
auto StartI = Stack.back().first.begin();
auto EndI = Stack.back().first.end();
if (std::distance(StartI, EndI) <= (int)Level)
return false;
std::advance(StartI, Level);
@ -819,13 +897,12 @@ bool DSAStackTy::hasDirective(
&DPred,
bool FromParent) {
// We look only in the enclosing region.
if (Stack.size() < 2)
if (isStackEmpty())
return false;
auto StartI = std::next(Stack.rbegin());
auto EndI = std::prev(Stack.rend());
if (FromParent && StartI != EndI) {
auto StartI = std::next(Stack.back().first.rbegin());
auto EndI = Stack.back().first.rend();
if (FromParent && StartI != EndI)
StartI = std::next(StartI);
}
for (auto I = StartI, EE = EndI; I != EE; ++I) {
if (DPred(I->Directive, I->DirectiveName, I->ConstructLoc))
return true;
@ -839,6 +916,14 @@ void Sema::InitDataSharingAttributesStack() {
#define DSAStack static_cast<DSAStackTy *>(VarDataSharingAttributesStack)
void Sema::pushOpenMPFunctionRegion() {
DSAStack->pushFunction();
}
void Sema::popOpenMPFunctionRegion(const FunctionScopeInfo *OldFSI) {
DSAStack->popFunction(OldFSI);
}
bool Sema::IsOpenMPCapturedByRef(ValueDecl *D, unsigned Level) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");

View File

@ -2220,6 +2220,9 @@ public:
Base = BaseResult.get();
QualType BaseType = Base->getType();
if (isArrow && !BaseType->isPointerType())
return ExprError();
// FIXME: this involves duplicating earlier analysis in a lot of
// cases; we should avoid this when possible.
LookupResult R(getSema(), MemberNameInfo, Sema::LookupMemberName);

View File

@ -4779,6 +4779,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
bool First = true;
Module *CurrentModule = nullptr;
Module::ModuleKind ModuleKind = Module::ModuleMapModule;
RecordData Record;
while (true) {
llvm::BitstreamEntry Entry = F.Stream.advanceSkippingSubblocks();
@ -4871,6 +4872,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
CurrentModule->setASTFile(F.File);
}
CurrentModule->Kind = ModuleKind;
CurrentModule->Signature = F.Signature;
CurrentModule->IsFromModuleFile = true;
CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
@ -4969,6 +4971,7 @@ ASTReader::ReadSubmoduleBlock(ModuleFile &F, unsigned ClientLoadCapabilities) {
SubmodulesLoaded.resize(SubmodulesLoaded.size() + F.LocalNumSubmodules);
}
ModuleKind = (Module::ModuleKind)Record[2];
break;
}

View File

@ -2694,9 +2694,10 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
unsigned ConflictAbbrev = Stream.EmitAbbrev(std::move(Abbrev));
// Write the submodule metadata block.
RecordData::value_type Record[] = {getNumberOfModules(WritingModule),
FirstSubmoduleID -
NUM_PREDEF_SUBMODULE_IDS};
RecordData::value_type Record[] = {
getNumberOfModules(WritingModule),
FirstSubmoduleID - NUM_PREDEF_SUBMODULE_IDS,
(unsigned)WritingModule->Kind};
Stream.EmitRecord(SUBMODULE_METADATA, Record);
// Write all of the submodules.

View File

@ -177,7 +177,10 @@ public:
II_wcsdup(nullptr), II_win_wcsdup(nullptr), II_g_malloc(nullptr),
II_g_malloc0(nullptr), II_g_realloc(nullptr), II_g_try_malloc(nullptr),
II_g_try_malloc0(nullptr), II_g_try_realloc(nullptr),
II_g_free(nullptr), II_g_memdup(nullptr) {}
II_g_free(nullptr), II_g_memdup(nullptr), II_g_malloc_n(nullptr),
II_g_malloc0_n(nullptr), II_g_realloc_n(nullptr),
II_g_try_malloc_n(nullptr), II_g_try_malloc0_n(nullptr),
II_g_try_realloc_n(nullptr) {}
/// In pessimistic mode, the checker assumes that it does not know which
/// functions might free the memory.
@ -241,7 +244,10 @@ private:
*II_if_nameindex, *II_if_freenameindex, *II_wcsdup,
*II_win_wcsdup, *II_g_malloc, *II_g_malloc0,
*II_g_realloc, *II_g_try_malloc, *II_g_try_malloc0,
*II_g_try_realloc, *II_g_free, *II_g_memdup;
*II_g_try_realloc, *II_g_free, *II_g_memdup,
*II_g_malloc_n, *II_g_malloc0_n, *II_g_realloc_n,
*II_g_try_malloc_n, *II_g_try_malloc0_n,
*II_g_try_realloc_n;
mutable Optional<uint64_t> KernelZeroFlagVal;
void initIdentifierInfo(ASTContext &C) const;
@ -321,9 +327,12 @@ private:
bool &ReleasedAllocated,
bool ReturnsNullOnFailure = false) const;
ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
bool FreesMemOnFailure,
ProgramStateRef State) const;
ProgramStateRef ReallocMemAux(CheckerContext &C, const CallExpr *CE,
bool FreesMemOnFailure,
ProgramStateRef State,
bool SuffixWithN = false) const;
static SVal evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
const Expr *BlockBytes);
static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE,
ProgramStateRef State);
@ -569,6 +578,12 @@ void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
II_g_try_realloc = &Ctx.Idents.get("g_try_realloc");
II_g_free = &Ctx.Idents.get("g_free");
II_g_memdup = &Ctx.Idents.get("g_memdup");
II_g_malloc_n = &Ctx.Idents.get("g_malloc_n");
II_g_malloc0_n = &Ctx.Idents.get("g_malloc0_n");
II_g_realloc_n = &Ctx.Idents.get("g_realloc_n");
II_g_try_malloc_n = &Ctx.Idents.get("g_try_malloc_n");
II_g_try_malloc0_n = &Ctx.Idents.get("g_try_malloc0_n");
II_g_try_realloc_n = &Ctx.Idents.get("g_try_realloc_n");
}
bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
@ -617,7 +632,10 @@ bool MallocChecker::isCMemFunction(const FunctionDecl *FD,
FunI == II_g_malloc || FunI == II_g_malloc0 ||
FunI == II_g_realloc || FunI == II_g_try_malloc ||
FunI == II_g_try_malloc0 || FunI == II_g_try_realloc ||
FunI == II_g_memdup)
FunI == II_g_memdup || FunI == II_g_malloc_n ||
FunI == II_g_malloc0_n || FunI == II_g_realloc_n ||
FunI == II_g_try_malloc_n || FunI == II_g_try_malloc0_n ||
FunI == II_g_try_realloc_n)
return true;
}
@ -767,6 +785,17 @@ llvm::Optional<ProgramStateRef> MallocChecker::performKernelMalloc(
return None;
}
SVal MallocChecker::evalMulForBufferSize(CheckerContext &C, const Expr *Blocks,
const Expr *BlockBytes) {
SValBuilder &SB = C.getSValBuilder();
SVal BlocksVal = C.getSVal(Blocks);
SVal BlockBytesVal = C.getSVal(BlockBytes);
ProgramStateRef State = C.getState();
SVal TotalSize = SB.evalBinOp(State, BO_Mul, BlocksVal, BlockBytesVal,
SB.getContext().getSizeType());
return TotalSize;
}
void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
if (C.wasInlined)
return;
@ -813,10 +842,10 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
State = ProcessZeroAllocation(C, CE, 0, State);
} else if (FunI == II_realloc || FunI == II_g_realloc ||
FunI == II_g_try_realloc) {
State = ReallocMem(C, CE, false, State);
State = ReallocMemAux(C, CE, false, State);
State = ProcessZeroAllocation(C, CE, 1, State);
} else if (FunI == II_reallocf) {
State = ReallocMem(C, CE, true, State);
State = ReallocMemAux(C, CE, true, State);
State = ProcessZeroAllocation(C, CE, 1, State);
} else if (FunI == II_calloc) {
State = CallocMem(C, CE, State);
@ -874,6 +903,25 @@ void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
return;
State = MallocMemAux(C, CE, CE->getArg(1), UndefinedVal(), State);
State = ProcessZeroAllocation(C, CE, 1, State);
} else if (FunI == II_g_malloc_n || FunI == II_g_try_malloc_n ||
FunI == II_g_malloc0_n || FunI == II_g_try_malloc0_n) {
if (CE->getNumArgs() < 2)
return;
SVal Init = UndefinedVal();
if (FunI == II_g_malloc0_n || FunI == II_g_try_malloc0_n) {
SValBuilder &SB = C.getSValBuilder();
Init = SB.makeZeroVal(SB.getContext().CharTy);
}
SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1));
State = MallocMemAux(C, CE, TotalSize, Init, State);
State = ProcessZeroAllocation(C, CE, 0, State);
State = ProcessZeroAllocation(C, CE, 1, State);
} else if (FunI == II_g_realloc_n || FunI == II_g_try_realloc_n) {
if (CE->getNumArgs() < 3)
return;
State = ReallocMemAux(C, CE, false, State, true);
State = ProcessZeroAllocation(C, CE, 1, State);
State = ProcessZeroAllocation(C, CE, 2, State);
}
}
@ -1976,14 +2024,17 @@ void MallocChecker::ReportUseZeroAllocated(CheckerContext &C,
}
}
ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
const CallExpr *CE,
bool FreesOnFail,
ProgramStateRef State) const {
ProgramStateRef MallocChecker::ReallocMemAux(CheckerContext &C,
const CallExpr *CE,
bool FreesOnFail,
ProgramStateRef State,
bool SuffixWithN) const {
if (!State)
return nullptr;
if (CE->getNumArgs() < 2)
if (SuffixWithN && CE->getNumArgs() < 3)
return nullptr;
else if (CE->getNumArgs() < 2)
return nullptr;
const Expr *arg0Expr = CE->getArg(0);
@ -1998,20 +2049,19 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
DefinedOrUnknownSVal PtrEQ =
svalBuilder.evalEQ(State, arg0Val, svalBuilder.makeNull());
// Get the size argument. If there is no size arg then give up.
// Get the size argument.
const Expr *Arg1 = CE->getArg(1);
if (!Arg1)
return nullptr;
// Get the value of the size argument.
SVal Arg1ValG = State->getSVal(Arg1, LCtx);
if (!Arg1ValG.getAs<DefinedOrUnknownSVal>())
SVal TotalSize = State->getSVal(Arg1, LCtx);
if (SuffixWithN)
TotalSize = evalMulForBufferSize(C, Arg1, CE->getArg(2));
if (!TotalSize.getAs<DefinedOrUnknownSVal>())
return nullptr;
DefinedOrUnknownSVal Arg1Val = Arg1ValG.castAs<DefinedOrUnknownSVal>();
// Compare the size argument to 0.
DefinedOrUnknownSVal SizeZero =
svalBuilder.evalEQ(State, Arg1Val,
svalBuilder.evalEQ(State, TotalSize.castAs<DefinedOrUnknownSVal>(),
svalBuilder.makeIntValWithPtrWidth(0, false));
ProgramStateRef StatePtrIsNull, StatePtrNotNull;
@ -2025,8 +2075,8 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
// If the ptr is NULL and the size is not 0, the call is equivalent to
// malloc(size).
if ( PrtIsNull && !SizeIsZero) {
ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
if (PrtIsNull && !SizeIsZero) {
ProgramStateRef stateMalloc = MallocMemAux(C, CE, TotalSize,
UndefinedVal(), StatePtrIsNull);
return stateMalloc;
}
@ -2059,7 +2109,7 @@ ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
if (ProgramStateRef stateFree =
FreeMemAux(C, CE, State, 0, false, ReleasedAllocated)) {
ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
ProgramStateRef stateRealloc = MallocMemAux(C, CE, TotalSize,
UnknownVal(), stateFree);
if (!stateRealloc)
return nullptr;
@ -2090,12 +2140,8 @@ ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE,
return nullptr;
SValBuilder &svalBuilder = C.getSValBuilder();
const LocationContext *LCtx = C.getLocationContext();
SVal count = State->getSVal(CE->getArg(0), LCtx);
SVal elementSize = State->getSVal(CE->getArg(1), LCtx);
SVal TotalSize = svalBuilder.evalBinOp(State, BO_Mul, count, elementSize,
svalBuilder.getContext().getSizeType());
SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
SVal TotalSize = evalMulForBufferSize(C, CE->getArg(0), CE->getArg(1));
return MallocMemAux(C, CE, TotalSize, zeroVal, State);
}

View File

@ -61,7 +61,9 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
return U->getSubExpr()->IgnoreParenCasts();
}
else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) {
if (ME->isImplicitAccess()) {
return ME;
} else if (ME->isArrow() || isDeclRefExprToReference(ME->getBase())) {
return ME->getBase()->IgnoreParenCasts();
} else {
// If we have a member expr with a dot, the base must have been
@ -73,9 +75,9 @@ const Expr *bugreporter::getDerefExpr(const Stmt *S) {
return IvarRef->getBase()->IgnoreParenCasts();
}
else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(E)) {
return AE->getBase();
return getDerefExpr(AE->getBase());
}
else if (isDeclRefExprToReference(E)) {
else if (isa<DeclRefExpr>(E)) {
return E;
}
break;
@ -961,7 +963,24 @@ bool bugreporter::trackNullOrUndefValue(const ExplodedNode *N,
const Expr *Inner = nullptr;
if (const Expr *Ex = dyn_cast<Expr>(S)) {
Ex = Ex->IgnoreParenCasts();
if (ExplodedGraph::isInterestingLValueExpr(Ex) || CallEvent::isCallStmt(Ex))
// Performing operator `&' on an lvalue expression is essentially a no-op.
// Then, if we are taking addresses of fields or elements, these are also
// unlikely to matter.
// FIXME: There's a hack in our Store implementation that always computes
// field offsets around null pointers as if they are always equal to 0.
// The idea here is to report accesses to fields as null dereferences
// even though the pointer value that's being dereferenced is actually
// the offset of the field rather than exactly 0.
// See the FIXME in StoreManager's getLValueFieldOrIvar() method.
// This code interacts heavily with this hack; otherwise the value
// would not be null at all for most fields, so we'd be unable to track it.
if (const auto *Op = dyn_cast<UnaryOperator>(Ex))
if (Op->getOpcode() == UO_AddrOf && Op->getSubExpr()->isLValue())
if (const Expr *DerefEx = getDerefExpr(Op->getSubExpr()))
Ex = DerefEx;
if (Ex && (ExplodedGraph::isInterestingLValueExpr(Ex) || CallEvent::isCallStmt(Ex)))
Inner = Ex;
}

View File

@ -1904,7 +1904,7 @@ void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
// Evaluate the LHS of the case value.
llvm::APSInt V1 = Case->getLHS()->EvaluateKnownConstInt(getContext());
assert(V1.getBitWidth() == getContext().getTypeSize(CondE->getType()));
assert(V1.getBitWidth() == getContext().getIntWidth(CondE->getType()));
// Get the RHS of the case, if it exists.
llvm::APSInt V2;

View File

@ -1338,6 +1338,9 @@ RegionStoreManager::getSizeInElements(ProgramStateRef state,
/// the array). This is called by ExprEngine when evaluating casts
/// from arrays to pointers.
SVal RegionStoreManager::ArrayToPointer(Loc Array, QualType T) {
if (Array.getAs<loc::ConcreteInt>())
return Array;
if (!Array.getAs<loc::MemRegionVal>())
return UnknownVal();

View File

@ -404,9 +404,15 @@ SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
case loc::ConcreteIntKind:
// While these seem funny, this can happen through casts.
// FIXME: What we should return is the field offset. For example,
// add the field offset to the integer value. That way funny things
// FIXME: What we should return is the field offset, not base. For example,
// add the field offset to the integer value. That way things
// like this work properly: &(((struct foo *) 0xa)->f)
// However, that's not easy to fix without reducing our abilities
// to catch null pointer dereference. Eg., ((struct foo *)0x0)->f = 7
// is a null dereference even though we're dereferencing offset of f
// rather than null. Coming up with an approach that computes offsets
// over null pointers properly while still being able to catch null
// dereferences might be worth it.
return Base;
default:
@ -431,7 +437,7 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
// If the base is an unknown or undefined value, just return it back.
// FIXME: For absolute pointer addresses, we just return that value back as
// well, although in reality we should return the offset added to that
// value.
// value. See also the similar FIXME in getLValueFieldOrIvar().
if (Base.isUnknownOrUndef() || Base.getAs<loc::ConcreteInt>())
return Base;

View File

@ -24,3 +24,16 @@ void testCasting(int i) {
clang_analyzer_eval(j == 0); // expected-warning{{FALSE}}
}
}
enum class EnumBool : bool {
F = false,
T = true
};
bool testNoCrashOnSwitchEnumBool(EnumBool E) {
switch (E) {
case EnumBool::F:
return false;
}
return true;
}

View File

@ -13,6 +13,12 @@ gpointer g_realloc(gpointer mem, gsize n_bytes);
gpointer g_try_malloc(gsize n_bytes);
gpointer g_try_malloc0(gsize n_bytes);
gpointer g_try_realloc(gpointer mem, gsize n_bytes);
gpointer g_malloc_n(gsize n_blocks, gsize n_block_bytes);
gpointer g_malloc0_n(gsize n_blocks, gsize n_block_bytes);
gpointer g_realloc_n(gpointer mem, gsize n_blocks, gsize n_block_bytes);
gpointer g_try_malloc_n(gsize n_blocks, gsize n_block_bytes);
gpointer g_try_malloc0_n(gsize n_blocks, gsize n_block_bytes);
gpointer g_try_realloc_n(gpointer mem, gsize n_blocks, gsize n_block_bytes);
void g_free(gpointer mem);
gpointer g_memdup(gconstpointer mem, guint byte_size);
@ -25,6 +31,12 @@ void f1() {
gpointer g3 = g_try_malloc(n_bytes);
gpointer g4 = g_try_malloc0(n_bytes);
g3 = g_try_realloc(g3, n_bytes * 2);
gpointer g5 = g_malloc_n(n_bytes, sizeof(char));
gpointer g6 = g_malloc0_n(n_bytes, sizeof(char));
g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char));
gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char));
gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char));
g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char));
g_free(g1);
g_free(g2);
@ -38,6 +50,12 @@ void f2() {
gpointer g3 = g_try_malloc(n_bytes);
gpointer g4 = g_try_malloc0(n_bytes);
g3 = g_try_realloc(g3, n_bytes * 2);
gpointer g5 = g_malloc_n(n_bytes, sizeof(char));
gpointer g6 = g_malloc0_n(n_bytes, sizeof(char));
g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char));
gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char));
gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char));
g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char));
g_free(g1);
g_free(g2);
@ -52,8 +70,100 @@ void f3() {
gpointer g3 = g_try_malloc(n_bytes);
gpointer g4 = g_try_malloc0(n_bytes);
g3 = g_try_realloc(g3, n_bytes * 2); // expected-warning{{Potential leak of memory pointed to by 'g4'}}
gpointer g5 = g_malloc_n(n_bytes, sizeof(char));
gpointer g6 = g_malloc0_n(n_bytes, sizeof(char));
g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g6'}}
gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g5'}}
gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char));
g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}}
g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}}
g_free(g2);
g_free(g3);
}
void f4() {
gpointer g1 = g_malloc(n_bytes);
gpointer g2 = g_malloc0(n_bytes);
g1 = g_realloc(g1, n_bytes * 2);
gpointer g3 = g_try_malloc(n_bytes);
gpointer g4 = g_try_malloc0(n_bytes);
g3 = g_try_realloc(g3, n_bytes * 2);
gpointer g5 = g_malloc_n(n_bytes, sizeof(char));
gpointer g6 = g_malloc0_n(n_bytes, sizeof(char));
g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g6'}}
gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g5'}}
gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char));
g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}}
g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}}
g_free(g2);
g_free(g3);
g_free(g4);
}
void f5() {
gpointer g1 = g_malloc(n_bytes);
gpointer g2 = g_malloc0(n_bytes);
g1 = g_realloc(g1, n_bytes * 2);
gpointer g3 = g_try_malloc(n_bytes);
gpointer g4 = g_try_malloc0(n_bytes);
g3 = g_try_realloc(g3, n_bytes * 2);
gpointer g5 = g_malloc_n(n_bytes, sizeof(char));
gpointer g6 = g_malloc0_n(n_bytes, sizeof(char));
g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g6'}}
gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char));
gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char));
g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}}
g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}}
g_free(g2);
g_free(g3);
g_free(g4);
g_free(g5);
}
void f6() {
gpointer g1 = g_malloc(n_bytes);
gpointer g2 = g_malloc0(n_bytes);
g1 = g_realloc(g1, n_bytes * 2);
gpointer g3 = g_try_malloc(n_bytes);
gpointer g4 = g_try_malloc0(n_bytes);
g3 = g_try_realloc(g3, n_bytes * 2);
gpointer g5 = g_malloc_n(n_bytes, sizeof(char));
gpointer g6 = g_malloc0_n(n_bytes, sizeof(char));
g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char));
gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char));
gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char));
g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}}
g_free(g1); // expected-warning{{Potential leak of memory pointed to by 'g7'}}
g_free(g2);
g_free(g3);
g_free(g4);
g_free(g5);
g_free(g6);
}
void f7() {
gpointer g1 = g_malloc(n_bytes);
gpointer g2 = g_malloc0(n_bytes);
g1 = g_realloc(g1, n_bytes * 2);
gpointer g3 = g_try_malloc(n_bytes);
gpointer g4 = g_try_malloc0(n_bytes);
g3 = g_try_realloc(g3, n_bytes * 2);
gpointer g5 = g_malloc_n(n_bytes, sizeof(char));
gpointer g6 = g_malloc0_n(n_bytes, sizeof(char));
g5 = g_realloc_n(g5, n_bytes * 2, sizeof(char));
gpointer g7 = g_try_malloc_n(n_bytes, sizeof(char));
gpointer g8 = g_try_malloc0_n(n_bytes, sizeof(char));
g7 = g_try_realloc_n(g7, n_bytes * 2, sizeof(char)); // expected-warning{{Potential leak of memory pointed to by 'g8'}}
g_free(g1);
g_free(g2);
g_free(g3);
g_free(g4);
g_free(g5);
g_free(g6);
g_free(g7);
}

View File

@ -1,7 +1,7 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=core -analyzer-config suppress-inlined-defensive-checks=true -verify %s
// Perform inline defensive checks.
void idc(int *p) {
void idc(void *p) {
if (p)
;
}
@ -139,3 +139,42 @@ void idcTrackZeroThroughDoubleAssignemnt(int x) {
int z = y;
idcTriggerZeroValueThroughCall(z);
}
struct S {
int f1;
int f2;
};
void idcTrackZeroValueThroughUnaryPointerOperators(struct S *s) {
idc(s);
*(&(s->f1)) = 7; // no-warning
}
void idcTrackZeroValueThroughUnaryPointerOperatorsWithOffset1(struct S *s) {
idc(s);
int *x = &(s->f2);
*x = 7; // no-warning
}
void idcTrackZeroValueThroughUnaryPointerOperatorsWithOffset2(struct S *s) {
idc(s);
int *x = &(s->f2) - 1;
// FIXME: Should not warn.
*x = 7; // expected-warning{{Dereference of null pointer}}
}
void idcTrackZeroValueThroughUnaryPointerOperatorsWithAssignment(struct S *s) {
idc(s);
int *x = &(s->f1);
*x = 7; // no-warning
}
struct S2 {
int a[1];
};
void idcTrackZeroValueThroughUnaryPointerOperatorsWithArrayField(struct S2 *s) {
idc(s);
*(&(s->a[0])) = 7; // no-warning
}

View File

@ -71,3 +71,16 @@ void test(int *p1, int *p2) {
idc(p1);
Foo f(p1);
}
struct Bar {
int x;
};
void idcBar(Bar *b) {
if (b)
;
}
void testRefToField(Bar *b) {
idcBar(b);
int &x = b->x; // no-warning
x = 5;
}

View File

@ -0,0 +1,37 @@
// RUN: %clang_analyze_cc1 -w -triple i386-apple-darwin10 -analyzer-checker=core,debug.ExprInspection -verify %s
void clang_analyzer_eval(int);
struct S {
int x, y;
int z[2];
};
void testOffsets(struct S *s, int coin) {
if (s != 0)
return;
// FIXME: Here we are testing the hack that computes offsets to null pointers
// as 0 in order to find null dereferences of not-exactly-null pointers,
// such as &(s->y) below, which is equal to 4 rather than 0 in run-time.
// These are indeed null.
clang_analyzer_eval(s == 0); // expected-warning{{TRUE}}
clang_analyzer_eval(&(s->x) == 0); // expected-warning{{TRUE}}
// FIXME: These should ideally be true.
clang_analyzer_eval(&(s->y) == 4); // expected-warning{{FALSE}}
clang_analyzer_eval(&(s->z[0]) == 8); // expected-warning{{FALSE}}
clang_analyzer_eval(&(s->z[1]) == 12); // expected-warning{{FALSE}}
// FIXME: These should ideally be false.
clang_analyzer_eval(&(s->y) == 0); // expected-warning{{TRUE}}
clang_analyzer_eval(&(s->z[0]) == 0); // expected-warning{{TRUE}}
clang_analyzer_eval(&(s->z[1]) == 0); // expected-warning{{TRUE}}
// But these should still be reported as null dereferences.
if (coin)
s->y = 5; // expected-warning{{Access to field 'y' results in a dereference of a null pointer (loaded from variable 's')}}
else
s->z[1] = 6; // expected-warning{{Array access (via field 'z') results in a null pointer dereference}}
}

View File

@ -122,7 +122,7 @@ void f1(void) {
}
void f_uninit(void) {
int x;
int x; // expected-note {{'x' declared without an initial value}}
doStuff_uninit(&x); // expected-warning {{1st function call argument is a pointer to uninitialized value}}
// expected-note@-1 {{1st function call argument is a pointer to uninitialized value}}
}

View File

@ -0,0 +1,55 @@
// Tests for module-declaration syntax.
//
// RUN: rm -rf %t
// RUN: mkdir -p %t
// RUN: echo 'export module x; int a, b;' > %t/x.cppm
// RUN: echo 'export module x.y; int c;' > %t/x.y.cppm
//
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %t/x.cppm -o %t/x.pcm
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface -fmodule-file=%t/x.pcm %t/x.y.cppm -o %t/x.y.pcm
//
// Module implementation for unknown and known module. (The former is ill-formed.)
// FIXME: TEST=1 should fail because we don't have an interface for module z.
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=1 -DEXPORT= -DPARTITION= -DMODULE_NAME=z
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=2 -DEXPORT= -DPARTITION= -DMODULE_NAME=x
//
// Module interface for unknown and known module. (The latter is ill-formed due to
// redefinition.)
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=3 -DEXPORT=export -DPARTITION= -DMODULE_NAME=z
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=4 -DEXPORT=export -DPARTITION= -DMODULE_NAME=x
//
// Defining a module partition.
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=5 -DEXPORT=export -DPARTITION=partition -DMODULE_NAME=z
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=6 -DEXPORT= -DPARTITION=partition -DMODULE_NAME=z
//
// Miscellaneous syntax.
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=7 -DEXPORT= -DPARTITION=elderberry -DMODULE_NAME=z
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=8 -DEXPORT= -DPARTITION= -DMODULE_NAME='z [[]]'
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=9 -DEXPORT= -DPARTITION= -DMODULE_NAME='z [[fancy]]'
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DTEST=10 -DEXPORT= -DPARTITION= -DMODULE_NAME='z [[maybe_unused]]'
EXPORT module PARTITION MODULE_NAME;
#if TEST == 4
// expected-error@-2 {{redefinition of module 'x'}}
// expected-note-re@module-declaration.cpp:* {{loaded from '{{.*}}/x.pcm'}}
#elif TEST == 6
// expected-error@-5 {{module partition must be declared 'export'}}
#elif TEST == 7
// expected-error@-7 {{expected ';'}} expected-error@-7 {{requires a type specifier}}
#elif TEST == 9
// expected-warning@-9 {{unknown attribute 'fancy' ignored}}
#elif TEST == 10
// expected-error-re@-11 {{'maybe_unused' attribute cannot be applied to a module{{$}}}}
#else
// expected-no-diagnostics
#endif

View File

@ -1,7 +1,7 @@
// RUN: %clang_cc1 -fmodules-ts -std=c++1z -triple=x86_64-linux-gnu -fmodules-codegen -emit-module-interface %s -o %t.pcm
// RUN: %clang_cc1 -fmodules-ts -std=c++1z -triple=x86_64-linux-gnu %t.pcm -emit-llvm -o - | FileCheck %s
module FooBar;
export module FooBar;
export {
// CHECK-LABEL: define i32 @_Z1fv(

View File

@ -0,0 +1,41 @@
// RUN: rm -rf %t
// RUN: mkdir -p %t
// RUN: echo 'export module x; int a, b;' > %t/x.cppm
// RUN: echo 'export module x.y; int c;' > %t/x.y.cppm
//
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface %t/x.cppm -o %t/x.pcm
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -emit-module-interface -fmodule-file=%t/x.pcm %t/x.y.cppm -o %t/x.y.pcm
//
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DMODULE_NAME=z
// RUN: %clang_cc1 -std=c++1z -fmodules-ts -I%t -fmodule-file=%t/x.y.pcm -verify %s \
// RUN: -DMODULE_X -DMODULE_NAME=x
module MODULE_NAME;
int use_1 = a;
#if !MODULE_X
// expected-error@-2 {{declaration of 'a' must be imported from module 'x' before it is required}}
// expected-note@x.cppm:1 {{here}}
#endif
import x;
int use_2 = b; // ok
// There is no relation between module x and module x.y.
int use_3 = c; // expected-error {{declaration of 'c' must be imported from module 'x.y'}}
// expected-note@x.y.cppm:1 {{here}}
import x [[]];
import x [[foo]]; // expected-warning {{unknown attribute 'foo' ignored}}
import x [[noreturn]]; // expected-error {{'noreturn' attribute cannot be applied to a module import}}
import x [[blarg::noreturn]]; // expected-warning {{unknown attribute 'noreturn' ignored}}
import x.y;
import x.; // expected-error {{expected a module name after 'import'}}
import .x; // expected-error {{expected a module name after 'import'}}
int use_4 = c; // ok
import blarg; // expected-error {{module 'blarg' not found}}

View File

@ -0,0 +1,27 @@
// RUN: %clang_cc1 -fmodules-ts %s -verify -o /dev/null
// RUN: %clang_cc1 -fmodules-ts %s -DINTERFACE -verify -o /dev/null
// RUN: %clang_cc1 -fmodules-ts %s -DIMPLEMENTATION -verify -o /dev/null
//
// RUN: %clang_cc1 -fmodules-ts %s -DBUILT_AS_INTERFACE -emit-module-interface -verify -o /dev/null
// RUN: %clang_cc1 -fmodules-ts %s -DINTERFACE -DBUILT_AS_INTERFACE -emit-module-interface -verify -o /dev/null
// RUN: %clang_cc1 -fmodules-ts %s -DIMPLEMENTATION -DBUILT_AS_INTERFACE -emit-module-interface -verify -o /dev/null
#if INTERFACE
export module A;
#elif IMPLEMENTATION
module A;
#ifdef BUILT_AS_INTERFACE
// expected-error@-2 {{missing 'export' specifier in module declaration while building module interface}}
#endif
#else
#ifdef BUILT_AS_INTERFACE
// FIXME: Diagnose missing module declaration (at end of TU)
#endif
#endif
export int a;
#ifndef INTERFACE
// expected-error@-2 {{export declaration can only be used within a module interface unit}}
#else
// expected-no-diagnostics
#endif

View File

@ -0,0 +1,7 @@
// RUN: %clang_cc1 -fsanitize=address -emit-llvm -o - -triple x86_64-windows-msvc %s | FileCheck %s --check-prefix=WITH-GC
// RUN: %clang_cc1 -fsanitize=address -emit-llvm -o - -triple x86_64-windows-msvc -fdata-sections %s | FileCheck %s --check-prefix=WITH-GC
int global;
// WITH-GC-NOT: call void @__asan_register_globals
// WITHOUT-GC: call void @__asan_register_globals

View File

@ -36,13 +36,7 @@ void foo() {
// CHECK-COMMON: %[[I8PTR:.*]] = bitcast i32* %[[PTR:.*]] to i8*
// CHECK-COMMON-NEXT: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* %[[I8PTR]], i1 false, i1 false)
// CHECK-COMMON-NEXT: %[[CHECK0:.*]] = icmp uge i64 %[[SIZE]], 4
// CHECK-COMMON: %[[PTRTOINT:.*]] = ptrtoint {{.*}}* %[[PTR]] to i64
// CHECK-COMMON-NEXT: %[[MISALIGN:.*]] = and i64 %[[PTRTOINT]], 3
// CHECK-COMMON-NEXT: %[[CHECK1:.*]] = icmp eq i64 %[[MISALIGN]], 0
// CHECK-COMMON: %[[OK:.*]] = and i1 %[[CHECK0]], %[[CHECK1]]
// CHECK-COMMON-NEXT: %[[OK:.*]] = icmp uge i64 %[[SIZE]], 4
// CHECK-UBSAN: br i1 %[[OK]], {{.*}} !prof ![[WEIGHT_MD:.*]], !nosanitize
// CHECK-TRAP: br i1 %[[OK]], {{.*}}

View File

@ -32,10 +32,10 @@ void unroll() {
baz(i);
}
// Checks if icp is invoked by normal compile, but not thinlto compile.
// Check that icp is not invoked (both -O2 and ThinLTO).
// O2-LABEL: define void @icp
// THINLTO-LABEL: define void @icp
// O2: if.true.direct_targ
// O2-NOT: if.true.direct_targ
// ThinLTO-NOT: if.true.direct_targ
void icp(void (*p)()) {
p();

View File

@ -22,15 +22,7 @@ void foo() {
// PARTIAL: %[[SIZE:.*]] = call i64 @llvm.objectsize.i64.p0i8(i8* {{.*}}, i1 false, i1 false)
// PARTIAL-NEXT: %[[CHECK0:.*]] = icmp uge i64 %[[SIZE]], 4
// PARTIAL: %[[MISALIGN:.*]] = and i64 {{.*}}, 3
// PARTIAL-NEXT: %[[CHECK1:.*]] = icmp eq i64 %[[MISALIGN]], 0
// PARTIAL: br i1 %[[CHECK0]], {{.*}} !nosanitize
// PARTIAL: %[[CHECK01:.*]] = and i1 %[[CHECK1]], %[[CHECK0]]
// PARTIAL: br i1 %[[CHECK01]], {{.*}} !nosanitize
// PARTIAL: br i1 %[[CHECK1]], {{.*}} !nosanitize
// PARTIAL: call void @__ubsan_handle_type_mismatch_v1_abort(
// PARTIAL-NEXT: unreachable
// PARTIAL: call void @__ubsan_handle_type_mismatch_v1(
}

View File

@ -1,7 +1,12 @@
// RUN: %clang_cc1 -debug-info-kind=limited -split-dwarf-file foo.dwo -S -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -debug-info-kind=limited -enable-split-dwarf -split-dwarf-file foo.dwo -S -emit-llvm -o - %s | FileCheck --check-prefix=VANILLA %s
int main (void) {
return 0;
}
// Testing to ensure that the dwo name gets output into the compile unit.
// CHECK: !DICompileUnit({{.*}}, splitDebugFilename: "foo.dwo"
// Testing to ensure that the dwo name is not output into the compile unit if
// it's for vanilla split-dwarf rather than split-dwarf for implicit modules.
// VANILLA-NOT: splitDebugFilename

View File

@ -12,15 +12,15 @@ struct D : A {
void testExternallyVisible() {
A *a = new A;
// CHECK: load {{.*}} !invariant.group ![[A_MD:[0-9]+]]
// CHECK: load {{.*}} !invariant.group ![[MD:[0-9]+]]
a->foo();
D *d = new D;
// CHECK: call void @_ZN1DC1Ev(
// CHECK: load {{.*}} !invariant.group ![[D_MD:[0-9]+]]
// CHECK: load {{.*}} !invariant.group ![[MD]]
d->foo();
A *a2 = d;
// CHECK: load {{.*}} !invariant.group ![[A_MD]]
// CHECK: load {{.*}} !invariant.group ![[MD]]
a2->foo();
}
// CHECK-LABEL: {{^}}}
@ -40,35 +40,32 @@ struct C : B {
// CHECK-LABEL: define void @_Z21testInternallyVisibleb(
void testInternallyVisible(bool p) {
B *b = new B;
// CHECK: = load {{.*}}, !invariant.group ![[B_MD:[0-9]+]]
// CHECK: = load {{.*}}, !invariant.group ![[MD]]
b->bar();
// CHECK: call void @_ZN12_GLOBAL__N_11CC1Ev(
C *c = new C;
// CHECK: = load {{.*}}, !invariant.group ![[C_MD:[0-9]+]]
// CHECK: = load {{.*}}, !invariant.group ![[MD]]
c->bar();
}
// Checking A::A()
// CHECK-LABEL: define linkonce_odr void @_ZN1AC2Ev(
// CHECK: store {{.*}}, !invariant.group ![[A_MD]]
// CHECK: store {{.*}}, !invariant.group ![[MD]]
// CHECK-LABEL: {{^}}}
// Checking D::D()
// CHECK-LABEL: define linkonce_odr void @_ZN1DC2Ev(
// CHECK: = call i8* @llvm.invariant.group.barrier(i8*
// CHECK: call void @_ZN1AC2Ev(%struct.A*
// CHECK: store {{.*}} !invariant.group ![[D_MD]]
// CHECK: store {{.*}} !invariant.group ![[MD]]
// Checking B::B()
// CHECK-LABEL: define internal void @_ZN12_GLOBAL__N_11BC2Ev(
// CHECK: store {{.*}}, !invariant.group ![[B_MD]]
// CHECK: store {{.*}}, !invariant.group ![[MD]]
// Checking C::C()
// CHECK-LABEL: define internal void @_ZN12_GLOBAL__N_11CC2Ev(
// CHECK: store {{.*}}, !invariant.group ![[C_MD]]
// CHECK: store {{.*}}, !invariant.group ![[MD]]
// CHECK: ![[A_MD]] = !{!"_ZTS1A"}
// CHECK: ![[D_MD]] = !{!"_ZTS1D"}
// CHECK: ![[B_MD]] = distinct !{}
// CHECK: ![[C_MD]] = distinct !{}
// CHECK: ![[MD]] = !{}

View File

@ -12,6 +12,7 @@ void load_non_null_pointers() {
char c = "foo"[0];
// CHECK-NOT: and i64 {{.*}}, !nosanitize
// CHECK-NOT: icmp ne {{.*}}, null, !nosanitize
// CHECK: ret void
}
@ -43,10 +44,6 @@ struct A {
};
f();
// LAMBDA: %[[LAMBDAINT:[0-9]+]] = ptrtoint %class.anon* %[[FUNCVAR:.*]] to i64, !nosanitize
// LAMBDA: and i64 %[[LAMBDAINT]], 7, !nosanitize
// LAMBDA: call void @__ubsan_handle_type_mismatch
// LAMBDA-NOT: call void @__ubsan_handle_type_mismatch
// LAMBDA: ret void
}

View File

@ -0,0 +1,11 @@
// RUN: %clang_cc1 -triple i686-windows -emit-llvm-only -fcoverage-mapping -dump-coverage-mapping -fprofile-instrument=clang %s | FileCheck %s
struct A {
virtual ~A();
};
// CHECK: ?PR32761@@YAXXZ:
// CHECK-NEXT: File 0, [[@LINE+1]]:16 -> [[@LINE+3]]:2 = #0
void PR32761() {
A a;
}

Some files were not shown because too many files have changed in this diff Show More