Merge llvm trunk r338150, and resolve conflicts.

dim 2018-07-30 16:33:32 +00:00
commit 08ba384a72
2861 changed files with 247440 additions and 132800 deletions


@@ -4,7 +4,7 @@ LLVM Release License
University of Illinois/NCSA
Open Source License
Copyright (c) 2003-2017 University of Illinois at Urbana-Champaign.
Copyright (c) 2003-2018 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:


@@ -0,0 +1,75 @@
/*===-- llvm-c/Comdat.h - Module Comdat C Interface -------------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines the C interface to COMDAT. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_COMDAT_H
#define LLVM_C_COMDAT_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef enum {
LLVMAnyComdatSelectionKind, ///< The linker may choose any COMDAT.
LLVMExactMatchComdatSelectionKind, ///< The data referenced by the COMDAT must
///< be the same.
LLVMLargestComdatSelectionKind, ///< The linker will choose the largest
///< COMDAT.
LLVMNoDuplicatesComdatSelectionKind, ///< No other Module may specify this
///< COMDAT.
LLVMSameSizeComdatSelectionKind ///< The data referenced by the COMDAT must be
///< the same size.
} LLVMComdatSelectionKind;
/**
* Return the Comdat in the module with the specified name. It is created
* if it didn't already exist.
*
* @see llvm::Module::getOrInsertComdat()
*/
LLVMComdatRef LLVMGetOrInsertComdat(LLVMModuleRef M, const char *Name);
/**
* Get the Comdat assigned to the given global object.
*
* @see llvm::GlobalObject::getComdat()
*/
LLVMComdatRef LLVMGetComdat(LLVMValueRef V);
/**
* Assign the Comdat to the given global object.
*
* @see llvm::GlobalObject::setComdat()
*/
void LLVMSetComdat(LLVMValueRef V, LLVMComdatRef C);
/**
* Get the conflict resolution selection kind for the Comdat.
*
* @see llvm::Comdat::getSelectionKind()
*/
LLVMComdatSelectionKind LLVMGetComdatSelectionKind(LLVMComdatRef C);
/**
* Set the conflict resolution selection kind for the Comdat.
*
* @see llvm::Comdat::setSelectionKind()
*/
void LLVMSetComdatSelectionKind(LLVMComdatRef C, LLVMComdatSelectionKind Kind);
#ifdef __cplusplus
}
#endif
#endif
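
As a usage sketch (not part of the header; LLVMAddGlobal and LLVMInt32Type are pre-existing Core.h functions, and the module M is assumed to come from elsewhere), the new Comdat API can be exercised by creating or looking up a COMDAT, picking a selection kind, and attaching it to a global:

#include "llvm-c/Comdat.h"
#include "llvm-c/Core.h"

static void attach_any_comdat(LLVMModuleRef M) {
  /* Add a global and a same-named COMDAT group, then tie them together. */
  LLVMValueRef G = LLVMAddGlobal(M, LLVMInt32Type(), "shared_counter");
  LLVMComdatRef C = LLVMGetOrInsertComdat(M, "shared_counter");
  /* The linker may keep any one copy of this group. */
  LLVMSetComdatSelectionKind(C, LLVMAnyComdatSelectionKind);
  LLVMSetComdat(G, C);
  /* Reads back LLVMAnyComdatSelectionKind. */
  (void)LLVMGetComdatSelectionKind(C);
}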


@@ -186,6 +186,12 @@ typedef enum {
LLVMProtectedVisibility /**< The GV is protected */
} LLVMVisibility;
typedef enum {
LLVMNoUnnamedAddr, /**< Address of the GV is significant. */
LLVMLocalUnnamedAddr, /**< Address of the GV is locally insignificant. */
LLVMGlobalUnnamedAddr /**< Address of the GV is globally insignificant. */
} LLVMUnnamedAddr;
typedef enum {
LLVMDefaultStorageClass = 0,
LLVMDLLImportStorageClass = 1, /**< Function to be imported from DLL. */
@@ -193,13 +199,48 @@ typedef enum {
} LLVMDLLStorageClass;
typedef enum {
LLVMCCallConv = 0,
LLVMFastCallConv = 8,
LLVMColdCallConv = 9,
LLVMWebKitJSCallConv = 12,
LLVMAnyRegCallConv = 13,
LLVMX86StdcallCallConv = 64,
LLVMX86FastcallCallConv = 65
LLVMCCallConv = 0,
LLVMFastCallConv = 8,
LLVMColdCallConv = 9,
LLVMGHCCallConv = 10,
LLVMHiPECallConv = 11,
LLVMWebKitJSCallConv = 12,
LLVMAnyRegCallConv = 13,
LLVMPreserveMostCallConv = 14,
LLVMPreserveAllCallConv = 15,
LLVMSwiftCallConv = 16,
LLVMCXXFASTTLSCallConv = 17,
LLVMX86StdcallCallConv = 64,
LLVMX86FastcallCallConv = 65,
LLVMARMAPCSCallConv = 66,
LLVMARMAAPCSCallConv = 67,
LLVMARMAAPCSVFPCallConv = 68,
LLVMMSP430INTRCallConv = 69,
LLVMX86ThisCallCallConv = 70,
LLVMPTXKernelCallConv = 71,
LLVMPTXDeviceCallConv = 72,
LLVMSPIRFUNCCallConv = 75,
LLVMSPIRKERNELCallConv = 76,
LLVMIntelOCLBICallConv = 77,
LLVMX8664SysVCallConv = 78,
LLVMWin64CallConv = 79,
LLVMX86VectorCallCallConv = 80,
LLVMHHVMCallConv = 81,
LLVMHHVMCCallConv = 82,
LLVMX86INTRCallConv = 83,
LLVMAVRINTRCallConv = 84,
LLVMAVRSIGNALCallConv = 85,
LLVMAVRBUILTINCallConv = 86,
LLVMAMDGPUVSCallConv = 87,
LLVMAMDGPUGSCallConv = 88,
LLVMAMDGPUPSCallConv = 89,
LLVMAMDGPUCSCallConv = 90,
LLVMAMDGPUKERNELCallConv = 91,
LLVMX86RegCallCallConv = 92,
LLVMAMDGPUHSCallConv = 93,
LLVMMSP430BUILTINCallConv = 94,
LLVMAMDGPULSCallConv = 95,
LLVMAMDGPUESCallConv = 96
} LLVMCallConv;
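
A short sketch of how the newly exposed values are used (a usage example, not header content; the module M is assumed to exist, and LLVMAddFunction/LLVMSetFunctionCallConv are pre-existing Core.h functions):

static void use_new_callconv(LLVMModuleRef M) {
  LLVMTypeRef FnTy = LLVMFunctionType(LLVMVoidType(), NULL, 0, 0);
  LLVMValueRef Fn = LLVMAddFunction(M, "ghc_entry", FnTy);
  /* LLVMGHCCallConv (10) is one of the values added by this revision. */
  LLVMSetFunctionCallConv(Fn, LLVMGHCCallConv);
}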
typedef enum {
@@ -335,6 +376,62 @@ typedef enum {
LLVMDSNote
} LLVMDiagnosticSeverity;
typedef enum {
LLVMInlineAsmDialectATT,
LLVMInlineAsmDialectIntel
} LLVMInlineAsmDialect;
typedef enum {
/**
* Emits an error if two values disagree, otherwise the resulting value is
* that of the operands.
*
* @see Module::ModFlagBehavior::Error
*/
LLVMModuleFlagBehaviorError,
/**
* Emits a warning if two values disagree. The result value will be the
* operand for the flag from the first module being linked.
*
* @see Module::ModFlagBehavior::Warning
*/
LLVMModuleFlagBehaviorWarning,
/**
* Adds a requirement that another module flag be present and have a
* specified value after linking is performed. The value must be a metadata
* pair, where the first element of the pair is the ID of the module flag
* to be restricted, and the second element of the pair is the value the
* module flag should be restricted to. This behavior can be used to
* restrict the allowable results (via triggering of an error) of linking
* IDs with the **Override** behavior.
*
* @see Module::ModFlagBehavior::Require
*/
LLVMModuleFlagBehaviorRequire,
/**
* Uses the specified value, regardless of the behavior or value of the
* other module. If both modules specify **Override**, but the values
* differ, an error will be emitted.
*
* @see Module::ModFlagBehavior::Override
*/
LLVMModuleFlagBehaviorOverride,
/**
* Appends the two values, which are required to be metadata nodes.
*
* @see Module::ModFlagBehavior::Append
*/
LLVMModuleFlagBehaviorAppend,
/**
* Appends the two values, which are required to be metadata
* nodes. However, duplicate entries in the second list are dropped
* during the append operation.
*
* @see Module::ModFlagBehavior::AppendUnique
*/
LLVMModuleFlagBehaviorAppendUnique,
} LLVMModuleFlagBehavior;
/**
* Attribute index are either LLVMAttributeReturnIndex,
* LLVMAttributeFunctionIndex or a parameter number from 1 to N.
@@ -565,6 +662,27 @@ const char *LLVMGetModuleIdentifier(LLVMModuleRef M, size_t *Len);
*/
void LLVMSetModuleIdentifier(LLVMModuleRef M, const char *Ident, size_t Len);
/**
* Obtain the module's original source file name.
*
* @param M Module to obtain the name of
* @param Len Out parameter which holds the length of the returned string
* @return The original source file name of M
* @see Module::getSourceFileName()
*/
const char *LLVMGetSourceFileName(LLVMModuleRef M, size_t *Len);
/**
* Set the original source file name of a module to a string Name with length
* Len.
*
* @param M The module to set the source file name of
* @param Name The string to set M's source file name to
* @param Len Length of Name
* @see Module::setSourceFileName()
*/
void LLVMSetSourceFileName(LLVMModuleRef M, const char *Name, size_t Len);
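
For instance (a sketch only, with M an existing module):

static void set_and_get_source_name(LLVMModuleRef M) {
  size_t Len;
  LLVMSetSourceFileName(M, "main.c", 6);
  const char *SrcName = LLVMGetSourceFileName(M, &Len); /* "main.c", Len == 6 */
  (void)SrcName;
}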
/**
* Obtain the data layout for a module.
*
@@ -598,6 +716,64 @@ const char *LLVMGetTarget(LLVMModuleRef M);
*/
void LLVMSetTarget(LLVMModuleRef M, const char *Triple);
/**
* Returns the module flags as an array of flag-key-value triples. The caller
* is responsible for freeing this array by calling
* \c LLVMDisposeModuleFlagsMetadata.
*
* @see Module::getModuleFlagsMetadata()
*/
LLVMModuleFlagEntry *LLVMCopyModuleFlagsMetadata(LLVMModuleRef M, size_t *Len);
/**
* Destroys module flags metadata entries.
*/
void LLVMDisposeModuleFlagsMetadata(LLVMModuleFlagEntry *Entries);
/**
* Returns the flag behavior for a module flag entry at a specific index.
*
* @see Module::ModuleFlagEntry::Behavior
*/
LLVMModuleFlagBehavior
LLVMModuleFlagEntriesGetFlagBehavior(LLVMModuleFlagEntry *Entries,
unsigned Index);
/**
* Returns the key for a module flag entry at a specific index.
*
* @see Module::ModuleFlagEntry::Key
*/
const char *LLVMModuleFlagEntriesGetKey(LLVMModuleFlagEntry *Entries,
unsigned Index, size_t *Len);
/**
* Returns the metadata for a module flag entry at a specific index.
*
* @see Module::ModuleFlagEntry::Val
*/
LLVMMetadataRef LLVMModuleFlagEntriesGetMetadata(LLVMModuleFlagEntry *Entries,
unsigned Index);
/**
* Return the module flag metadata associated with the given key, or null if
* no such flag exists.
*
* @see Module::getModuleFlag()
*/
LLVMMetadataRef LLVMGetModuleFlag(LLVMModuleRef M,
const char *Key, size_t KeyLen);
/**
* Add a module-level flag to the module-level flags metadata if it doesn't
* already exist.
*
* @see Module::addModuleFlag()
*/
void LLVMAddModuleFlag(LLVMModuleRef M, LLVMModuleFlagBehavior Behavior,
const char *Key, size_t KeyLen,
LLVMMetadataRef Val);
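
A sketch of the read side of this API (the module M is assumed to exist; <stdio.h> is needed for printf): copy the flag entries, print each key, and free the copy.

#include <stdio.h>

static void dump_module_flag_keys(LLVMModuleRef M) {
  size_t Count;
  LLVMModuleFlagEntry *Flags = LLVMCopyModuleFlagsMetadata(M, &Count);
  for (unsigned I = 0; I < Count; ++I) {
    size_t KeyLen;
    const char *Key = LLVMModuleFlagEntriesGetKey(Flags, I, &KeyLen);
    printf("module flag: %.*s\n", (int)KeyLen, Key);
  }
  LLVMDisposeModuleFlagsMetadata(Flags);
}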
/**
* Dump a representation of a module to stderr.
*
@@ -622,12 +798,37 @@ LLVMBool LLVMPrintModuleToFile(LLVMModuleRef M, const char *Filename,
*/
char *LLVMPrintModuleToString(LLVMModuleRef M);
/**
* Get inline assembly for a module.
*
* @see Module::getModuleInlineAsm()
*/
const char *LLVMGetModuleInlineAsm(LLVMModuleRef M, size_t *Len);
/**
* Set inline assembly for a module.
*
* @see Module::setModuleInlineAsm()
*/
void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm);
void LLVMSetModuleInlineAsm2(LLVMModuleRef M, const char *Asm, size_t Len);
/**
* Append inline assembly to a module.
*
* @see Module::appendModuleInlineAsm()
*/
void LLVMAppendModuleInlineAsm(LLVMModuleRef M, const char *Asm, size_t Len);
/**
* Create the specified uniqued inline asm string.
*
* @see InlineAsm::get()
*/
LLVMValueRef LLVMGetInlineAsm(LLVMTypeRef Ty,
char *AsmString, size_t AsmStringSize,
char *Constraints, size_t ConstraintsSize,
LLVMBool HasSideEffects, LLVMBool IsAlignStack,
LLVMInlineAsmDialect Dialect);
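
A sketch of the new uniqued-inline-asm constructor: build a side-effecting "nop" with an empty constraint string (the resulting value can then be called through the existing LLVMBuildCall like any other callee):

static LLVMValueRef make_nop_asm(void) {
  char AsmStr[] = "nop";
  char Constraints[] = "";
  LLVMTypeRef VoidFnTy = LLVMFunctionType(LLVMVoidType(), NULL, 0, 0);
  /* HasSideEffects=1 keeps the nop from being dropped; ATT dialect. */
  return LLVMGetInlineAsm(VoidFnTy, AsmStr, sizeof(AsmStr) - 1,
                          Constraints, 0, 1, 0, LLVMInlineAsmDialectATT);
}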
/**
* Obtain the context to which this module is associated.
@@ -718,6 +919,9 @@ LLVMValueRef LLVMGetNextFunction(LLVMValueRef Fn);
*/
LLVMValueRef LLVMGetPreviousFunction(LLVMValueRef Fn);
/** Deprecated: Use LLVMSetModuleInlineAsm2 instead. */
void LLVMSetModuleInlineAsm(LLVMModuleRef M, const char *Asm);
/**
* @}
*/
@@ -1292,14 +1496,14 @@ LLVMValueKind LLVMGetValueKind(LLVMValueRef Val);
*
* @see llvm::Value::getName()
*/
const char *LLVMGetValueName(LLVMValueRef Val);
const char *LLVMGetValueName2(LLVMValueRef Val, size_t *Length);
/**
* Set the string name of a value.
*
* @see llvm::Value::setName()
*/
void LLVMSetValueName(LLVMValueRef Val, const char *Name);
void LLVMSetValueName2(LLVMValueRef Val, const char *Name, size_t NameLen);
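
Sketch (Val is assumed to be any existing LLVMValueRef): the new length-aware accessors avoid relying on NUL termination.

static void rename_value(LLVMValueRef Val) {
  size_t NameLen;
  LLVMSetValueName2(Val, "renamed", 7);
  const char *Name = LLVMGetValueName2(Val, &NameLen); /* "renamed", NameLen == 7 */
  (void)Name;
}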
/**
* Dump a representation of a value to stderr.
@@ -1351,6 +1555,11 @@ LLVM_FOR_EACH_VALUE_SUBCLASS(LLVM_DECLARE_VALUE_CAST)
LLVMValueRef LLVMIsAMDNode(LLVMValueRef Val);
LLVMValueRef LLVMIsAMDString(LLVMValueRef Val);
/** Deprecated: Use LLVMGetValueName2 instead. */
const char *LLVMGetValueName(LLVMValueRef Val);
/** Deprecated: Use LLVMSetValueName2 instead. */
void LLVMSetValueName(LLVMValueRef Val, const char *Name);
/**
* @}
*/
@@ -1793,10 +2002,12 @@ LLVMValueRef LLVMConstExtractValue(LLVMValueRef AggConstant, unsigned *IdxList,
LLVMValueRef LLVMConstInsertValue(LLVMValueRef AggConstant,
LLVMValueRef ElementValueConstant,
unsigned *IdxList, unsigned NumIdx);
LLVMValueRef LLVMBlockAddress(LLVMValueRef F, LLVMBasicBlockRef BB);
/** Deprecated: Use LLVMGetInlineAsm instead. */
LLVMValueRef LLVMConstInlineAsm(LLVMTypeRef Ty,
const char *AsmString, const char *Constraints,
LLVMBool HasSideEffects, LLVMBool IsAlignStack);
LLVMValueRef LLVMBlockAddress(LLVMValueRef F, LLVMBasicBlockRef BB);
/**
* @}
@@ -1823,7 +2034,12 @@ LLVMVisibility LLVMGetVisibility(LLVMValueRef Global);
void LLVMSetVisibility(LLVMValueRef Global, LLVMVisibility Viz);
LLVMDLLStorageClass LLVMGetDLLStorageClass(LLVMValueRef Global);
void LLVMSetDLLStorageClass(LLVMValueRef Global, LLVMDLLStorageClass Class);
LLVMUnnamedAddr LLVMGetUnnamedAddress(LLVMValueRef Global);
void LLVMSetUnnamedAddress(LLVMValueRef Global, LLVMUnnamedAddr UnnamedAddr);
/** Deprecated: Use LLVMGetUnnamedAddress instead. */
LLVMBool LLVMHasUnnamedAddr(LLVMValueRef Global);
/** Deprecated: Use LLVMSetUnnamedAddress instead. */
void LLVMSetUnnamedAddr(LLVMValueRef Global, LLVMBool HasUnnamedAddr);
/**
@@ -1901,6 +2117,56 @@ void LLVMSetExternallyInitialized(LLVMValueRef GlobalVar, LLVMBool IsExtInit);
LLVMValueRef LLVMAddAlias(LLVMModuleRef M, LLVMTypeRef Ty, LLVMValueRef Aliasee,
const char *Name);
/**
* Obtain a GlobalAlias value from a Module by its name.
*
* The returned value corresponds to a llvm::GlobalAlias value.
*
* @see llvm::Module::getNamedAlias()
*/
LLVMValueRef LLVMGetNamedGlobalAlias(LLVMModuleRef M,
const char *Name, size_t NameLen);
/**
* Obtain an iterator to the first GlobalAlias in a Module.
*
* @see llvm::Module::alias_begin()
*/
LLVMValueRef LLVMGetFirstGlobalAlias(LLVMModuleRef M);
/**
* Obtain an iterator to the last GlobalAlias in a Module.
*
* @see llvm::Module::alias_end()
*/
LLVMValueRef LLVMGetLastGlobalAlias(LLVMModuleRef M);
/**
* Advance a GlobalAlias iterator to the next GlobalAlias.
*
* Returns NULL if the iterator was already at the end and there are no more
* global aliases.
*/
LLVMValueRef LLVMGetNextGlobalAlias(LLVMValueRef GA);
/**
* Decrement a GlobalAlias iterator to the previous GlobalAlias.
*
* Returns NULL if the iterator was already at the beginning and there are
* no previous global aliases.
*/
LLVMValueRef LLVMGetPreviousGlobalAlias(LLVMValueRef GA);
/**
* Retrieve the target value of an alias.
*/
LLVMValueRef LLVMAliasGetAliasee(LLVMValueRef Alias);
/**
* Set the target value of an alias.
*/
void LLVMAliasSetAliasee(LLVMValueRef Alias, LLVMValueRef Aliasee);
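
Sketch (the module M is assumed to exist): walk every GlobalAlias in a module with the new iterator API.

static void visit_aliases(LLVMModuleRef M) {
  for (LLVMValueRef GA = LLVMGetFirstGlobalAlias(M); GA != NULL;
       GA = LLVMGetNextGlobalAlias(GA)) {
    LLVMValueRef Target = LLVMAliasGetAliasee(GA);
    (void)Target; /* inspect, or retarget with LLVMAliasSetAliasee(GA, ...) */
  }
}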
/**
* @}
*/
@@ -2523,11 +2789,12 @@ LLVMValueRef LLVMInstructionClone(LLVMValueRef Inst);
/**
* Obtain the argument count for a call instruction.
*
* This expects an LLVMValueRef that corresponds to a llvm::CallInst or
* llvm::InvokeInst.
* This expects an LLVMValueRef that corresponds to a llvm::CallInst,
* llvm::InvokeInst, or llvm::FuncletPadInst.
*
* @see llvm::CallInst::getNumArgOperands()
* @see llvm::InvokeInst::getNumArgOperands()
* @see llvm::FuncletPadInst::getNumArgOperands()
*/
unsigned LLVMGetNumArgOperands(LLVMValueRef Instr);
@@ -2612,9 +2879,12 @@ LLVMBasicBlockRef LLVMGetNormalDest(LLVMValueRef InvokeInst);
/**
* Return the unwind destination basic block.
*
* This only works on llvm::InvokeInst instructions.
* Works on llvm::InvokeInst, llvm::CleanupReturnInst, and
* llvm::CatchSwitchInst instructions.
*
* @see llvm::InvokeInst::getUnwindDest()
* @see llvm::CleanupReturnInst::getUnwindDest()
* @see llvm::CatchSwitchInst::getUnwindDest()
*/
LLVMBasicBlockRef LLVMGetUnwindDest(LLVMValueRef InvokeInst);
@@ -2630,9 +2900,12 @@ void LLVMSetNormalDest(LLVMValueRef InvokeInst, LLVMBasicBlockRef B);
/**
* Set the unwind destination basic block.
*
* This only works on llvm::InvokeInst instructions.
* Works on llvm::InvokeInst, llvm::CleanupReturnInst, and
* llvm::CatchSwitchInst instructions.
*
* @see llvm::InvokeInst::setUnwindDest()
* @see llvm::CleanupReturnInst::setUnwindDest()
* @see llvm::CatchSwitchInst::setUnwindDest()
*/
void LLVMSetUnwindDest(LLVMValueRef InvokeInst, LLVMBasicBlockRef B);
@@ -2861,11 +3134,26 @@ LLVMValueRef LLVMBuildInvoke(LLVMBuilderRef, LLVMValueRef Fn,
LLVMValueRef *Args, unsigned NumArgs,
LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
const char *Name);
LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef);
/* Exception Handling */
LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn);
LLVMValueRef LLVMBuildLandingPad(LLVMBuilderRef B, LLVMTypeRef Ty,
LLVMValueRef PersFn, unsigned NumClauses,
const char *Name);
LLVMValueRef LLVMBuildResume(LLVMBuilderRef B, LLVMValueRef Exn);
LLVMValueRef LLVMBuildUnreachable(LLVMBuilderRef);
LLVMValueRef LLVMBuildCleanupRet(LLVMBuilderRef B, LLVMValueRef CatchPad,
LLVMBasicBlockRef BB);
LLVMValueRef LLVMBuildCatchRet(LLVMBuilderRef B, LLVMValueRef CatchPad,
LLVMBasicBlockRef BB);
LLVMValueRef LLVMBuildCatchPad(LLVMBuilderRef B, LLVMValueRef ParentPad,
LLVMValueRef *Args, unsigned NumArgs,
const char *Name);
LLVMValueRef LLVMBuildCleanupPad(LLVMBuilderRef B, LLVMValueRef ParentPad,
LLVMValueRef *Args, unsigned NumArgs,
const char *Name);
LLVMValueRef LLVMBuildCatchSwitch(LLVMBuilderRef B, LLVMValueRef ParentPad,
LLVMBasicBlockRef UnwindBB,
unsigned NumHandlers, const char *Name);
/* Add a case to the switch instruction */
void LLVMAddCase(LLVMValueRef Switch, LLVMValueRef OnVal,
@@ -2889,6 +3177,51 @@ LLVMBool LLVMIsCleanup(LLVMValueRef LandingPad);
/* Set the 'cleanup' flag in the landingpad instruction */
void LLVMSetCleanup(LLVMValueRef LandingPad, LLVMBool Val);
/* Add a destination to the catchswitch instruction */
void LLVMAddHandler(LLVMValueRef CatchSwitch, LLVMBasicBlockRef Dest);
/* Get the number of handlers on the catchswitch instruction */
unsigned LLVMGetNumHandlers(LLVMValueRef CatchSwitch);
/**
* Obtain the basic blocks acting as handlers for a catchswitch instruction.
*
* The Handlers parameter should point to a pre-allocated array of
* LLVMBasicBlockRefs at least LLVMGetNumHandlers() large. On return, the
* first LLVMGetNumHandlers() entries in the array will be populated
* with LLVMBasicBlockRef instances.
*
* @param CatchSwitch The catchswitch instruction to operate on.
* @param Handlers Memory address of an array to be filled with basic blocks.
*/
void LLVMGetHandlers(LLVMValueRef CatchSwitch, LLVMBasicBlockRef *Handlers);
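
A sketch of the pre-allocation contract described above (CatchSwitch is assumed to be a valid catchswitch instruction; <stdlib.h> is needed for malloc/free):

#include <stdlib.h>

static void collect_handlers(LLVMValueRef CatchSwitch) {
  unsigned NumHandlers = LLVMGetNumHandlers(CatchSwitch);
  LLVMBasicBlockRef *Handlers =
      malloc(NumHandlers * sizeof(LLVMBasicBlockRef));
  LLVMGetHandlers(CatchSwitch, Handlers);
  /* Handlers[0 .. NumHandlers-1] now hold the handler blocks. */
  free(Handlers);
}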
/* Funclets */
/* Get a funcletpad argument at the given index. */
LLVMValueRef LLVMGetArgOperand(LLVMValueRef Funclet, unsigned i);
/* Set a funcletpad argument at the given index. */
void LLVMSetArgOperand(LLVMValueRef Funclet, unsigned i, LLVMValueRef value);
/**
* Get the parent catchswitch instruction of a catchpad instruction.
*
* This only works on llvm::CatchPadInst instructions.
*
* @see llvm::CatchPadInst::getCatchSwitch()
*/
LLVMValueRef LLVMGetParentCatchSwitch(LLVMValueRef CatchPad);
/**
* Set the parent catchswitch instruction of a catchpad instruction.
*
* This only works on llvm::CatchPadInst instructions.
*
* @see llvm::CatchPadInst::setCatchSwitch()
*/
void LLVMSetParentCatchSwitch(LLVMValueRef CatchPad, LLVMValueRef CatchSwitch);
/* Arithmetic */
LLVMValueRef LLVMBuildAdd(LLVMBuilderRef, LLVMValueRef LHS, LLVMValueRef RHS,
const char *Name);
@@ -3186,7 +3519,7 @@ LLVMBool LLVMInitializeFunctionPassManager(LLVMPassManagerRef FPM);
@see llvm::FunctionPassManager::run(Function&) */
LLVMBool LLVMRunFunctionPassManager(LLVMPassManagerRef FPM, LLVMValueRef F);
/** Finalizes all of the function passes scheduled in in the function pass
/** Finalizes all of the function passes scheduled in the function pass
manager. Returns 1 if any of the passes modified the module, 0 otherwise.
@see llvm::FunctionPassManager::doFinalization */
LLVMBool LLVMFinalizeFunctionPassManager(LLVMPassManagerRef FPM);


@@ -0,0 +1,90 @@
/*===-- include/llvm-c/DataTypes.h - Define fixed size types ------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file contains definitions to figure out the size of _HOST_ data types.*|
|* This file is important because different host OS's define different macros,*|
|* which makes portability tough. This file exports the following *|
|* definitions: *|
|* *|
|* [u]int(32|64)_t : typedefs for signed and unsigned 32/64 bit system types*|
|* [U]INT(8|16|32|64)_(MIN|MAX) : Constants for the min and max values. *|
|* *|
|* No library is required when using these functions. *|
|* *|
|*===----------------------------------------------------------------------===*/
/* Please leave this file C-compatible. */
#ifndef LLVM_C_DATATYPES_H
#define LLVM_C_DATATYPES_H
#ifdef __cplusplus
#include <cmath>
#else
#include <math.h>
#endif
#include <inttypes.h>
#include <stdint.h>
#ifndef _MSC_VER
#if !defined(UINT32_MAX)
# error "The standard header <cstdint> is not C++11 compliant. Must #define "\
"__STDC_LIMIT_MACROS before #including llvm-c/DataTypes.h"
#endif
#if !defined(UINT32_C)
# error "The standard header <cstdint> is not C++11 compliant. Must #define "\
"__STDC_CONSTANT_MACROS before #including llvm-c/DataTypes.h"
#endif
/* Note that <inttypes.h> includes <stdint.h>, if this is a C99 system. */
#include <sys/types.h>
#ifdef _AIX
// GCC is strict about defining large constants: they must have LL modifier.
#undef INT64_MAX
#undef INT64_MIN
#endif
#else /* _MSC_VER */
#ifdef __cplusplus
#include <cstddef>
#include <cstdlib>
#else
#include <stddef.h>
#include <stdlib.h>
#endif
#include <sys/types.h>
#if defined(_WIN64)
typedef signed __int64 ssize_t;
#else
typedef signed int ssize_t;
#endif /* _WIN64 */
#endif /* _MSC_VER */
/* Set defaults for constants which we cannot find. */
#if !defined(INT64_MAX)
# define INT64_MAX 9223372036854775807LL
#endif
#if !defined(INT64_MIN)
# define INT64_MIN ((-INT64_MAX)-1)
#endif
#if !defined(UINT64_MAX)
# define UINT64_MAX 0xffffffffffffffffULL
#endif
#ifndef HUGE_VALF
#define HUGE_VALF (float)HUGE_VAL
#endif
#endif /* LLVM_C_DATATYPES_H */


@@ -52,6 +52,11 @@ typedef enum {
LLVMDIFlagBitField = 1 << 19,
LLVMDIFlagNoReturn = 1 << 20,
LLVMDIFlagMainSubprogram = 1 << 21,
LLVMDIFlagTypePassByValue = 1 << 22,
LLVMDIFlagTypePassByReference = 1 << 23,
LLVMDIFlagFixedEnum = 1 << 24,
LLVMDIFlagThunk = 1 << 25,
LLVMDIFlagTrivial = 1 << 26,
LLVMDIFlagIndirectVirtualBase = (1 << 2) | (1 << 5),
LLVMDIFlagAccessibility = LLVMDIFlagPrivate | LLVMDIFlagProtected |
LLVMDIFlagPublic,
@@ -119,6 +124,11 @@ typedef enum {
LLVMDWARFEmissionLineTablesOnly
} LLVMDWARFEmissionKind;
/**
* An LLVM DWARF type encoding.
*/
typedef unsigned LLVMDWARFTypeEncoding;
/**
* The current debug metadata version number.
*/
@@ -210,6 +220,158 @@ LLVMDIBuilderCreateFile(LLVMDIBuilderRef Builder, const char *Filename,
size_t FilenameLen, const char *Directory,
size_t DirectoryLen);
/**
* Creates a new descriptor for a module with the specified parent scope.
* \param Builder The \c DIBuilder.
* \param ParentScope The parent scope containing this module declaration.
* \param Name Module name.
* \param NameLen The length of the C string passed to \c Name.
* \param ConfigMacros A space-separated shell-quoted list of -D macro
definitions as they would appear on a command line.
* \param ConfigMacrosLen The length of the C string passed to \c ConfigMacros.
* \param IncludePath The path to the module map file.
* \param IncludePathLen The length of the C string passed to \c IncludePath.
* \param ISysRoot The Clang system root (value of -isysroot).
* \param ISysRootLen The length of the C string passed to \c ISysRoot.
*/
LLVMMetadataRef
LLVMDIBuilderCreateModule(LLVMDIBuilderRef Builder, LLVMMetadataRef ParentScope,
const char *Name, size_t NameLen,
const char *ConfigMacros, size_t ConfigMacrosLen,
const char *IncludePath, size_t IncludePathLen,
const char *ISysRoot, size_t ISysRootLen);
/**
* Creates a new descriptor for a namespace with the specified parent scope.
* \param Builder The \c DIBuilder.
* \param ParentScope The parent scope containing this module declaration.
* \param Name NameSpace name.
* \param NameLen The length of the C string passed to \c Name.
* \param ExportSymbols Whether or not the namespace exports symbols, e.g.
* this is true of C++ inline namespaces.
*/
LLVMMetadataRef
LLVMDIBuilderCreateNameSpace(LLVMDIBuilderRef Builder,
LLVMMetadataRef ParentScope,
const char *Name, size_t NameLen,
LLVMBool ExportSymbols);
/**
* Create a new descriptor for the specified subprogram.
* \param Builder The \c DIBuilder.
* \param Scope Function scope.
* \param Name Function name.
* \param NameLen Length of function name.
* \param LinkageName Mangled function name.
* \param LinkageNameLen Length of linkage name.
* \param File File where this variable is defined.
* \param LineNo Line number.
* \param Ty Function type.
* \param IsLocalToUnit True if this function is not externally visible.
* \param IsDefinition True if this is a function definition.
* \param ScopeLine Set to the line number where the scope of this subprogram begins.
* \param Flags E.g.: \c LLVMDIFlagLValueReference. These flags are
* used to emit dwarf attributes.
* \param IsOptimized True if optimization is ON.
*/
LLVMMetadataRef LLVMDIBuilderCreateFunction(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, const char *LinkageName, size_t LinkageNameLen,
LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
LLVMBool IsLocalToUnit, LLVMBool IsDefinition,
unsigned ScopeLine, LLVMDIFlags Flags, LLVMBool IsOptimized);
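
A sketch tying the new DIBuilder bindings together (M and Fn are assumed to exist; LLVMCreateDIBuilder, LLVMDIBuilderFinalize, and LLVMDisposeDIBuilder are existing functions from this header; LLVMDIBuilderCreateSubroutineType is declared further below; a real module would additionally need a compile unit from LLVMDIBuilderCreateCompileUnit):

static void attach_subprogram(LLVMModuleRef M, LLVMValueRef Fn) {
  LLVMDIBuilderRef DIB = LLVMCreateDIBuilder(M);
  LLVMMetadataRef File = LLVMDIBuilderCreateFile(DIB, "main.c", 6, ".", 1);
  /* Empty parameter list for the subroutine type. */
  LLVMMetadataRef SubTy =
      LLVMDIBuilderCreateSubroutineType(DIB, File, NULL, 0, LLVMDIFlagZero);
  LLVMMetadataRef SP = LLVMDIBuilderCreateFunction(
      DIB, File, "main", 4, /*LinkageName=*/"main", 4, File, /*LineNo=*/1,
      SubTy, /*IsLocalToUnit=*/0, /*IsDefinition=*/1, /*ScopeLine=*/1,
      LLVMDIFlagZero, /*IsOptimized=*/0);
  LLVMSetSubprogram(Fn, SP);
  LLVMDIBuilderFinalize(DIB);
  LLVMDisposeDIBuilder(DIB);
}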
/**
* Create a descriptor for a lexical block with the specified parent context.
* \param Builder The \c DIBuilder.
* \param Scope Parent lexical block.
* \param File Source file.
* \param Line The line in the source file.
* \param Column The column in the source file.
*/
LLVMMetadataRef LLVMDIBuilderCreateLexicalBlock(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope,
LLVMMetadataRef File, unsigned Line, unsigned Column);
/**
* Create a descriptor for a lexical block with a new file attached.
* \param Builder The \c DIBuilder.
* \param Scope Lexical block.
* \param File Source file.
* \param Discriminator DWARF path discriminator value.
*/
LLVMMetadataRef
LLVMDIBuilderCreateLexicalBlockFile(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
LLVMMetadataRef File,
unsigned Discriminator);
/**
* Create a descriptor for an imported namespace. Suitable for e.g. C++
* using declarations.
* \param Builder The \c DIBuilder.
* \param Scope The scope this module is imported into
* \param File File where the declaration is located.
* \param Line Line number of the declaration.
*/
LLVMMetadataRef
LLVMDIBuilderCreateImportedModuleFromNamespace(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
LLVMMetadataRef NS,
LLVMMetadataRef File,
unsigned Line);
/**
* Create a descriptor for an imported module that aliases another
* imported entity descriptor.
* \param Builder The \c DIBuilder.
* \param Scope The scope this module is imported into
* \param ImportedEntity Previous imported entity to alias.
* \param File File where the declaration is located.
* \param Line Line number of the declaration.
*/
LLVMMetadataRef
LLVMDIBuilderCreateImportedModuleFromAlias(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
LLVMMetadataRef ImportedEntity,
LLVMMetadataRef File,
unsigned Line);
/**
* Create a descriptor for an imported module.
* \param Builder The \c DIBuilder.
* \param Scope The scope this module is imported into
* \param M The module being imported here
* \param File File where the declaration is located.
* \param Line Line number of the declaration.
*/
LLVMMetadataRef
LLVMDIBuilderCreateImportedModuleFromModule(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
LLVMMetadataRef M,
LLVMMetadataRef File,
unsigned Line);
/**
* Create a descriptor for an imported function, type, or variable. Suitable
* for e.g. FORTRAN-style USE declarations.
* \param Builder The DIBuilder.
* \param Scope The scope this module is imported into.
* \param Decl The declaration (or definition) of a function, type,
or variable.
* \param File File where the declaration is located.
* \param Line Line number of the declaration.
* \param Name A name that uniquely identifies this imported declaration.
* \param NameLen The length of the C string passed to \c Name.
*/
LLVMMetadataRef
LLVMDIBuilderCreateImportedDeclaration(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
LLVMMetadataRef Decl,
LLVMMetadataRef File,
unsigned Line,
const char *Name, size_t NameLen);
/**
* Creates a new DebugLocation that describes a source location.
* \param Line The line in the source file.
@@ -225,6 +387,768 @@ LLVMDIBuilderCreateDebugLocation(LLVMContextRef Ctx, unsigned Line,
unsigned Column, LLVMMetadataRef Scope,
LLVMMetadataRef InlinedAt);
/**
* Get the line number of this debug location.
* \param Location The debug location.
*
* @see DILocation::getLine()
*/
unsigned LLVMDILocationGetLine(LLVMMetadataRef Location);
/**
* Get the column number of this debug location.
* \param Location The debug location.
*
* @see DILocation::getColumn()
*/
unsigned LLVMDILocationGetColumn(LLVMMetadataRef Location);
/**
* Get the local scope associated with this debug location.
* \param Location The debug location.
*
* @see DILocation::getScope()
*/
LLVMMetadataRef LLVMDILocationGetScope(LLVMMetadataRef Location);
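
Sketch (Ctx is an existing LLVMContextRef and SP an existing subprogram scope): create a location and read it back with the new getters.

static void inspect_location(LLVMContextRef Ctx, LLVMMetadataRef SP) {
  LLVMMetadataRef Loc =
      LLVMDIBuilderCreateDebugLocation(Ctx, /*Line=*/42, /*Column=*/7, SP,
                                       /*InlinedAt=*/NULL);
  unsigned Line = LLVMDILocationGetLine(Loc);     /* 42 */
  unsigned Column = LLVMDILocationGetColumn(Loc); /* 7 */
  LLVMMetadataRef Scope = LLVMDILocationGetScope(Loc); /* SP */
  (void)Line; (void)Column; (void)Scope;
}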
/**
* Create a type array.
* \param Builder The DIBuilder.
* \param Data The type elements.
* \param NumElements Number of type elements.
*/
LLVMMetadataRef LLVMDIBuilderGetOrCreateTypeArray(LLVMDIBuilderRef Builder,
LLVMMetadataRef *Data,
size_t NumElements);
/**
* Create subroutine type.
* \param Builder The DIBuilder.
* \param File The file in which the subroutine resides.
* \param ParameterTypes An array of subroutine parameter types. This
* includes return type at 0th index.
* \param NumParameterTypes The number of parameter types in \c ParameterTypes
* \param Flags E.g.: \c LLVMDIFlagLValueReference.
* These flags are used to emit dwarf attributes.
*/
LLVMMetadataRef
LLVMDIBuilderCreateSubroutineType(LLVMDIBuilderRef Builder,
LLVMMetadataRef File,
LLVMMetadataRef *ParameterTypes,
unsigned NumParameterTypes,
LLVMDIFlags Flags);
/**
* Create debugging information entry for an enumeration.
* \param Builder The DIBuilder.
* \param Scope Scope in which this enumeration is defined.
* \param Name Enumeration name.
* \param NameLen Length of enumeration name.
* \param File File where this member is defined.
* \param LineNumber Line number.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param Elements Enumeration elements.
* \param NumElements Number of enumeration elements.
* \param ClassTy Underlying type of a C++11/ObjC fixed enum.
*/
LLVMMetadataRef LLVMDIBuilderCreateEnumerationType(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, LLVMMetadataRef *Elements,
unsigned NumElements, LLVMMetadataRef ClassTy);
/**
* Create debugging information entry for a union.
* \param Builder The DIBuilder.
* \param Scope Scope in which this union is defined.
* \param Name Union name.
* \param NameLen Length of union name.
* \param File File where this member is defined.
* \param LineNumber Line number.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param Flags Flags to encode member attribute, e.g. private
* \param Elements Union elements.
* \param NumElements Number of union elements.
* \param RunTimeLang Optional parameter, Objective-C runtime version.
* \param UniqueId A unique identifier for the union.
* \param UniqueIdLen Length of unique identifier.
*/
LLVMMetadataRef LLVMDIBuilderCreateUnionType(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, LLVMDIFlags Flags,
LLVMMetadataRef *Elements, unsigned NumElements, unsigned RunTimeLang,
const char *UniqueId, size_t UniqueIdLen);
/**
* Create debugging information entry for an array.
* \param Builder The DIBuilder.
* \param Size Array size.
* \param AlignInBits Alignment.
* \param Ty Element type.
* \param Subscripts Subscripts.
* \param NumSubscripts Number of subscripts.
*/
LLVMMetadataRef
LLVMDIBuilderCreateArrayType(LLVMDIBuilderRef Builder, uint64_t Size,
uint32_t AlignInBits, LLVMMetadataRef Ty,
LLVMMetadataRef *Subscripts,
unsigned NumSubscripts);
/**
* Create debugging information entry for a vector type.
* \param Builder The DIBuilder.
* \param Size Vector size.
* \param AlignInBits Alignment.
* \param Ty Element type.
* \param Subscripts Subscripts.
* \param NumSubscripts Number of subscripts.
*/
LLVMMetadataRef
LLVMDIBuilderCreateVectorType(LLVMDIBuilderRef Builder, uint64_t Size,
uint32_t AlignInBits, LLVMMetadataRef Ty,
LLVMMetadataRef *Subscripts,
unsigned NumSubscripts);
/**
* Create a DWARF unspecified type.
* \param Builder The DIBuilder.
* \param Name The unspecified type's name.
* \param NameLen Length of type name.
*/
LLVMMetadataRef
LLVMDIBuilderCreateUnspecifiedType(LLVMDIBuilderRef Builder, const char *Name,
size_t NameLen);
/**
* Create debugging information entry for a basic
* type.
* \param Builder The DIBuilder.
* \param Name Type name.
* \param NameLen Length of type name.
* \param SizeInBits Size of the type.
* \param Encoding DWARF encoding code, e.g. \c LLVMDWARFTypeEncoding_float.
*/
LLVMMetadataRef
LLVMDIBuilderCreateBasicType(LLVMDIBuilderRef Builder, const char *Name,
size_t NameLen, uint64_t SizeInBits,
LLVMDWARFTypeEncoding Encoding);
/**
* Create debugging information entry for a pointer.
* \param Builder The DIBuilder.
* \param PointeeTy Type pointed by this pointer.
* \param SizeInBits Size.
* \param AlignInBits Alignment. (optional, pass 0 to ignore)
* \param AddressSpace DWARF address space. (optional, pass 0 to ignore)
* \param Name Pointer type name. (optional)
* \param NameLen Length of pointer type name. (optional)
*/
LLVMMetadataRef LLVMDIBuilderCreatePointerType(
LLVMDIBuilderRef Builder, LLVMMetadataRef PointeeTy,
uint64_t SizeInBits, uint32_t AlignInBits, unsigned AddressSpace,
const char *Name, size_t NameLen);
/**
* Create debugging information entry for a struct.
* \param Builder The DIBuilder.
* \param Scope Scope in which this struct is defined.
* \param Name Struct name.
* \param NameLen Struct name length.
* \param File File where this member is defined.
* \param LineNumber Line number.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param Flags Flags to encode member attribute, e.g. private
* \param Elements Struct elements.
* \param NumElements Number of struct elements.
* \param RunTimeLang Optional parameter, Objective-C runtime version.
* \param VTableHolder The object containing the vtable for the struct.
* \param UniqueId A unique identifier for the struct.
* \param UniqueIdLen Length of the unique identifier for the struct.
*/
LLVMMetadataRef LLVMDIBuilderCreateStructType(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits, uint32_t AlignInBits, LLVMDIFlags Flags,
LLVMMetadataRef DerivedFrom, LLVMMetadataRef *Elements,
unsigned NumElements, unsigned RunTimeLang, LLVMMetadataRef VTableHolder,
const char *UniqueId, size_t UniqueIdLen);
/**
* Create debugging information entry for a member.
* \param Builder The DIBuilder.
* \param Scope Member scope.
* \param Name Member name.
* \param NameLen Length of member name.
* \param File File where this member is defined.
* \param LineNo Line number.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param OffsetInBits Member offset.
* \param Flags Flags to encode member attribute, e.g. private
* \param Ty Parent type.
*/
LLVMMetadataRef LLVMDIBuilderCreateMemberType(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNo,
uint64_t SizeInBits, uint32_t AlignInBits, uint64_t OffsetInBits,
LLVMDIFlags Flags, LLVMMetadataRef Ty);
/**
* Create debugging information entry for a
* C++ static data member.
* \param Builder The DIBuilder.
* \param Scope Member scope.
* \param Name Member name.
* \param NameLen Length of member name.
* \param File File where this member is declared.
* \param LineNumber Line number.
* \param Type Type of the static member.
* \param Flags Flags to encode member attribute, e.g. private.
* \param ConstantVal Const initializer of the member.
* \param AlignInBits Member alignment.
*/
LLVMMetadataRef
LLVMDIBuilderCreateStaticMemberType(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNumber,
LLVMMetadataRef Type, LLVMDIFlags Flags, LLVMValueRef ConstantVal,
uint32_t AlignInBits);
/**
* Create debugging information entry for a pointer to member.
* \param Builder The DIBuilder.
* \param PointeeType Type pointed to by this pointer.
* \param ClassType The class type whose members this pointer points to.
* \param SizeInBits Size.
* \param AlignInBits Alignment.
* \param Flags Flags.
*/
LLVMMetadataRef
LLVMDIBuilderCreateMemberPointerType(LLVMDIBuilderRef Builder,
LLVMMetadataRef PointeeType,
LLVMMetadataRef ClassType,
uint64_t SizeInBits,
uint32_t AlignInBits,
LLVMDIFlags Flags);
/**
* Create debugging information entry for Objective-C instance variable.
* \param Builder The DIBuilder.
* \param Name Member name.
* \param NameLen The length of the C string passed to \c Name.
* \param File File where this member is defined.
* \param LineNo Line number.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param OffsetInBits Member offset.
* \param Flags Flags to encode member attribute, e.g. private
* \param Ty Parent type.
* \param PropertyNode Property associated with this ivar.
*/
LLVMMetadataRef
LLVMDIBuilderCreateObjCIVar(LLVMDIBuilderRef Builder,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo,
uint64_t SizeInBits, uint32_t AlignInBits,
uint64_t OffsetInBits, LLVMDIFlags Flags,
LLVMMetadataRef Ty, LLVMMetadataRef PropertyNode);
/**
* Create debugging information entry for Objective-C property.
* \param Builder The DIBuilder.
* \param Name Property name.
* \param NameLen The length of the C string passed to \c Name.
* \param File File where this property is defined.
* \param LineNo Line number.
* \param GetterName Name of the Objective C property getter selector.
* \param GetterNameLen The length of the C string passed to \c GetterName.
* \param SetterName Name of the Objective C property setter selector.
* \param SetterNameLen The length of the C string passed to \c SetterName.
* \param PropertyAttributes Objective C property attributes.
* \param Ty Type.
*/
LLVMMetadataRef
LLVMDIBuilderCreateObjCProperty(LLVMDIBuilderRef Builder,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo,
const char *GetterName, size_t GetterNameLen,
const char *SetterName, size_t SetterNameLen,
unsigned PropertyAttributes,
LLVMMetadataRef Ty);
/**
* Create a uniqued DIType* clone with FlagObjectPointer and FlagArtificial set.
* \param Builder The DIBuilder.
* \param Type The underlying type to which this pointer points.
*/
LLVMMetadataRef
LLVMDIBuilderCreateObjectPointerType(LLVMDIBuilderRef Builder,
LLVMMetadataRef Type);
/**
* Create debugging information entry for a qualified
* type, e.g. 'const int'.
* \param Builder The DIBuilder.
* \param Tag Tag identifying type,
* e.g. LLVMDWARFTypeQualifier_volatile_type
* \param Type Base Type.
*/
LLVMMetadataRef
LLVMDIBuilderCreateQualifiedType(LLVMDIBuilderRef Builder, unsigned Tag,
LLVMMetadataRef Type);
/**
* Create debugging information entry for a c++
* style reference or rvalue reference type.
* \param Builder The DIBuilder.
* \param Tag Tag identifying type,
* \param Type Base Type.
*/
LLVMMetadataRef
LLVMDIBuilderCreateReferenceType(LLVMDIBuilderRef Builder, unsigned Tag,
LLVMMetadataRef Type);
/**
* Create C++11 nullptr type.
* \param Builder The DIBuilder.
*/
LLVMMetadataRef
LLVMDIBuilderCreateNullPtrType(LLVMDIBuilderRef Builder);
/**
* Create debugging information entry for a typedef.
* \param Builder The DIBuilder.
* \param Type Original type.
* \param Name Typedef name.
* \param File File where this type is defined.
* \param LineNo Line number.
* \param Scope The surrounding context for the typedef.
*/
LLVMMetadataRef
LLVMDIBuilderCreateTypedef(LLVMDIBuilderRef Builder, LLVMMetadataRef Type,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Scope);
/**
* Create debugging information entry to establish inheritance relationship
* between two types.
* \param Builder The DIBuilder.
* \param Ty Original type.
* \param BaseTy Base type. Ty inherits from BaseTy.
* \param BaseOffset Base offset.
* \param VBPtrOffset Virtual base pointer offset.
* \param Flags Flags to describe inheritance attribute, e.g. private
*/
LLVMMetadataRef
LLVMDIBuilderCreateInheritance(LLVMDIBuilderRef Builder,
LLVMMetadataRef Ty, LLVMMetadataRef BaseTy,
uint64_t BaseOffset, uint32_t VBPtrOffset,
LLVMDIFlags Flags);
/**
* Create a permanent forward-declared type.
* \param Builder The DIBuilder.
* \param Tag A unique tag for this type.
* \param Name Type name.
* \param NameLen Length of type name.
* \param Scope Type scope.
* \param File File where this type is defined.
* \param Line Line number where this type is defined.
* \param RuntimeLang Indicates runtime version for languages like
* Objective-C.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param UniqueIdentifier A unique identifier for the type.
* \param UniqueIdentifierLen Length of the unique identifier.
*/
LLVMMetadataRef LLVMDIBuilderCreateForwardDecl(
LLVMDIBuilderRef Builder, unsigned Tag, const char *Name,
size_t NameLen, LLVMMetadataRef Scope, LLVMMetadataRef File, unsigned Line,
unsigned RuntimeLang, uint64_t SizeInBits, uint32_t AlignInBits,
const char *UniqueIdentifier, size_t UniqueIdentifierLen);
/**
* Create a temporary forward-declared type.
* \param Builder The DIBuilder.
* \param Tag A unique tag for this type.
* \param Name Type name.
* \param NameLen Length of type name.
* \param Scope Type scope.
* \param File File where this type is defined.
* \param Line Line number where this type is defined.
* \param RuntimeLang Indicates runtime version for languages like
* Objective-C.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param Flags Flags.
* \param UniqueIdentifier A unique identifier for the type.
* \param UniqueIdentifierLen Length of the unique identifier.
*/
LLVMMetadataRef
LLVMDIBuilderCreateReplaceableCompositeType(
LLVMDIBuilderRef Builder, unsigned Tag, const char *Name,
size_t NameLen, LLVMMetadataRef Scope, LLVMMetadataRef File, unsigned Line,
unsigned RuntimeLang, uint64_t SizeInBits, uint32_t AlignInBits,
LLVMDIFlags Flags, const char *UniqueIdentifier,
size_t UniqueIdentifierLen);
/**
* Create debugging information entry for a bit field member.
* \param Builder The DIBuilder.
* \param Scope Member scope.
* \param Name Member name.
* \param NameLen Length of member name.
* \param File File where this member is defined.
* \param LineNumber Line number.
* \param SizeInBits Member size.
* \param OffsetInBits Member offset.
* \param StorageOffsetInBits Member storage offset.
* \param Flags Flags to encode member attribute.
* \param Type Parent type.
*/
LLVMMetadataRef
LLVMDIBuilderCreateBitFieldMemberType(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber,
uint64_t SizeInBits,
uint64_t OffsetInBits,
uint64_t StorageOffsetInBits,
LLVMDIFlags Flags, LLVMMetadataRef Type);
/**
* Create debugging information entry for a class.
* \param Scope Scope in which this class is defined.
* \param Name Class name.
* \param NameLen The length of the C string passed to \c Name.
* \param File File where this member is defined.
* \param LineNumber Line number.
* \param SizeInBits Member size.
* \param AlignInBits Member alignment.
* \param OffsetInBits Member offset.
* \param Flags Flags to encode member attribute, e.g. private.
* \param DerivedFrom Debug info of the base class of this type.
* \param Elements Class members.
* \param NumElements Number of class elements.
* \param VTableHolder Debug info of the base class that contains vtable
* for this type. This is used in
* DW_AT_containing_type. See DWARF documentation
* for more info.
* \param TemplateParamsNode Template type parameters.
* \param UniqueIdentifier A unique identifier for the type.
* \param UniqueIdentifierLen Length of the unique identifier.
*/
LLVMMetadataRef LLVMDIBuilderCreateClassType(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope, const char *Name, size_t NameLen,
LLVMMetadataRef File, unsigned LineNumber, uint64_t SizeInBits,
uint32_t AlignInBits, uint64_t OffsetInBits, LLVMDIFlags Flags,
LLVMMetadataRef DerivedFrom,
LLVMMetadataRef *Elements, unsigned NumElements,
LLVMMetadataRef VTableHolder, LLVMMetadataRef TemplateParamsNode,
const char *UniqueIdentifier, size_t UniqueIdentifierLen);
/**
* Create a uniqued DIType* clone with FlagArtificial set.
* \param Builder The DIBuilder.
* \param Type The underlying type.
*/
LLVMMetadataRef
LLVMDIBuilderCreateArtificialType(LLVMDIBuilderRef Builder,
LLVMMetadataRef Type);
/**
* Get the name of this DIType.
* \param DType The DIType.
* \param Length The length of the returned string.
*
* @see DIType::getName()
*/
const char *LLVMDITypeGetName(LLVMMetadataRef DType, size_t *Length);
/**
* Get the size of this DIType in bits.
* \param DType The DIType.
*
* @see DIType::getSizeInBits()
*/
uint64_t LLVMDITypeGetSizeInBits(LLVMMetadataRef DType);
/**
* Get the offset of this DIType in bits.
* \param DType The DIType.
*
* @see DIType::getOffsetInBits()
*/
uint64_t LLVMDITypeGetOffsetInBits(LLVMMetadataRef DType);
/**
* Get the alignment of this DIType in bits.
* \param DType The DIType.
*
* @see DIType::getAlignInBits()
*/
uint32_t LLVMDITypeGetAlignInBits(LLVMMetadataRef DType);
/**
* Get the source line where this DIType is declared.
* \param DType The DIType.
*
* @see DIType::getLine()
*/
unsigned LLVMDITypeGetLine(LLVMMetadataRef DType);
/**
* Get the flags associated with this DIType.
* \param DType The DIType.
*
* @see DIType::getFlags()
*/
LLVMDIFlags LLVMDITypeGetFlags(LLVMMetadataRef DType);
/**
* Create a descriptor for a value range.
* \param Builder The DIBuilder.
* \param LowerBound Lower bound of the subrange, e.g. 0 for C, 1 for Fortran.
* \param Count Count of elements in the subrange.
*/
LLVMMetadataRef LLVMDIBuilderGetOrCreateSubrange(LLVMDIBuilderRef Builder,
int64_t LowerBound,
int64_t Count);
/**
* Create an array of DI Nodes.
* \param Builder The DIBuilder.
* \param Data The DI Node elements.
* \param NumElements Number of DI Node elements.
*/
LLVMMetadataRef LLVMDIBuilderGetOrCreateArray(LLVMDIBuilderRef Builder,
LLVMMetadataRef *Data,
size_t NumElements);
/**
* Create a new descriptor for the specified variable which has a complex
* address expression for its address.
* \param Builder The DIBuilder.
* \param Addr An array of complex address operations.
* \param Length Length of the address operation array.
*/
LLVMMetadataRef LLVMDIBuilderCreateExpression(LLVMDIBuilderRef Builder,
int64_t *Addr, size_t Length);
/**
* Create a new descriptor for the specified variable that does not have an
* address, but does have a constant value.
* \param Builder The DIBuilder.
* \param Value The constant value.
*/
LLVMMetadataRef
LLVMDIBuilderCreateConstantValueExpression(LLVMDIBuilderRef Builder,
int64_t Value);
/**
* Create a new descriptor for the specified variable.
* \param Scope Variable scope.
* \param Name Name of the variable.
* \param NameLen The length of the C string passed to \c Name.
* \param Linkage Mangled name of the variable.
* \param LinkLen The length of the C string passed to \c Linkage.
* \param File File where this variable is defined.
* \param LineNo Line number.
* \param Ty Variable Type.
* \param LocalToUnit Boolean flag indicating whether this variable is
* externally visible or not.
* \param Expr The location of the global relative to the attached
* GlobalVariable.
* \param Decl Reference to the corresponding declaration.
* \param AlignInBits Variable alignment (or 0 if no alignment attr was
* specified).
*/
LLVMMetadataRef
LLVMDIBuilderCreateGlobalVariableExpression(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
const char *Linkage, size_t LinkLen,
LLVMMetadataRef File,
unsigned LineNo,
LLVMMetadataRef Ty,
LLVMBool LocalToUnit,
LLVMMetadataRef Expr,
LLVMMetadataRef Decl,
uint32_t AlignInBits);
/**
* Create a new temporary \c MDNode. Suitable for use in constructing cyclic
* \c MDNode structures. A temporary \c MDNode is not uniqued, may be RAUW'd,
* and must be manually deleted with \c LLVMDisposeTemporaryMDNode.
* \param Ctx The context in which to construct the temporary node.
* \param Data The metadata elements.
* \param NumElements Number of metadata elements.
*/
LLVMMetadataRef LLVMTemporaryMDNode(LLVMContextRef Ctx, LLVMMetadataRef *Data,
size_t NumElements);
/**
* Deallocate a temporary node.
*
* Calls \c replaceAllUsesWith(nullptr) before deleting, so any remaining
* references will be reset.
* \param TempNode The temporary metadata node.
*/
void LLVMDisposeTemporaryMDNode(LLVMMetadataRef TempNode);
/**
* Replace all uses of temporary metadata.
* \param TempTargetMetadata The temporary metadata node.
* \param Replacement The replacement metadata node.
*/
void LLVMMetadataReplaceAllUsesWith(LLVMMetadataRef TempTargetMetadata,
LLVMMetadataRef Replacement);
/**
* Create a new descriptor for the specified global variable that is temporary
* and meant to be RAUWed.
* \param Scope Variable scope.
* \param Name Name of the variable.
* \param NameLen The length of the C string passed to \c Name.
* \param Linkage Mangled name of the variable.
* \param LnkLen The length of the C string passed to \c Linkage.
* \param File File where this variable is defined.
* \param LineNo Line number.
* \param Ty Variable Type.
* \param LocalToUnit Boolean flag indicating whether this variable is
* externally visible or not.
* \param Decl Reference to the corresponding declaration.
* \param AlignInBits Variable alignment (or 0 if no alignment attr was
* specified).
*/
LLVMMetadataRef
LLVMDIBuilderCreateTempGlobalVariableFwdDecl(LLVMDIBuilderRef Builder,
LLVMMetadataRef Scope,
const char *Name, size_t NameLen,
const char *Linkage, size_t LnkLen,
LLVMMetadataRef File,
unsigned LineNo,
LLVMMetadataRef Ty,
LLVMBool LocalToUnit,
LLVMMetadataRef Decl,
uint32_t AlignInBits);
/**
* Insert a new llvm.dbg.declare intrinsic call before the given instruction.
* \param Builder The DIBuilder.
* \param Storage The storage of the variable to declare.
* \param VarInfo The variable's debug info descriptor.
* \param Expr A complex location expression for the variable.
* \param DebugLoc Debug info location.
* \param Instr Instruction acting as a location for the new intrinsic.
*/
LLVMValueRef LLVMDIBuilderInsertDeclareBefore(
LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMValueRef Instr);
/**
* Insert a new llvm.dbg.declare intrinsic call at the end of the given basic
* block. If the basic block has a terminator instruction, the intrinsic is
* inserted before that terminator instruction.
* \param Builder The DIBuilder.
* \param Storage The storage of the variable to declare.
* \param VarInfo The variable's debug info descriptor.
* \param Expr A complex location expression for the variable.
* \param DebugLoc Debug info location.
* \param Block Basic block acting as a location for the new intrinsic.
*/
LLVMValueRef LLVMDIBuilderInsertDeclareAtEnd(
LLVMDIBuilderRef Builder, LLVMValueRef Storage, LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr, LLVMMetadataRef DebugLoc, LLVMBasicBlockRef Block);
/**
* Insert a new llvm.dbg.value intrinsic call before the given instruction.
* \param Builder The DIBuilder.
* \param Val The value of the variable.
* \param VarInfo The variable's debug info descriptor.
* \param Expr A complex location expression for the variable.
* \param DebugLoc Debug info location.
* \param Instr Instruction acting as a location for the new intrinsic.
*/
LLVMValueRef LLVMDIBuilderInsertDbgValueBefore(LLVMDIBuilderRef Builder,
LLVMValueRef Val,
LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr,
LLVMMetadataRef DebugLoc,
LLVMValueRef Instr);
/**
* Insert a new llvm.dbg.value intrinsic call at the end of the given basic
* block. If the basic block has a terminator instruction, the intrinsic is
* inserted before that terminator instruction.
* \param Builder The DIBuilder.
* \param Val The value of the variable.
* \param VarInfo The variable's debug info descriptor.
* \param Expr A complex location expression for the variable.
* \param DebugLoc Debug info location.
* \param Block Basic block acting as a location for the new intrinsic.
*/
LLVMValueRef LLVMDIBuilderInsertDbgValueAtEnd(LLVMDIBuilderRef Builder,
LLVMValueRef Val,
LLVMMetadataRef VarInfo,
LLVMMetadataRef Expr,
LLVMMetadataRef DebugLoc,
LLVMBasicBlockRef Block);
/**
* Create a new descriptor for a local auto variable.
* \param Builder The DIBuilder.
* \param Scope The local scope the variable is declared in.
* \param Name Variable name.
* \param NameLen Length of variable name.
* \param File File where this variable is defined.
* \param LineNo Line number.
* \param Ty Metadata describing the type of the variable.
* \param AlwaysPreserve If true, this descriptor will survive optimizations.
* \param Flags Flags.
* \param AlignInBits Variable alignment.
*/
LLVMMetadataRef LLVMDIBuilderCreateAutoVariable(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, LLVMMetadataRef File, unsigned LineNo, LLVMMetadataRef Ty,
LLVMBool AlwaysPreserve, LLVMDIFlags Flags, uint32_t AlignInBits);
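
A sketch combining the local-variable and dbg.declare bindings above (DIB, Ctx, SP, File, an LLVMMetadataRef IntDITy e.g. from LLVMDIBuilderCreateBasicType, an alloca Storage, and its basic block Block are all assumed to exist from earlier setup):

static void declare_local(LLVMDIBuilderRef DIB, LLVMContextRef Ctx,
                          LLVMMetadataRef SP, LLVMMetadataRef File,
                          LLVMMetadataRef IntDITy, LLVMValueRef Storage,
                          LLVMBasicBlockRef Block) {
  LLVMMetadataRef Var = LLVMDIBuilderCreateAutoVariable(
      DIB, SP, "x", 1, File, /*LineNo=*/5, IntDITy,
      /*AlwaysPreserve=*/1, LLVMDIFlagZero, /*AlignInBits=*/0);
  /* An empty expression: the variable lives directly in Storage. */
  LLVMMetadataRef EmptyExpr = LLVMDIBuilderCreateExpression(DIB, NULL, 0);
  LLVMMetadataRef Loc = LLVMDIBuilderCreateDebugLocation(Ctx, 5, 1, SP, NULL);
  LLVMDIBuilderInsertDeclareAtEnd(DIB, Storage, Var, EmptyExpr, Loc, Block);
}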
/**
* Create a new descriptor for a function parameter variable.
* \param Builder The DIBuilder.
* \param Scope The local scope the variable is declared in.
* \param Name Variable name.
* \param NameLen Length of variable name.
* \param ArgNo Unique argument number for this variable; starts at 1.
* \param File File where this variable is defined.
* \param LineNo Line number.
* \param Ty Metadata describing the type of the variable.
* \param AlwaysPreserve If true, this descriptor will survive optimizations.
* \param Flags Flags.
*/
LLVMMetadataRef LLVMDIBuilderCreateParameterVariable(
LLVMDIBuilderRef Builder, LLVMMetadataRef Scope, const char *Name,
size_t NameLen, unsigned ArgNo, LLVMMetadataRef File, unsigned LineNo,
LLVMMetadataRef Ty, LLVMBool AlwaysPreserve, LLVMDIFlags Flags);
/**
* Get the metadata of the subprogram attached to a function.
*
* @see llvm::Function::getSubprogram()
*/
LLVMMetadataRef LLVMGetSubprogram(LLVMValueRef Func);
/**
* Set the subprogram attached to a function.
*
* @see llvm::Function::setSubprogram()
*/
void LLVMSetSubprogram(LLVMValueRef Func, LLVMMetadataRef SP);
#ifdef __cplusplus
} /* end extern "C" */
#endif


@@ -15,12 +15,7 @@
#ifndef LLVM_C_DISASSEMBLER_H
#define LLVM_C_DISASSEMBLER_H
#include "llvm/Support/DataTypes.h"
#ifdef __cplusplus
#include <cstddef>
#else
#include <stddef.h>
#endif
#include "llvm-c/DisassemblerTypes.h"
/**
* @defgroup LLVMCDisassembler Disassembler
@@ -29,146 +24,6 @@
* @{
*/
/**
* An opaque reference to a disassembler context.
*/
typedef void *LLVMDisasmContextRef;
/**
* The type for the operand information call back function. This is called to
* get the symbolic information for an operand of an instruction. Typically
* this is from the relocation information, symbol table, etc. That block of
* information is saved when the disassembler context is created and passed to
* the call back in the DisInfo parameter. The instruction containing the
* operand is at the PC parameter. For some instruction sets, there can be more
* than one operand with symbolic information. To determine the symbolic operand
* information for each operand, the bytes for the specific operand in the
* instruction are specified by the Offset parameter and its byte width by the
* Size parameter. For instruction sets with fixed widths and one symbolic
* operand per instruction, the Offset parameter will be zero and the Size
* parameter will be the instruction width. The information is returned in TagBuf and is
* Triple specific with its specific information defined by the value of
* TagType for that Triple. If symbolic information is returned the function
* returns 1, otherwise it returns 0.
*/
typedef int (*LLVMOpInfoCallback)(void *DisInfo, uint64_t PC,
uint64_t Offset, uint64_t Size,
int TagType, void *TagBuf);
/**
* The initial support in LLVM MC for the most general form of a relocatable
* expression is "AddSymbol - SubtractSymbol + Offset". For some Darwin targets
* this full form is encoded in the relocation information so that AddSymbol and
* SubtractSymbol can be link-edited independently of each other. Many other
* platforms only allow a relocatable expression of the form AddSymbol + Offset
* to be encoded.
*
* The LLVMOpInfoCallback() for the TagType value of 1 uses the struct
* LLVMOpInfo1. The value of the relocatable expression for the operand,
* including any PC adjustment, is passed in to the call back in the Value
* field. The symbolic information about the operand is returned using all
* the fields of the structure with the Offset of the relocatable expression
* returned in the Value field. It is possible that some symbols in the
* relocatable expression were assembly temporary symbols, for example
* "Ldata - LpicBase + constant", and only the Values of the symbols without
* symbol names are present in the relocation information. The VariantKind
* type is one of the Target specific #defines below and is used to print
* operands like "_foo@GOT", ":lower16:_foo", etc.
*/
struct LLVMOpInfoSymbol1 {
uint64_t Present; /* 1 if this symbol is present */
const char *Name; /* symbol name if not NULL */
uint64_t Value; /* symbol value if name is NULL */
};
struct LLVMOpInfo1 {
struct LLVMOpInfoSymbol1 AddSymbol;
struct LLVMOpInfoSymbol1 SubtractSymbol;
uint64_t Value;
uint64_t VariantKind;
};
/**
* The operand VariantKinds for symbolic disassembly.
*/
#define LLVMDisassembler_VariantKind_None 0 /* all targets */
/**
* The ARM target VariantKinds.
*/
#define LLVMDisassembler_VariantKind_ARM_HI16 1 /* :upper16: */
#define LLVMDisassembler_VariantKind_ARM_LO16 2 /* :lower16: */
/**
* The ARM64 target VariantKinds.
*/
#define LLVMDisassembler_VariantKind_ARM64_PAGE 1 /* @page */
#define LLVMDisassembler_VariantKind_ARM64_PAGEOFF 2 /* @pageoff */
#define LLVMDisassembler_VariantKind_ARM64_GOTPAGE 3 /* @gotpage */
#define LLVMDisassembler_VariantKind_ARM64_GOTPAGEOFF 4 /* @gotpageoff */
#define LLVMDisassembler_VariantKind_ARM64_TLVP 5 /* @tvlppage */
#define LLVMDisassembler_VariantKind_ARM64_TLVOFF 6 /* @tvlppageoff */
/**
* The type for the symbol lookup function. This may be called by the
* disassembler for things like adding a comment for a PC plus a constant
* offset load instruction to use a symbol name instead of a load address value.
* It is passed the block of information that was saved when the disassembler
* context was created, and the ReferenceValue to look up as a symbol. If no
* symbol is found for the ReferenceValue, NULL is returned. The ReferenceType
* of the instruction is passed indirectly, as is the PC of the instruction in
* ReferencePC. If the output reference can be determined, its type is returned
* indirectly in ReferenceType, along with ReferenceName if any; otherwise
* ReferenceName is set to NULL.
*/
typedef const char *(*LLVMSymbolLookupCallback)(void *DisInfo,
uint64_t ReferenceValue,
uint64_t *ReferenceType,
uint64_t ReferencePC,
const char **ReferenceName);
/**
* The reference types on input and output.
*/
/* No input reference type or no output reference type. */
#define LLVMDisassembler_ReferenceType_InOut_None 0
/* The input reference is from a branch instruction. */
#define LLVMDisassembler_ReferenceType_In_Branch 1
/* The input reference is from a PC relative load instruction. */
#define LLVMDisassembler_ReferenceType_In_PCrel_Load 2
/* The input reference is from an ARM64::ADRP instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADRP 0x100000001
/* The input reference is from an ARM64::ADDXri instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADDXri 0x100000002
/* The input reference is from an ARM64::LDRXui instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXui 0x100000003
/* The input reference is from an ARM64::LDRXl instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXl 0x100000004
/* The input reference is from an ARM64::ADR instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADR 0x100000005
/* The output reference is to a symbol stub. */
#define LLVMDisassembler_ReferenceType_Out_SymbolStub 1
/* The output reference is to a symbol address in a literal pool. */
#define LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr 2
/* The output reference is to a cstring address in a literal pool. */
#define LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr 3
/* The output reference is to an Objective-C CoreFoundation string. */
#define LLVMDisassembler_ReferenceType_Out_Objc_CFString_Ref 4
/* The output reference is to an Objective-C message. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Message 5
/* The output reference is to an Objective-C message ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Message_Ref 6
/* The output reference is to an Objective-C selector ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Selector_Ref 7
/* The output reference is to an Objective-C class ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Class_Ref 8
/* The output reference is to a C++ symbol name. */
#define LLVMDisassembler_ReferenceType_DeMangled_Name 9
#ifdef __cplusplus
extern "C" {
#endif /* !defined(__cplusplus) */


@ -0,0 +1,160 @@
/*===-- llvm-c/DisassemblerTypes.h ---------------------------------*- C -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*/
#ifndef LLVM_DISASSEMBLER_TYPES_H
#define LLVM_DISASSEMBLER_TYPES_H
#include "llvm-c/DataTypes.h"
#ifdef __cplusplus
#include <cstddef>
#else
#include <stddef.h>
#endif
/**
* An opaque reference to a disassembler context.
*/
typedef void *LLVMDisasmContextRef;
/**
* The type for the operand information call back function. This is called to
* get the symbolic information for an operand of an instruction. Typically
* this is from the relocation information, symbol table, etc. That block of
* information is saved when the disassembler context is created and passed to
* the call back in the DisInfo parameter. The instruction containing the
* operand is at the PC parameter. For some instruction sets, there can be
* more than one operand with symbolic information. To determine the symbolic
* operand information for each operand, the bytes for the specific operand in
* the instruction are specified by the Offset parameter and its byte width by
* the Size parameter. For instruction sets with fixed widths and one symbolic
* operand per instruction, the Offset parameter will be zero and the Size parameter
* will be the instruction width. The information is returned in TagBuf and is
* Triple specific with its specific information defined by the value of
* TagType for that Triple. If symbolic information is returned the function
* returns 1, otherwise it returns 0.
*/
typedef int (*LLVMOpInfoCallback)(void *DisInfo, uint64_t PC,
uint64_t Offset, uint64_t Size,
int TagType, void *TagBuf);
/**
* The initial support in LLVM MC for the most general form of a relocatable
* expression is "AddSymbol - SubtractSymbol + Offset". For some Darwin targets
* this full form is encoded in the relocation information so that AddSymbol and
* SubtractSymbol can be link-edited independently of each other. Many other
* platforms only allow a relocatable expression of the form AddSymbol + Offset
* to be encoded.
*
* The LLVMOpInfoCallback() for the TagType value of 1 uses the struct
* LLVMOpInfo1. The value of the relocatable expression for the operand,
* including any PC adjustment, is passed in to the call back in the Value
* field. The symbolic information about the operand is returned using all
* the fields of the structure with the Offset of the relocatable expression
* returned in the Value field. It is possible that some symbols in the
* relocatable expression were assembly temporary symbols, for example
* "Ldata - LpicBase + constant", and only the Values of the symbols without
* symbol names are present in the relocation information. The VariantKind
* type is one of the Target specific #defines below and is used to print
* operands like "_foo@GOT", ":lower16:_foo", etc.
*/
struct LLVMOpInfoSymbol1 {
uint64_t Present; /* 1 if this symbol is present */
const char *Name; /* symbol name if not NULL */
uint64_t Value; /* symbol value if name is NULL */
};
struct LLVMOpInfo1 {
struct LLVMOpInfoSymbol1 AddSymbol;
struct LLVMOpInfoSymbol1 SubtractSymbol;
uint64_t Value;
uint64_t VariantKind;
};
/**
* The operand VariantKinds for symbolic disassembly.
*/
#define LLVMDisassembler_VariantKind_None 0 /* all targets */
/**
* The ARM target VariantKinds.
*/
#define LLVMDisassembler_VariantKind_ARM_HI16 1 /* :upper16: */
#define LLVMDisassembler_VariantKind_ARM_LO16 2 /* :lower16: */
/**
* The ARM64 target VariantKinds.
*/
#define LLVMDisassembler_VariantKind_ARM64_PAGE 1 /* @page */
#define LLVMDisassembler_VariantKind_ARM64_PAGEOFF 2 /* @pageoff */
#define LLVMDisassembler_VariantKind_ARM64_GOTPAGE 3 /* @gotpage */
#define LLVMDisassembler_VariantKind_ARM64_GOTPAGEOFF 4 /* @gotpageoff */
#define LLVMDisassembler_VariantKind_ARM64_TLVP 5 /* @tvlppage */
#define LLVMDisassembler_VariantKind_ARM64_TLVOFF 6 /* @tvlppageoff */
/**
* The type for the symbol lookup function. This may be called by the
* disassembler for things like adding a comment for a PC plus a constant
* offset load instruction to use a symbol name instead of a load address value.
* It is passed the block of information that was saved when the disassembler
* context was created, and the ReferenceValue to look up as a symbol. If no
* symbol is found for the ReferenceValue, NULL is returned. The ReferenceType
* of the instruction is passed indirectly, as is the PC of the instruction in
* ReferencePC. If the output reference can be determined, its type is returned
* indirectly in ReferenceType, along with ReferenceName if any; otherwise
* ReferenceName is set to NULL.
*/
typedef const char *(*LLVMSymbolLookupCallback)(void *DisInfo,
uint64_t ReferenceValue,
uint64_t *ReferenceType,
uint64_t ReferencePC,
const char **ReferenceName);
/**
* The reference types on input and output.
*/
/* No input reference type or no output reference type. */
#define LLVMDisassembler_ReferenceType_InOut_None 0
/* The input reference is from a branch instruction. */
#define LLVMDisassembler_ReferenceType_In_Branch 1
/* The input reference is from a PC relative load instruction. */
#define LLVMDisassembler_ReferenceType_In_PCrel_Load 2
/* The input reference is from an ARM64::ADRP instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADRP 0x100000001
/* The input reference is from an ARM64::ADDXri instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADDXri 0x100000002
/* The input reference is from an ARM64::LDRXui instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXui 0x100000003
/* The input reference is from an ARM64::LDRXl instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_LDRXl 0x100000004
/* The input reference is from an ARM64::ADR instruction. */
#define LLVMDisassembler_ReferenceType_In_ARM64_ADR 0x100000005
/* The output reference is to a symbol stub. */
#define LLVMDisassembler_ReferenceType_Out_SymbolStub 1
/* The output reference is to a symbol address in a literal pool. */
#define LLVMDisassembler_ReferenceType_Out_LitPool_SymAddr 2
/* The output reference is to a cstring address in a literal pool. */
#define LLVMDisassembler_ReferenceType_Out_LitPool_CstrAddr 3
/* The output reference is to an Objective-C CoreFoundation string. */
#define LLVMDisassembler_ReferenceType_Out_Objc_CFString_Ref 4
/* The output reference is to an Objective-C message. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Message 5
/* The output reference is to an Objective-C message ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Message_Ref 6
/* The output reference is to an Objective-C selector ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Selector_Ref 7
/* The output reference is to an Objective-C class ref. */
#define LLVMDisassembler_ReferenceType_Out_Objc_Class_Ref 8
/* The output reference is to a C++ symbol name. */
#define LLVMDisassembler_ReferenceType_DeMangled_Name 9
#endif
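
A minimal sketch of implementing the two callback types declared above. Both simply report "no symbolic information", which is a legitimate implementation when no relocation or symbol data is available; the function names are illustrative, and such callbacks would typically be handed to the disassembler-creation routine in llvm-c/Disassembler.h:

#include "llvm-c/DisassemblerTypes.h"
#include <string.h>

static int noOpInfo(void *DisInfo, uint64_t PC, uint64_t Offset, uint64_t Size,
                    int TagType, void *TagBuf) {
  (void)DisInfo; (void)PC; (void)Offset; (void)Size;
  if (TagType == 1)
    memset(TagBuf, 0, sizeof(struct LLVMOpInfo1)); /* no symbol for this operand */
  return 0; /* 0 => no symbolic information was returned */
}

static const char *noSymbolLookup(void *DisInfo, uint64_t ReferenceValue,
                                  uint64_t *ReferenceType, uint64_t ReferencePC,
                                  const char **ReferenceName) {
  (void)DisInfo; (void)ReferenceValue; (void)ReferencePC;
  *ReferenceType = LLVMDisassembler_ReferenceType_InOut_None;
  *ReferenceName = NULL;
  return NULL; /* no symbol name for this reference */
}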


@ -182,6 +182,13 @@ LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM);
/*===-- JIT Event Listener functions -------------------------------------===*/
LLVMJITEventListenerRef LLVMCreateGDBRegistrationListener(void);
LLVMJITEventListenerRef LLVMCreateIntelJITEventListener(void);
LLVMJITEventListenerRef LLVMCreateOprofileJITEventListener(void);
LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void);
/**
* @}
*/


@ -37,6 +37,7 @@ void LLVMInitializeScalarOpts(LLVMPassRegistryRef R);
void LLVMInitializeObjCARCOpts(LLVMPassRegistryRef R);
void LLVMInitializeVectorization(LLVMPassRegistryRef R);
void LLVMInitializeInstCombine(LLVMPassRegistryRef R);
void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R);
void LLVMInitializeIPO(LLVMPassRegistryRef R);
void LLVMInitializeInstrumentation(LLVMPassRegistryRef R);
void LLVMInitializeAnalysis(LLVMPassRegistryRef R);
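
A small sketch of wiring the newly exposed initializer into the usual start-up sequence; LLVMGetGlobalPassRegistry comes from llvm-c/Core.h and the helper name is illustrative:

#include "llvm-c/Core.h"
#include "llvm-c/Initialization.h"

static void initInstCombinePasses(void) {
  LLVMPassRegistryRef R = LLVMGetGlobalPassRegistry();
  LLVMInitializeInstCombine(R);
  LLVMInitializeAggressiveInstCombiner(R); /* added in this revision */
}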


@ -29,9 +29,8 @@
extern "C" {
#endif
typedef struct LLVMOpaqueSharedModule *LLVMSharedModuleRef;
typedef struct LLVMOrcOpaqueJITStack *LLVMOrcJITStackRef;
typedef uint32_t LLVMOrcModuleHandle;
typedef uint64_t LLVMOrcModuleHandle;
typedef uint64_t LLVMOrcTargetAddress;
typedef uint64_t (*LLVMOrcSymbolResolverFn)(const char *Name, void *LookupCtx);
typedef uint64_t (*LLVMOrcLazyCompileCallbackFn)(LLVMOrcJITStackRef JITStack,
@ -39,33 +38,6 @@ typedef uint64_t (*LLVMOrcLazyCompileCallbackFn)(LLVMOrcJITStackRef JITStack,
typedef enum { LLVMOrcErrSuccess = 0, LLVMOrcErrGeneric } LLVMOrcErrorCode;
/**
* Turn an LLVMModuleRef into an LLVMSharedModuleRef.
*
* The JIT uses shared ownership for LLVM modules, since it is generally
* difficult to know when the JIT will be finished with a module (and the JIT
* has no way of knowing when a user may be finished with one).
*
* Calling this method with an LLVMModuleRef creates a shared-pointer to the
* module, and returns a reference to this shared pointer.
*
* The shared module should be disposed when finished with by calling
* LLVMOrcDisposeSharedModule (not LLVMDisposeModule). The Module will be
* deleted when the last shared pointer owner relinquishes it.
*/
LLVMSharedModuleRef LLVMOrcMakeSharedModule(LLVMModuleRef Mod);
/**
* Dispose of a shared module.
*
* The module should not be accessed after this call. The module will be
* deleted once all clients (including the JIT itself) have released their
* shared pointers.
*/
void LLVMOrcDisposeSharedModuleRef(LLVMSharedModuleRef SharedMod);
/**
* Create an ORC JIT stack.
*
@ -125,8 +97,7 @@ LLVMOrcErrorCode LLVMOrcSetIndirectStubPointer(LLVMOrcJITStackRef JITStack,
*/
LLVMOrcErrorCode
LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcModuleHandle *RetHandle, LLVMModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
@ -135,8 +106,7 @@ LLVMOrcAddEagerlyCompiledIR(LLVMOrcJITStackRef JITStack,
*/
LLVMOrcErrorCode
LLVMOrcAddLazilyCompiledIR(LLVMOrcJITStackRef JITStack,
LLVMOrcModuleHandle *RetHandle,
LLVMSharedModuleRef Mod,
LLVMOrcModuleHandle *RetHandle, LLVMModuleRef Mod,
LLVMOrcSymbolResolverFn SymbolResolver,
void *SymbolResolverCtx);
@ -170,11 +140,34 @@ LLVMOrcErrorCode LLVMOrcGetSymbolAddress(LLVMOrcJITStackRef JITStack,
LLVMOrcTargetAddress *RetAddr,
const char *SymbolName);
/**
* Get symbol address from JIT instance, searching only the specified
* handle.
*/
LLVMOrcErrorCode LLVMOrcGetSymbolAddressIn(LLVMOrcJITStackRef JITStack,
LLVMOrcTargetAddress *RetAddr,
LLVMOrcModuleHandle H,
const char *SymbolName);
/**
* Dispose of an ORC JIT stack.
*/
LLVMOrcErrorCode LLVMOrcDisposeInstance(LLVMOrcJITStackRef JITStack);
/**
* Register a JIT Event Listener.
*
* A NULL listener is ignored.
*/
void LLVMOrcRegisterJITEventListener(LLVMOrcJITStackRef JITStack, LLVMJITEventListenerRef L);
/**
* Unregister a JIT Event Listener.
*
* A NULL listener is ignored.
*/
void LLVMOrcUnregisterJITEventListener(LLVMOrcJITStackRef JITStack, LLVMJITEventListenerRef L);
#ifdef __cplusplus
}
#endif /* extern "C" */


@ -14,8 +14,8 @@
#ifndef LLVM_C_SUPPORT_H
#define LLVM_C_SUPPORT_H
#include "llvm-c/DataTypes.h"
#include "llvm-c/Types.h"
#include "llvm/Support/DataTypes.h"
#ifdef __cplusplus
extern "C" {


@ -137,6 +137,18 @@ LLVMBool LLVMTargetMachineEmitToMemoryBuffer(LLVMTargetMachineRef T, LLVMModuleR
disposed with LLVMDisposeMessage. */
char* LLVMGetDefaultTargetTriple(void);
/** Normalize a target triple. The result needs to be disposed with
LLVMDisposeMessage. */
char* LLVMNormalizeTargetTriple(const char* triple);
/** Get the host CPU as a string. The result needs to be disposed with
LLVMDisposeMessage. */
char* LLVMGetHostCPUName(void);
/** Get the host CPU's features as a string. The result needs to be disposed
with LLVMDisposeMessage. */
char* LLVMGetHostCPUFeatures(void);
/** Adds the target-specific analysis passes to the pass manager. */
void LLVMAddAnalysisPasses(LLVMTargetMachineRef T, LLVMPassManagerRef PM);
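
A sketch of querying host information with the new accessors; every returned string must be released with LLVMDisposeMessage (from llvm-c/Core.h):

#include <stdio.h>
#include "llvm-c/Core.h"
#include "llvm-c/TargetMachine.h"

static void printHostInfo(void) {
  char *Triple = LLVMGetDefaultTargetTriple();
  char *Normalized = LLVMNormalizeTargetTriple(Triple);
  char *CPU = LLVMGetHostCPUName();
  char *Features = LLVMGetHostCPUFeatures();
  printf("triple=%s cpu=%s features=%s\n", Normalized, CPU, Features);
  LLVMDisposeMessage(Triple);
  LLVMDisposeMessage(Normalized);
  LLVMDisposeMessage(CPU);
  LLVMDisposeMessage(Features);
}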


@ -0,0 +1,43 @@
/*===-- InstCombine.h - Instruction Combining Library C Interface -*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMInstCombine.a, which *|
|* combines instructions to form fewer, simple IR instructions. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TRANSFORMS_INSTCOMBINE_H
#define LLVM_C_TRANSFORMS_INSTCOMBINE_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCTransformsInstCombine Instruction Combining transformations
* @ingroup LLVMCTransforms
*
* @{
*/
/** See llvm::createInstructionCombiningPass function. */
void LLVMAddInstructionCombiningPass(LLVMPassManagerRef PM);
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif
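
A sketch of running the instruction combiner over a module through the C API; the pass-manager helpers (LLVMCreatePassManager, LLVMRunPassManager, LLVMDisposePassManager) are from llvm-c/Core.h, and `M` is assumed to be an existing LLVMModuleRef:

#include "llvm-c/Core.h"
#include "llvm-c/Transforms/InstCombine.h"

static void runInstCombine(LLVMModuleRef M) {
  LLVMPassManagerRef PM = LLVMCreatePassManager();
  LLVMAddInstructionCombiningPass(PM);
  LLVMRunPassManager(PM, M); /* returns true if the module was modified */
  LLVMDisposePassManager(PM);
}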


@ -35,6 +35,9 @@ extern "C" {
/** See llvm::createAggressiveDCEPass function. */
void LLVMAddAggressiveDCEPass(LLVMPassManagerRef PM);
/** See llvm::createAggressiveInstCombinerPass function. */
void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM);
/** See llvm::createBitTrackingDCEPass function. */
void LLVMAddBitTrackingDCEPass(LLVMPassManagerRef PM);
@ -86,6 +89,9 @@ void LLVMAddLoopRerollPass(LLVMPassManagerRef PM);
/** See llvm::createLoopUnrollPass function. */
void LLVMAddLoopUnrollPass(LLVMPassManagerRef PM);
/** See llvm::createLoopUnrollAndJamPass function. */
void LLVMAddLoopUnrollAndJamPass(LLVMPassManagerRef PM);
/** See llvm::createLoopUnswitchPass function. */
void LLVMAddLoopUnswitchPass(LLVMPassManagerRef PM);
@ -95,12 +101,6 @@ void LLVMAddMemCpyOptPass(LLVMPassManagerRef PM);
/** See llvm::createPartiallyInlineLibCallsPass function. */
void LLVMAddPartiallyInlineLibCallsPass(LLVMPassManagerRef PM);
/** See llvm::createLowerSwitchPass function. */
void LLVMAddLowerSwitchPass(LLVMPassManagerRef PM);
/** See llvm::createPromoteMemoryToRegisterPass function. */
void LLVMAddPromoteMemoryToRegisterPass(LLVMPassManagerRef PM);
/** See llvm::createReassociatePass function. */
void LLVMAddReassociatePass(LLVMPassManagerRef PM);
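
A sketch of a module pipeline that exercises the two passes added above; as before, the pass-manager helpers come from llvm-c/Core.h and `M` is an assumed module:

#include "llvm-c/Core.h"
#include "llvm-c/Transforms/Scalar.h"

static void runNewScalarPasses(LLVMModuleRef M) {
  LLVMPassManagerRef PM = LLVMCreatePassManager();
  LLVMAddAggressiveInstCombinerPass(PM); /* added in this revision */
  LLVMAddLoopUnrollAndJamPass(PM);       /* added in this revision */
  LLVMRunPassManager(PM, M);
  LLVMDisposePassManager(PM);
}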


@ -0,0 +1,50 @@
/*===-- Utils.h - Transformation Utils Library C Interface ------*- C++ -*-===*\
|* *|
|* The LLVM Compiler Infrastructure *|
|* *|
|* This file is distributed under the University of Illinois Open Source *|
|* License. See LICENSE.TXT for details. *|
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This header declares the C interface to libLLVMTransformUtils.a, which *|
|* implements various transformation utilities of the LLVM IR. *|
|* *|
|* Many exotic languages can interoperate with C code but have a harder time *|
|* with C++ due to name mangling. So in addition to C, this interface enables *|
|* tools written in such languages. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TRANSFORMS_UTILS_H
#define LLVM_C_TRANSFORMS_UTILS_H
#include "llvm-c/Types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @defgroup LLVMCTransformsUtils Transformation Utilities
* @ingroup LLVMCTransforms
*
* @{
*/
/** See llvm::createLowerSwitchPass function. */
void LLVMAddLowerSwitchPass(LLVMPassManagerRef PM);
/** See llvm::createPromoteMemoryToRegisterPass function. */
void LLVMAddPromoteMemoryToRegisterPass(LLVMPassManagerRef PM);
/**
* @}
*/
#ifdef __cplusplus
}
#endif /* defined(__cplusplus) */
#endif
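
Because LLVMAddLowerSwitchPass and LLVMAddPromoteMemoryToRegisterPass now live here rather than in Transforms/Scalar.h, callers need the new include; a minimal sketch with `M` assumed to be an existing module:

#include "llvm-c/Core.h"
#include "llvm-c/Transforms/Utils.h"

static void runMem2Reg(LLVMModuleRef M) {
  LLVMPassManagerRef PM = LLVMCreatePassManager();
  LLVMAddPromoteMemoryToRegisterPass(PM); /* declared in Transforms/Utils.h now */
  LLVMRunPassManager(PM, M);
  LLVMDisposePassManager(PM);
}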


@ -33,9 +33,6 @@ extern "C" {
* @{
*/
/** DEPRECATED - Use LLVMAddSLPVectorizePass */
void LLVMAddBBVectorizePass(LLVMPassManagerRef PM);
/** See llvm::createLoopVectorizePass function. */
void LLVMAddLoopVectorizePass(LLVMPassManagerRef PM);


@ -7,14 +7,14 @@
|* *|
|*===----------------------------------------------------------------------===*|
|* *|
|* This file defines types used by the the C interface to LLVM. *|
|* This file defines types used by the C interface to LLVM. *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_C_TYPES_H
#define LLVM_C_TYPES_H
#include "llvm/Support/DataTypes.h"
#include "llvm-c/DataTypes.h"
#ifdef __cplusplus
extern "C" {
@ -134,6 +134,21 @@ typedef struct LLVMOpaqueAttributeRef *LLVMAttributeRef;
*/
typedef struct LLVMOpaqueDiagnosticInfo *LLVMDiagnosticInfoRef;
/**
* @see llvm::Comdat
*/
typedef struct LLVMComdat *LLVMComdatRef;
/**
* @see llvm::Module::ModuleFlagEntry
*/
typedef struct LLVMOpaqueModuleFlagEntry LLVMModuleFlagEntry;
/**
* @see llvm::JITEventListener
*/
typedef struct LLVMOpaqueJITEventListener *LLVMJITEventListenerRef;
/**
* @}
*/


@ -44,7 +44,7 @@ typedef bool lto_bool_t;
* @{
*/
#define LTO_API_VERSION 21
#define LTO_API_VERSION 22
/**
* \since prior to LTO_API_VERSION=3
@ -190,7 +190,7 @@ lto_module_create_from_memory_with_path(const void* mem, size_t length,
const char *path);
/**
* \brief Loads an object file in its own context.
* Loads an object file in its own context.
*
* Loads an object file in its own LLVMContext. This function call is
* thread-safe. However, modules created this way should not be merged into an
@ -205,7 +205,7 @@ lto_module_create_in_local_context(const void *mem, size_t length,
const char *path);
/**
* \brief Loads an object file in the codegen context.
* Loads an object file in the codegen context.
*
* Loads an object file into the same context as \c cg. The module is safe to
* add using \a lto_codegen_add_module().
@ -345,7 +345,7 @@ extern lto_code_gen_t
lto_codegen_create(void);
/**
* \brief Instantiate a code generator in its own context.
* Instantiate a code generator in its own context.
*
* Instantiates a code generator in its own context. Modules added via \a
* lto_codegen_add_module() must have all been created in the same context,
@ -539,7 +539,7 @@ lto_codegen_set_should_internalize(lto_code_gen_t cg,
lto_bool_t ShouldInternalize);
/**
* \brief Set whether to embed uselists in bitcode.
* Set whether to embed uselists in bitcode.
*
* Sets whether \a lto_codegen_write_merged_modules() should embed uselists in
* output bitcode. This should be turned on for all -save-temps output.
@ -784,7 +784,7 @@ extern void thinlto_codegen_set_cache_dir(thinlto_code_gen_t cg,
/**
* Sets the cache pruning interval (in seconds). A negative value disables the
* pruning. An unspecified default value will be applied, and a value of 0 will
* be ignored.
* force pruning to occur.
*
* \since LTO_API_VERSION=18
*/
@ -793,7 +793,7 @@ extern void thinlto_codegen_set_cache_pruning_interval(thinlto_code_gen_t cg,
/**
* Sets the maximum cache size that can be persistent across build, in terms of
* percentage of the available space on the the disk. Set to 100 to indicate
* percentage of the available space on the disk. Set to 100 to indicate
* no limit, 50 to indicate that the cache size will not be left over half the
* available space. A value over 100 will be reduced to 100, a value of 0 will
* be ignored. An unspecified default value will be applied.
@ -816,6 +816,28 @@ extern void thinlto_codegen_set_final_cache_size_relative_to_available_space(
extern void thinlto_codegen_set_cache_entry_expiration(thinlto_code_gen_t cg,
unsigned expiration);
/**
* Sets the maximum size of the cache directory (in bytes). A value over the
* amount of available space on the disk will be reduced to the amount of
* available space. An unspecified default value will be applied. A value of 0
* will be ignored.
*
* \since LTO_API_VERSION=22
*/
extern void thinlto_codegen_set_cache_size_bytes(thinlto_code_gen_t cg,
unsigned max_size_bytes);
/**
* Sets the maximum number of files in the cache directory. An unspecified
* default value will be applied. A value of 0 will be ignored.
*
* \since LTO_API_VERSION=22
*/
extern void thinlto_codegen_set_cache_size_files(thinlto_code_gen_t cg,
unsigned max_size_files);
/**
* @} // endgroup LLVMCTLTO_CACHING
*/
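
A sketch of configuring the cache with the two new size limits, assuming `cg` is a thinlto_code_gen_t created elsewhere; the directory path and numbers are illustrative:

#include "llvm-c/lto.h"

static void configureThinLTOCache(thinlto_code_gen_t cg) {
  thinlto_codegen_set_cache_dir(cg, "/tmp/thinlto-cache"); /* enables caching */
  thinlto_codegen_set_cache_size_bytes(cg, 1u << 30);      /* cap at ~1 GiB */
  thinlto_codegen_set_cache_size_files(cg, 10000);         /* and at 10k files */
}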


@ -1215,7 +1215,7 @@ inline APFloat abs(APFloat X) {
return X;
}
/// \brief Returns the negated value of the argument.
/// Returns the negated value of the argument.
inline APFloat neg(APFloat X) {
X.changeSign();
return X;

File diff suppressed because it is too large.


@ -72,7 +72,7 @@ public:
}
using APInt::toString;
/// \brief Get the correctly-extended \c int64_t value.
/// Get the correctly-extended \c int64_t value.
int64_t getExtValue() const {
assert(getMinSignedBits() <= 64 && "Too many bits for int64_t");
return isSigned() ? getSExtValue() : getZExtValue();
@ -279,13 +279,13 @@ public:
: APInt::getSignedMinValue(numBits), Unsigned);
}
/// \brief Determine if two APSInts have the same value, zero- or
/// Determine if two APSInts have the same value, zero- or
/// sign-extending as needed.
static bool isSameValue(const APSInt &I1, const APSInt &I2) {
return !compareValues(I1, I2);
}
/// \brief Compare underlying values of two numbers.
/// Compare underlying values of two numbers.
static int compareValues(const APSInt &I1, const APSInt &I2) {
if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
return I1.IsUnsigned ? I1.compare(I2) : I1.compareSigned(I2);


@ -0,0 +1,150 @@
//===- Any.h - Generic type erased holder of any type -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides Any, a non-template class modeled in the spirit of
// std::any. The idea is to provide a type-safe replacement for C's void*.
// It can hold a value of any copy-constructible, copy-assignable type.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_ANY_H
#define LLVM_ADT_ANY_H
#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <memory>
#include <type_traits>
namespace llvm {
class Any {
template <typename T> struct TypeId { static const char Id; };
struct StorageBase {
virtual ~StorageBase() = default;
virtual std::unique_ptr<StorageBase> clone() const = 0;
virtual const void *id() const = 0;
};
template <typename T> struct StorageImpl : public StorageBase {
explicit StorageImpl(const T &Value) : Value(Value) {}
explicit StorageImpl(T &&Value) : Value(std::move(Value)) {}
std::unique_ptr<StorageBase> clone() const override {
return llvm::make_unique<StorageImpl<T>>(Value);
}
const void *id() const override { return &TypeId<T>::Id; }
T Value;
private:
StorageImpl &operator=(const StorageImpl &Other) = delete;
StorageImpl(const StorageImpl &Other) = delete;
};
public:
Any() = default;
Any(const Any &Other)
: Storage(Other.Storage ? Other.Storage->clone() : nullptr) {}
// When T is Any or T is not copy-constructible we need to explicitly disable
// the forwarding constructor so that the copy constructor gets selected
// instead.
template <
typename T,
typename std::enable_if<
llvm::conjunction<
llvm::negation<std::is_same<typename std::decay<T>::type, Any>>,
std::is_copy_constructible<typename std::decay<T>::type>>::value,
int>::type = 0>
Any(T &&Value) {
using U = typename std::decay<T>::type;
Storage = llvm::make_unique<StorageImpl<U>>(std::forward<T>(Value));
}
Any(Any &&Other) : Storage(std::move(Other.Storage)) {}
Any &swap(Any &Other) {
std::swap(Storage, Other.Storage);
return *this;
}
Any &operator=(Any Other) {
Storage = std::move(Other.Storage);
return *this;
}
bool hasValue() const { return !!Storage; }
void reset() { Storage.reset(); }
private:
template <class T> friend T any_cast(const Any &Value);
template <class T> friend T any_cast(Any &Value);
template <class T> friend T any_cast(Any &&Value);
template <class T> friend const T *any_cast(const Any *Value);
template <class T> friend T *any_cast(Any *Value);
template <typename T> friend bool any_isa(const Any &Value);
std::unique_ptr<StorageBase> Storage;
};
template <typename T> const char Any::TypeId<T>::Id = 0;
template <typename T> bool any_isa(const Any &Value) {
if (!Value.Storage)
return false;
using U =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return Value.Storage->id() == &Any::TypeId<U>::Id;
}
template <class T> T any_cast(const Any &Value) {
using U =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return static_cast<T>(*any_cast<U>(&Value));
}
template <class T> T any_cast(Any &Value) {
using U =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return static_cast<T>(*any_cast<U>(&Value));
}
template <class T> T any_cast(Any &&Value) {
using U =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
return static_cast<T>(std::move(*any_cast<U>(&Value)));
}
template <class T> const T *any_cast(const Any *Value) {
using U =
typename std::remove_cv<typename std::remove_reference<T>::type>::type;
assert(Value && any_isa<T>(*Value) && "Bad any cast!");
if (!Value || !any_isa<U>(*Value))
return nullptr;
return &static_cast<Any::StorageImpl<U> &>(*Value->Storage).Value;
}
template <class T> T *any_cast(Any *Value) {
using U = typename std::decay<T>::type;
assert(Value && any_isa<U>(*Value) && "Bad any cast!");
if (!Value || !any_isa<U>(*Value))
return nullptr;
return &static_cast<Any::StorageImpl<U> &>(*Value->Storage).Value;
}
} // end namespace llvm
#endif // LLVM_ADT_ANY_H
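
A short usage sketch for llvm::Any as defined above (names are illustrative):

#include "llvm/ADT/Any.h"
#include <cassert>
#include <string>

static void anyExample() {
  llvm::Any A = 42;                 // holds an int
  assert(llvm::any_isa<int>(A));
  int V = llvm::any_cast<int>(A);   // V == 42
  (void)V;
  A = std::string("hello");         // now holds a std::string
  const std::string *S = llvm::any_cast<std::string>(&A); // pointer form
  assert(S && *S == "hello");
}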


@ -184,51 +184,51 @@ namespace llvm {
/// slice(n) - Chop off the first N elements of the array.
ArrayRef<T> slice(size_t N) const { return slice(N, size() - N); }
/// \brief Drop the first \p N elements of the array.
/// Drop the first \p N elements of the array.
ArrayRef<T> drop_front(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(N, size() - N);
}
/// \brief Drop the last \p N elements of the array.
/// Drop the last \p N elements of the array.
ArrayRef<T> drop_back(size_t N = 1) const {
assert(size() >= N && "Dropping more elements than exist");
return slice(0, size() - N);
}
/// \brief Return a copy of *this with the first N elements satisfying the
/// Return a copy of *this with the first N elements satisfying the
/// given predicate removed.
template <class PredicateT> ArrayRef<T> drop_while(PredicateT Pred) const {
return ArrayRef<T>(find_if_not(*this, Pred), end());
}
/// \brief Return a copy of *this with the first N elements not satisfying
/// Return a copy of *this with the first N elements not satisfying
/// the given predicate removed.
template <class PredicateT> ArrayRef<T> drop_until(PredicateT Pred) const {
return ArrayRef<T>(find_if(*this, Pred), end());
}
/// \brief Return a copy of *this with only the first \p N elements.
/// Return a copy of *this with only the first \p N elements.
ArrayRef<T> take_front(size_t N = 1) const {
if (N >= size())
return *this;
return drop_back(size() - N);
}
/// \brief Return a copy of *this with only the last \p N elements.
/// Return a copy of *this with only the last \p N elements.
ArrayRef<T> take_back(size_t N = 1) const {
if (N >= size())
return *this;
return drop_front(size() - N);
}
/// \brief Return the first N elements of this Array that satisfy the given
/// Return the first N elements of this Array that satisfy the given
/// predicate.
template <class PredicateT> ArrayRef<T> take_while(PredicateT Pred) const {
return ArrayRef<T>(begin(), find_if_not(*this, Pred));
}
/// \brief Return the first N elements of this Array that don't satisfy the
/// Return the first N elements of this Array that don't satisfy the
/// given predicate.
template <class PredicateT> ArrayRef<T> take_until(PredicateT Pred) const {
return ArrayRef<T>(begin(), find_if(*this, Pred));
@ -358,7 +358,7 @@ namespace llvm {
return slice(N, this->size() - N);
}
/// \brief Drop the first \p N elements of the array.
/// Drop the first \p N elements of the array.
MutableArrayRef<T> drop_front(size_t N = 1) const {
assert(this->size() >= N && "Dropping more elements than exist");
return slice(N, this->size() - N);
@ -369,42 +369,42 @@ namespace llvm {
return slice(0, this->size() - N);
}
/// \brief Return a copy of *this with the first N elements satisfying the
/// Return a copy of *this with the first N elements satisfying the
/// given predicate removed.
template <class PredicateT>
MutableArrayRef<T> drop_while(PredicateT Pred) const {
return MutableArrayRef<T>(find_if_not(*this, Pred), end());
}
/// \brief Return a copy of *this with the first N elements not satisfying
/// Return a copy of *this with the first N elements not satisfying
/// the given predicate removed.
template <class PredicateT>
MutableArrayRef<T> drop_until(PredicateT Pred) const {
return MutableArrayRef<T>(find_if(*this, Pred), end());
}
/// \brief Return a copy of *this with only the first \p N elements.
/// Return a copy of *this with only the first \p N elements.
MutableArrayRef<T> take_front(size_t N = 1) const {
if (N >= this->size())
return *this;
return drop_back(this->size() - N);
}
/// \brief Return a copy of *this with only the last \p N elements.
/// Return a copy of *this with only the last \p N elements.
MutableArrayRef<T> take_back(size_t N = 1) const {
if (N >= this->size())
return *this;
return drop_front(this->size() - N);
}
/// \brief Return the first N elements of this Array that satisfy the given
/// Return the first N elements of this Array that satisfy the given
/// predicate.
template <class PredicateT>
MutableArrayRef<T> take_while(PredicateT Pred) const {
return MutableArrayRef<T>(begin(), find_if_not(*this, Pred));
}
/// \brief Return the first N elements of this Array that don't satisfy the
/// Return the first N elements of this Array that don't satisfy the
/// given predicate.
template <class PredicateT>
MutableArrayRef<T> take_until(PredicateT Pred) const {


@ -779,7 +779,7 @@ public:
}
private:
/// \brief Perform a logical left shift of \p Count words by moving everything
/// Perform a logical left shift of \p Count words by moving everything
/// \p Count words to the right in memory.
///
/// While confusing, words are stored from least significant at Bits[0] to
@ -810,7 +810,7 @@ private:
clear_unused_bits();
}
/// \brief Perform a logical right shift of \p Count words by moving those
/// Perform a logical right shift of \p Count words by moving those
/// words to the left in memory. See wordShl for more information.
///
void wordShr(uint32_t Count) {
@ -828,7 +828,8 @@ private:
}
MutableArrayRef<BitWord> allocate(size_t NumWords) {
BitWord *RawBits = (BitWord *)std::malloc(NumWords * sizeof(BitWord));
BitWord *RawBits = static_cast<BitWord *>(
safe_malloc(NumWords * sizeof(BitWord)));
return MutableArrayRef<BitWord>(RawBits, NumWords);
}
@ -867,8 +868,8 @@ private:
void grow(unsigned NewSize) {
size_t NewCapacity = std::max<size_t>(NumBitWords(NewSize), Bits.size() * 2);
assert(NewCapacity > 0 && "realloc-ing zero space");
BitWord *NewBits =
(BitWord *)std::realloc(Bits.data(), NewCapacity * sizeof(BitWord));
BitWord *NewBits = static_cast<BitWord *>(
safe_realloc(Bits.data(), NewCapacity * sizeof(BitWord)));
Bits = MutableArrayRef<BitWord>(NewBits, NewCapacity);
clear_unused_bits();
}


@ -43,6 +43,7 @@ public:
}
StringRef val() const { return StringRef(P, Size); }
const char *data() const { return P; }
uint32_t size() const { return Size; }
uint32_t hash() const { return Hash; }
};


@ -262,6 +262,13 @@ template <typename T> struct DenseMapInfo<ArrayRef<T>> {
}
};
template <> struct DenseMapInfo<hash_code> {
static inline hash_code getEmptyKey() { return hash_code(-1); }
static inline hash_code getTombstoneKey() { return hash_code(-2); }
static unsigned getHashValue(hash_code val) { return val; }
static bool isEqual(hash_code LHS, hash_code RHS) { return LHS == RHS; }
};
} // end namespace llvm
#endif // LLVM_ADT_DENSEMAPINFO_H
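
A sketch of what the new specialization enables, keying a DenseMap directly on hash_code values (purely illustrative):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Hashing.h"

// Count how often a particular pair of values has been seen.
static unsigned countHash(llvm::DenseMap<llvm::hash_code, unsigned> &Counts,
                          int A, int B) {
  return ++Counts[llvm::hash_combine(A, B)];
}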


@ -177,7 +177,7 @@ public:
return *this;
}
/// \brief Skips all children of the current node and traverses to next node
/// Skips all children of the current node and traverses to next node
///
/// Note: This function takes care of incrementing the iterator. If you
/// always increment and call this function, you risk walking off the end.


@ -17,7 +17,6 @@
#define LLVM_ADT_EPOCH_TRACKER_H
#include "llvm/Config/abi-breaking.h"
#include "llvm/Config/llvm-config.h"
#include <cstdint>
@ -25,7 +24,7 @@ namespace llvm {
#if LLVM_ENABLE_ABI_BREAKING_CHECKS
/// \brief A base class for data structure classes wishing to make iterators
/// A base class for data structure classes wishing to make iterators
/// ("handles") pointing into themselves fail-fast. When building without
/// asserts, this class is empty and does nothing.
///
@ -40,15 +39,15 @@ class DebugEpochBase {
public:
DebugEpochBase() : Epoch(0) {}
/// \brief Calling incrementEpoch invalidates all handles pointing into the
/// Calling incrementEpoch invalidates all handles pointing into the
/// calling instance.
void incrementEpoch() { ++Epoch; }
/// \brief The destructor calls incrementEpoch to make use-after-free bugs
/// The destructor calls incrementEpoch to make use-after-free bugs
/// more likely to crash deterministically.
~DebugEpochBase() { incrementEpoch(); }
/// \brief A base class for iterator classes ("handles") that wish to poll for
/// A base class for iterator classes ("handles") that wish to poll for
/// iterator invalidating modifications in the underlying data structure.
/// When LLVM is built without asserts, this class is empty and does nothing.
///
@ -66,12 +65,12 @@ public:
explicit HandleBase(const DebugEpochBase *Parent)
: EpochAddress(&Parent->Epoch), EpochAtCreation(Parent->Epoch) {}
/// \brief Returns true if the DebugEpochBase this Handle is linked to has
/// Returns true if the DebugEpochBase this Handle is linked to has
/// not called incrementEpoch on itself since the creation of this
/// HandleBase instance.
bool isHandleInSync() const { return *EpochAddress == EpochAtCreation; }
/// \brief Returns a pointer to the epoch word stored in the data structure
/// Returns a pointer to the epoch word stored in the data structure
/// this handle points into. Can be used to check if two iterators point
/// into the same data structure.
const void *getEpochAddress() const { return EpochAddress; }


@ -0,0 +1,293 @@
//===- FunctionExtras.h - Function type erasure utilities -------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a collection of function (or more generally, callable)
/// type erasure utilities supplementing those provided by the standard library
/// in `<function>`.
///
/// It provides `unique_function`, which works like `std::function` but supports
/// move-only callable objects.
///
/// Future plans:
/// - Add a `function` that provides const, volatile, and ref-qualified support,
/// which doesn't work with `std::function`.
/// - Provide support for specifying multiple signatures to type erase callable
/// objects with an overload set, such as those produced by generic lambdas.
/// - Expand to include a copyable utility that directly replaces std::function
/// but brings the above improvements.
///
/// Note that LLVM's utilities are greatly simplified by not supporting
/// allocators.
///
/// If the standard library ever begins to provide comparable facilities we can
/// consider switching to those.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_FUNCTION_EXTRAS_H
#define LLVM_ADT_FUNCTION_EXTRAS_H
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/Support/type_traits.h"
#include <memory>
namespace llvm {
template <typename FunctionT> class unique_function;
template <typename ReturnT, typename... ParamTs>
class unique_function<ReturnT(ParamTs...)> {
static constexpr size_t InlineStorageSize = sizeof(void *) * 3;
// MSVC has a bug and ICEs if we give it a particular dependent value
// expression as part of the `std::conditional` below. To work around this,
// we build that into a template struct's constexpr bool.
template <typename T> struct IsSizeLessThanThresholdT {
static constexpr bool value = sizeof(T) <= (2 * sizeof(void *));
};
// Provide a type function to map parameters that won't observe extra copies
// or moves and which are small enough to likely pass in register to values
// and all other types to l-value reference types. We use this to compute the
// types used in our erased call utility to minimize copies and moves unless
// doing so would force things unnecessarily into memory.
//
// The heuristic used is related to common ABI register passing conventions.
// It doesn't have to be exact though, and in one way it is more strict
// because we want to still be able to observe either moves *or* copies.
template <typename T>
using AdjustedParamT = typename std::conditional<
!std::is_reference<T>::value &&
llvm::is_trivially_copy_constructible<T>::value &&
llvm::is_trivially_move_constructible<T>::value &&
IsSizeLessThanThresholdT<T>::value,
T, T &>::type;
// The type of the erased function pointer we use as a callback to dispatch to
// the stored callable when it is trivial to move and destroy.
using CallPtrT = ReturnT (*)(void *CallableAddr,
AdjustedParamT<ParamTs>... Params);
using MovePtrT = void (*)(void *LHSCallableAddr, void *RHSCallableAddr);
using DestroyPtrT = void (*)(void *CallableAddr);
/// A struct to hold a single trivial callback with sufficient alignment for
/// our bitpacking.
struct alignas(8) TrivialCallback {
CallPtrT CallPtr;
};
/// A struct we use to aggregate three callbacks when we need the full set of
/// operations.
struct alignas(8) NonTrivialCallbacks {
CallPtrT CallPtr;
MovePtrT MovePtr;
DestroyPtrT DestroyPtr;
};
// Create a pointer union between either a pointer to a static trivial call
// pointer in a struct or a pointer to a static struct of the call, move, and
// destroy pointers.
using CallbackPointerUnionT =
PointerUnion<TrivialCallback *, NonTrivialCallbacks *>;
// The main storage buffer. This will either have a pointer to out-of-line
// storage or an inline buffer storing the callable.
union StorageUnionT {
// For out-of-line storage we keep a pointer to the underlying storage and
// the size. This is enough to deallocate the memory.
struct OutOfLineStorageT {
void *StoragePtr;
size_t Size;
size_t Alignment;
} OutOfLineStorage;
static_assert(
sizeof(OutOfLineStorageT) <= InlineStorageSize,
"Should always use all of the out-of-line storage for inline storage!");
// For in-line storage, we just provide an aligned character buffer. We
// provide three pointers worth of storage here.
typename std::aligned_storage<InlineStorageSize, alignof(void *)>::type
InlineStorage;
} StorageUnion;
// A compressed pointer to either our dispatching callback or our table of
// dispatching callbacks and the flag for whether the callable itself is
// stored inline or not.
PointerIntPair<CallbackPointerUnionT, 1, bool> CallbackAndInlineFlag;
bool isInlineStorage() const { return CallbackAndInlineFlag.getInt(); }
bool isTrivialCallback() const {
return CallbackAndInlineFlag.getPointer().template is<TrivialCallback *>();
}
CallPtrT getTrivialCallback() const {
return CallbackAndInlineFlag.getPointer().template get<TrivialCallback *>()->CallPtr;
}
NonTrivialCallbacks *getNonTrivialCallbacks() const {
return CallbackAndInlineFlag.getPointer()
.template get<NonTrivialCallbacks *>();
}
void *getInlineStorage() { return &StorageUnion.InlineStorage; }
void *getOutOfLineStorage() {
return StorageUnion.OutOfLineStorage.StoragePtr;
}
size_t getOutOfLineStorageSize() const {
return StorageUnion.OutOfLineStorage.Size;
}
size_t getOutOfLineStorageAlignment() const {
return StorageUnion.OutOfLineStorage.Alignment;
}
void setOutOfLineStorage(void *Ptr, size_t Size, size_t Alignment) {
StorageUnion.OutOfLineStorage = {Ptr, Size, Alignment};
}
template <typename CallableT>
static ReturnT CallImpl(void *CallableAddr, AdjustedParamT<ParamTs>... Params) {
return (*reinterpret_cast<CallableT *>(CallableAddr))(
std::forward<ParamTs>(Params)...);
}
template <typename CallableT>
static void MoveImpl(void *LHSCallableAddr, void *RHSCallableAddr) noexcept {
new (LHSCallableAddr)
CallableT(std::move(*reinterpret_cast<CallableT *>(RHSCallableAddr)));
}
template <typename CallableT>
static void DestroyImpl(void *CallableAddr) noexcept {
reinterpret_cast<CallableT *>(CallableAddr)->~CallableT();
}
public:
unique_function() = default;
unique_function(std::nullptr_t /*null_callable*/) {}
~unique_function() {
if (!CallbackAndInlineFlag.getPointer())
return;
// Cache this value so we don't re-check it after type-erased operations.
bool IsInlineStorage = isInlineStorage();
if (!isTrivialCallback())
getNonTrivialCallbacks()->DestroyPtr(
IsInlineStorage ? getInlineStorage() : getOutOfLineStorage());
if (!IsInlineStorage)
deallocate_buffer(getOutOfLineStorage(), getOutOfLineStorageSize(),
getOutOfLineStorageAlignment());
}
unique_function(unique_function &&RHS) noexcept {
// Copy the callback and inline flag.
CallbackAndInlineFlag = RHS.CallbackAndInlineFlag;
// If the RHS is empty, just copying the above is sufficient.
if (!RHS)
return;
if (!isInlineStorage()) {
// The out-of-line case is easiest to move.
StorageUnion.OutOfLineStorage = RHS.StorageUnion.OutOfLineStorage;
} else if (isTrivialCallback()) {
// Move is trivial, just memcpy the bytes across.
memcpy(getInlineStorage(), RHS.getInlineStorage(), InlineStorageSize);
} else {
// Non-trivial move, so dispatch to a type-erased implementation.
getNonTrivialCallbacks()->MovePtr(getInlineStorage(),
RHS.getInlineStorage());
}
// Clear the old callback and inline flag to get back to as-if-null.
RHS.CallbackAndInlineFlag = {};
#ifndef NDEBUG
// In debug builds, we also scribble across the rest of the storage.
memset(RHS.getInlineStorage(), 0xAD, InlineStorageSize);
#endif
}
unique_function &operator=(unique_function &&RHS) noexcept {
if (this == &RHS)
return *this;
// Because we don't try to provide any exception safety guarantees we can
// implement move assignment very simply by first destroying the current
// object and then move-constructing over top of it.
this->~unique_function();
new (this) unique_function(std::move(RHS));
return *this;
}
template <typename CallableT> unique_function(CallableT Callable) {
bool IsInlineStorage = true;
void *CallableAddr = getInlineStorage();
if (sizeof(CallableT) > InlineStorageSize ||
alignof(CallableT) > alignof(decltype(StorageUnion.InlineStorage))) {
IsInlineStorage = false;
// Allocate out-of-line storage. FIXME: Use an explicit alignment
// parameter in C++17 mode.
auto Size = sizeof(CallableT);
auto Alignment = alignof(CallableT);
CallableAddr = allocate_buffer(Size, Alignment);
setOutOfLineStorage(CallableAddr, Size, Alignment);
}
// Now move into the storage.
new (CallableAddr) CallableT(std::move(Callable));
// See if we can create a trivial callback. We need the callable to be
// trivially moved and trivially destroyed so that we don't have to store
// type erased callbacks for those operations.
//
// FIXME: We should use constexpr if here and below to avoid instantiating
// the non-trivial static objects when unnecessary. While the linker should
// remove them, it is still wasteful.
if (llvm::is_trivially_move_constructible<CallableT>::value &&
std::is_trivially_destructible<CallableT>::value) {
// We need to create a nicely aligned object. We use a static variable
// for this because it is a trivial struct.
static TrivialCallback Callback = { &CallImpl<CallableT> };
CallbackAndInlineFlag = {&Callback, IsInlineStorage};
return;
}
// Otherwise, we need to point at an object that contains all the different
// type erased behaviors needed. Create a static instance of the struct type
// here and then use a pointer to that.
static NonTrivialCallbacks Callbacks = {
&CallImpl<CallableT>, &MoveImpl<CallableT>, &DestroyImpl<CallableT>};
CallbackAndInlineFlag = {&Callbacks, IsInlineStorage};
}
ReturnT operator()(ParamTs... Params) {
void *CallableAddr =
isInlineStorage() ? getInlineStorage() : getOutOfLineStorage();
return (isTrivialCallback()
? getTrivialCallback()
: getNonTrivialCallbacks()->CallPtr)(CallableAddr, Params...);
}
explicit operator bool() const {
return (bool)CallbackAndInlineFlag.getPointer();
}
};
} // end namespace llvm
#endif // LLVM_ADT_FUNCTION_EXTRAS_H
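
A short sketch of the move-only behavior described above (C++14 for the init-capture; std::function could not hold this lambda):

#include "llvm/ADT/FunctionExtras.h"
#include <memory>

static int uniqueFunctionExample() {
  auto P = std::make_unique<int>(7);
  llvm::unique_function<int()> F = [P = std::move(P)] { return *P; };
  llvm::unique_function<int()> G = std::move(F); // movable, but not copyable
  return G();                                    // 7
}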


@ -47,6 +47,19 @@ struct GraphTraits {
// static nodes_iterator nodes_end (GraphType *G)
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
// typedef EdgeRef - Type of Edge token in the graph, which should
// be cheap to copy.
// typedef ChildEdgeIteratorType - Type used to iterate over child edges in the
// graph; dereferences to an EdgeRef.
// static ChildEdgeIteratorType child_edge_begin(NodeRef)
// static ChildEdgeIteratorType child_edge_end(NodeRef)
// Return iterators that point to the beginning and ending of the
// edge list for the given graph node.
//
// static NodeRef edge_dest(EdgeRef)
// Return the destination node of an edge.
// static unsigned size (GraphType *G)
// Return total number of nodes in the graph
@ -111,6 +124,13 @@ inverse_children(const typename GraphTraits<GraphType>::NodeRef &G) {
GraphTraits<Inverse<GraphType>>::child_end(G));
}
template <class GraphType>
iterator_range<typename GraphTraits<GraphType>::ChildEdgeIteratorType>
children_edges(const typename GraphTraits<GraphType>::NodeRef &G) {
return make_range(GraphTraits<GraphType>::child_edge_begin(G),
GraphTraits<GraphType>::child_edge_end(G));
}
} // end namespace llvm
#endif // LLVM_ADT_GRAPHTRAITS_H


@ -57,7 +57,7 @@
namespace llvm {
/// \brief An opaque object representing a hash code.
/// An opaque object representing a hash code.
///
/// This object represents the result of hashing some entity. It is intended to
/// be used to implement hashtables or other hashing-based data structures.
@ -73,14 +73,14 @@ class hash_code {
size_t value;
public:
/// \brief Default construct a hash_code.
/// Default construct a hash_code.
/// Note that this leaves the value uninitialized.
hash_code() = default;
/// \brief Form a hash code directly from a numerical value.
/// Form a hash code directly from a numerical value.
hash_code(size_t value) : value(value) {}
/// \brief Convert the hash code to its numerical value for use.
/// Convert the hash code to its numerical value for use.
/*explicit*/ operator size_t() const { return value; }
friend bool operator==(const hash_code &lhs, const hash_code &rhs) {
@ -90,11 +90,11 @@ public:
return lhs.value != rhs.value;
}
/// \brief Allow a hash_code to be directly run through hash_value.
/// Allow a hash_code to be directly run through hash_value.
friend size_t hash_value(const hash_code &code) { return code.value; }
};
/// \brief Compute a hash_code for any integer value.
/// Compute a hash_code for any integer value.
///
/// Note that this function is intended to compute the same hash_code for
/// a particular value without regard to the pre-promotion type. This is in
@ -105,21 +105,21 @@ template <typename T>
typename std::enable_if<is_integral_or_enum<T>::value, hash_code>::type
hash_value(T value);
/// \brief Compute a hash_code for a pointer's address.
/// Compute a hash_code for a pointer's address.
///
/// N.B.: This hashes the *address*. Not the value and not the type.
template <typename T> hash_code hash_value(const T *ptr);
/// \brief Compute a hash_code for a pair of objects.
/// Compute a hash_code for a pair of objects.
template <typename T, typename U>
hash_code hash_value(const std::pair<T, U> &arg);
/// \brief Compute a hash_code for a standard string.
/// Compute a hash_code for a standard string.
template <typename T>
hash_code hash_value(const std::basic_string<T> &arg);
/// \brief Override the execution seed with a fixed value.
/// Override the execution seed with a fixed value.
///
/// This hashing library uses a per-execution seed designed to change on each
/// run with high probability in order to ensure that the hash codes are not
@ -164,7 +164,7 @@ static const uint64_t k1 = 0xb492b66fbe98f273ULL;
static const uint64_t k2 = 0x9ae16a3b2f90404fULL;
static const uint64_t k3 = 0xc949d7c7509e6557ULL;
/// \brief Bitwise right rotate.
/// Bitwise right rotate.
/// Normally this will compile to a single instruction, especially if the
/// shift is a manifest constant.
inline uint64_t rotate(uint64_t val, size_t shift) {
@ -254,13 +254,13 @@ inline uint64_t hash_short(const char *s, size_t length, uint64_t seed) {
return k2 ^ seed;
}
/// \brief The intermediate state used during hashing.
/// The intermediate state used during hashing.
/// Currently, the algorithm for computing hash codes is based on CityHash and
/// keeps 56 bytes of arbitrary state.
struct hash_state {
uint64_t h0, h1, h2, h3, h4, h5, h6;
/// \brief Create a new hash_state structure and initialize it based on the
/// Create a new hash_state structure and initialize it based on the
/// seed and the first 64-byte chunk.
/// This effectively performs the initial mix.
static hash_state create(const char *s, uint64_t seed) {
@ -272,7 +272,7 @@ struct hash_state {
return state;
}
/// \brief Mix 32-bytes from the input sequence into the 16-bytes of 'a'
/// Mix 32-bytes from the input sequence into the 16-bytes of 'a'
/// and 'b', including whatever is already in 'a' and 'b'.
static void mix_32_bytes(const char *s, uint64_t &a, uint64_t &b) {
a += fetch64(s);
@ -284,7 +284,7 @@ struct hash_state {
a += c;
}
/// \brief Mix in a 64-byte buffer of data.
/// Mix in a 64-byte buffer of data.
/// We mix all 64 bytes even when the chunk length is smaller, but we
/// record the actual length.
void mix(const char *s) {
@ -302,7 +302,7 @@ struct hash_state {
std::swap(h2, h0);
}
/// \brief Compute the final 64-bit hash code value based on the current
/// Compute the final 64-bit hash code value based on the current
/// state and the length of bytes hashed.
uint64_t finalize(size_t length) {
return hash_16_bytes(hash_16_bytes(h3, h5) + shift_mix(h1) * k1 + h2,
@ -311,7 +311,7 @@ struct hash_state {
};
/// \brief A global, fixed seed-override variable.
/// A global, fixed seed-override variable.
///
/// This variable can be set using the \see llvm::set_fixed_execution_seed
/// function. See that function for details. Do not, under any circumstances,
@ -332,7 +332,7 @@ inline size_t get_execution_seed() {
}
/// \brief Trait to indicate whether a type's bits can be hashed directly.
/// Trait to indicate whether a type's bits can be hashed directly.
///
/// A type trait which is true if we want to combine values for hashing by
/// reading the underlying data. It is false if values of this type must
@ -359,14 +359,14 @@ template <typename T, typename U> struct is_hashable_data<std::pair<T, U> >
(sizeof(T) + sizeof(U)) ==
sizeof(std::pair<T, U>))> {};
/// \brief Helper to get the hashable data representation for a type.
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when the type itself can be used.
template <typename T>
typename std::enable_if<is_hashable_data<T>::value, T>::type
get_hashable_data(const T &value) {
return value;
}
/// \brief Helper to get the hashable data representation for a type.
/// Helper to get the hashable data representation for a type.
/// This variant is enabled when we must first call hash_value and use the
/// result as our data.
template <typename T>
@ -376,7 +376,7 @@ get_hashable_data(const T &value) {
return hash_value(value);
}
/// \brief Helper to store data from a value into a buffer and advance the
/// Helper to store data from a value into a buffer and advance the
/// pointer into that buffer.
///
/// This routine first checks whether there is enough space in the provided
@ -395,7 +395,7 @@ bool store_and_advance(char *&buffer_ptr, char *buffer_end, const T& value,
return true;
}
/// \brief Implement the combining of integral values into a hash_code.
/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is
/// integral. Rather than computing a hash_code for each object and then
@ -435,7 +435,7 @@ hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
return state.finalize(length);
}
/// \brief Implement the combining of integral values into a hash_code.
/// Implement the combining of integral values into a hash_code.
///
/// This overload is selected when the value type of the iterator is integral
/// and when the input iterator is actually a pointer. Rather than computing
@ -470,7 +470,7 @@ hash_combine_range_impl(ValueT *first, ValueT *last) {
} // namespace hashing
/// \brief Compute a hash_code for a sequence of values.
/// Compute a hash_code for a sequence of values.
///
/// This hashes a sequence of values. It produces the same hash_code as
/// 'hash_combine(a, b, c, ...)', but can run over arbitrary sized sequences
@ -486,7 +486,7 @@ hash_code hash_combine_range(InputIteratorT first, InputIteratorT last) {
namespace hashing {
namespace detail {
/// \brief Helper class to manage the recursive combining of hash_combine
/// Helper class to manage the recursive combining of hash_combine
/// arguments.
///
/// This class exists to manage the state and various calls involved in the
@ -499,14 +499,14 @@ struct hash_combine_recursive_helper {
const size_t seed;
public:
/// \brief Construct a recursive hash combining helper.
/// Construct a recursive hash combining helper.
///
/// This sets up the state for a recursive hash combine, including getting
/// the seed and buffer setup.
hash_combine_recursive_helper()
: seed(get_execution_seed()) {}
/// \brief Combine one chunk of data into the current in-flight hash.
/// Combine one chunk of data into the current in-flight hash.
///
/// This merges one chunk of data into the hash. First it tries to buffer
/// the data. If the buffer is full, it hashes the buffer into its
@ -547,7 +547,7 @@ public:
return buffer_ptr;
}
/// \brief Recursive, variadic combining method.
/// Recursive, variadic combining method.
///
/// This function recurses through each argument, combining that argument
/// into a single hash.
@ -560,7 +560,7 @@ public:
return combine(length, buffer_ptr, buffer_end, args...);
}
/// \brief Base case for recursive, variadic combining.
/// Base case for recursive, variadic combining.
///
/// The base case when combining arguments recursively is reached when all
/// arguments have been handled. It flushes the remaining buffer and
@ -588,7 +588,7 @@ public:
} // namespace detail
} // namespace hashing
/// \brief Combine values into a single hash_code.
/// Combine values into a single hash_code.
///
/// This routine accepts a varying number of arguments of any type. It will
/// attempt to combine them into a single hash_code. For user-defined types it
@ -610,7 +610,7 @@ template <typename ...Ts> hash_code hash_combine(const Ts &...args) {
namespace hashing {
namespace detail {
/// \brief Helper to hash the value of a single integer.
/// Helper to hash the value of a single integer.
///
/// Overloads for smaller integer types are not provided to ensure consistent
/// behavior in the presence of integral promotions. Essentially,

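For illustration, a minimal sketch of how the combining routines above are meant to be used from client code (the Point type and hashPoints helper are hypothetical; hash_combine, hash_combine_range and the hash_value extension point are the facilities documented in this header):

#include "llvm/ADT/Hashing.h"
#include <vector>

struct Point { int x, y; };

// Extension point: found via ADL by get_hashable_data()/hash_combine().
llvm::hash_code hash_value(const Point &P) {
  return llvm::hash_combine(P.x, P.y);
}

llvm::hash_code hashPoints(const std::vector<Point> &Ps) {
  // Same result as hash_combine(Ps[0], Ps[1], ...), per the comment above.
  return llvm::hash_combine_range(Ps.begin(), Ps.end());
}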
View File

@ -166,7 +166,7 @@ public:
if (ownsAllocator()) delete &getAllocator();
}
ImmutableList<T> concat(const T& Head, ImmutableList<T> Tail) {
LLVM_NODISCARD ImmutableList<T> concat(const T &Head, ImmutableList<T> Tail) {
// Profile the new list to see if it already exists in our cache.
FoldingSetNodeID ID;
void* InsertPos;
@ -188,7 +188,7 @@ public:
return L;
}
ImmutableList<T> add(const T& D, ImmutableList<T> L) {
LLVM_NODISCARD ImmutableList<T> add(const T& D, ImmutableList<T> L) {
return concat(D, L);
}

View File

@ -114,12 +114,13 @@ public:
ImmutableMap getEmptyMap() { return ImmutableMap(F.getEmptyTree()); }
ImmutableMap add(ImmutableMap Old, key_type_ref K, data_type_ref D) {
LLVM_NODISCARD ImmutableMap add(ImmutableMap Old, key_type_ref K,
data_type_ref D) {
TreeTy *T = F.add(Old.Root, std::pair<key_type,data_type>(K,D));
return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
LLVM_NODISCARD ImmutableMap remove(ImmutableMap Old, key_type_ref K) {
TreeTy *T = F.remove(Old.Root,K);
return ImmutableMap(Canonicalize ? F.getCanonicalTree(T): T);
}
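Why the LLVM_NODISCARD: ImmutableMap is persistent, so add() and remove() leave their input untouched and return a new map; silently dropping that result is almost always a bug. A minimal sketch against the Factory interface shown above (key/value choices are illustrative):

#include "llvm/ADT/ImmutableMap.h"

void demoImmutableMap() {
  llvm::ImmutableMap<int, int>::Factory F;
  llvm::ImmutableMap<int, int> M = F.getEmptyMap();
  M = F.add(M, /*K=*/1, /*D=*/10);  // keep the returned map; M itself is new
  M = F.remove(M, /*K=*/1);         // likewise for remove()
  // F.add(M, 2, 20);               // discarding the result would now warn
}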

View File

@ -1017,7 +1017,7 @@ public:
/// of this operation is logarithmic in the size of the original set.
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
ImmutableSet add(ImmutableSet Old, value_type_ref V) {
LLVM_NODISCARD ImmutableSet add(ImmutableSet Old, value_type_ref V) {
TreeTy *NewT = F.add(Old.Root, V);
return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}
@ -1029,7 +1029,7 @@ public:
/// of this operation is logarithmic in the size of the original set.
/// The memory allocated to represent the set is released when the
/// factory object that created the set is destroyed.
ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
LLVM_NODISCARD ImmutableSet remove(ImmutableSet Old, value_type_ref V) {
TreeTy *NewT = F.remove(Old.Root, V);
return ImmutableSet(Canonicalize ? F.getCanonicalTree(NewT) : NewT);
}

View File

@ -36,13 +36,17 @@ template<typename KeyT, typename ValueT,
typename MapType = DenseMap<KeyT, unsigned>,
typename VectorType = std::vector<std::pair<KeyT, ValueT>>>
class MapVector {
using value_type = typename VectorType::value_type;
using size_type = typename VectorType::size_type;
MapType Map;
VectorType Vector;
static_assert(
std::is_integral<typename MapType::mapped_type>::value,
"The mapped_type of the specified Map must be an integral type");
public:
using value_type = typename VectorType::value_type;
using size_type = typename VectorType::size_type;
using iterator = typename VectorType::iterator;
using const_iterator = typename VectorType::const_iterator;
using reverse_iterator = typename VectorType::reverse_iterator;
@ -93,9 +97,9 @@ public:
}
ValueT &operator[](const KeyT &Key) {
std::pair<KeyT, unsigned> Pair = std::make_pair(Key, 0);
std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(Key, 0);
std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
unsigned &I = Result.first->second;
auto &I = Result.first->second;
if (Result.second) {
Vector.push_back(std::make_pair(Key, ValueT()));
I = Vector.size() - 1;
@ -112,9 +116,9 @@ public:
}
std::pair<iterator, bool> insert(const std::pair<KeyT, ValueT> &KV) {
std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(KV.first, 0);
std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
unsigned &I = Result.first->second;
auto &I = Result.first->second;
if (Result.second) {
Vector.push_back(std::make_pair(KV.first, KV.second));
I = Vector.size() - 1;
@ -125,9 +129,9 @@ public:
std::pair<iterator, bool> insert(std::pair<KeyT, ValueT> &&KV) {
// Copy KV.first into the map, then move it into the vector.
std::pair<KeyT, unsigned> Pair = std::make_pair(KV.first, 0);
std::pair<KeyT, typename MapType::mapped_type> Pair = std::make_pair(KV.first, 0);
std::pair<typename MapType::iterator, bool> Result = Map.insert(Pair);
unsigned &I = Result.first->second;
auto &I = Result.first->second;
if (Result.second) {
Vector.push_back(std::move(KV));
I = Vector.size() - 1;
@ -153,14 +157,14 @@ public:
(Vector.begin() + Pos->second);
}
/// \brief Remove the last element from the vector.
/// Remove the last element from the vector.
void pop_back() {
typename MapType::iterator Pos = Map.find(Vector.back().first);
Map.erase(Pos);
Vector.pop_back();
}
/// \brief Remove the element given by Iterator.
/// Remove the element given by Iterator.
///
/// Returns an iterator to the element following the one which was removed,
/// which may be end().
@ -183,7 +187,7 @@ public:
return Next;
}
/// \brief Remove all elements with the key value Key.
/// Remove all elements with the key value Key.
///
/// Returns the number of elements removed.
size_type erase(const KeyT &Key) {
@ -194,7 +198,7 @@ public:
return 1;
}
/// \brief Remove the elements that match the predicate.
/// Remove the elements that match the predicate.
///
/// Erase all elements that match \c Pred in a single pass. Takes linear
/// time.
@ -223,7 +227,7 @@ void MapVector<KeyT, ValueT, MapType, VectorType>::remove_if(Function Pred) {
Vector.erase(O, Vector.end());
}
/// \brief A MapVector that performs no allocations if smaller than a certain
/// A MapVector that performs no allocations if smaller than a certain
/// size.
template <typename KeyT, typename ValueT, unsigned N>
struct SmallMapVector

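A short usage sketch of the container these hunks touch (keys and values are illustrative): MapVector pairs DenseMap-style lookup with iteration in insertion order.

#include "llvm/ADT/MapVector.h"

void demoMapVector() {
  llvm::MapVector<int, int> MV;
  MV.insert({10, 1});   // insert() returns {iterator, inserted}
  MV[20] = 2;           // operator[] default-constructs, then we assign
  MV[10] = 3;           // existing key: value updated, position unchanged
  // Range-for visits {10, 3} then {20, 2}: insertion order, not key order.
  for (const auto &KV : MV)
    (void)KV;
  MV.pop_back();        // drops {20, 2}, the most recently inserted entry
}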
View File

@ -17,7 +17,7 @@
#define LLVM_ADT_NONE_H
namespace llvm {
/// \brief A simple null object to allow implicit construction of Optional<T>
/// A simple null object to allow implicit construction of Optional<T>
/// and similar types without having to spell out the specialization's name.
// (constant value 1 in an attempt to workaround MSVC build issue... )
enum class NoneType { None = 1 };

View File

@ -27,124 +27,164 @@
namespace llvm {
template <typename T> class Optional {
namespace optional_detail {
/// Storage for any type.
template <typename T, bool IsPodLike> struct OptionalStorage {
AlignedCharArrayUnion<T> storage;
bool hasVal = false;
public:
using value_type = T;
OptionalStorage() = default;
Optional(NoneType) {}
explicit Optional() {}
Optional(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
Optional(const Optional &O) : hasVal(O.hasVal) {
OptionalStorage(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
OptionalStorage(const OptionalStorage &O) : hasVal(O.hasVal) {
if (hasVal)
new (storage.buffer) T(*O);
new (storage.buffer) T(*O.getPointer());
}
Optional(T &&y) : hasVal(true) { new (storage.buffer) T(std::forward<T>(y)); }
Optional(Optional<T> &&O) : hasVal(O) {
if (O) {
new (storage.buffer) T(std::move(*O));
O.reset();
OptionalStorage(T &&y) : hasVal(true) {
new (storage.buffer) T(std::forward<T>(y));
}
OptionalStorage(OptionalStorage &&O) : hasVal(O.hasVal) {
if (O.hasVal) {
new (storage.buffer) T(std::move(*O.getPointer()));
}
}
~Optional() { reset(); }
Optional &operator=(T &&y) {
OptionalStorage &operator=(T &&y) {
if (hasVal)
**this = std::move(y);
*getPointer() = std::move(y);
else {
new (storage.buffer) T(std::move(y));
hasVal = true;
}
return *this;
}
Optional &operator=(Optional &&O) {
if (!O)
OptionalStorage &operator=(OptionalStorage &&O) {
if (!O.hasVal)
reset();
else {
*this = std::move(*O);
O.reset();
*this = std::move(*O.getPointer());
}
return *this;
}
/// Create a new object by constructing it in place with the given arguments.
template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
reset();
hasVal = true;
new (storage.buffer) T(std::forward<ArgTypes>(Args)...);
}
static inline Optional create(const T *y) {
return y ? Optional(*y) : Optional();
}
// FIXME: these assignments (& the equivalent const T&/const Optional& ctors)
// could be made more efficient by passing by value, possibly unifying them
// with the rvalue versions above - but this could place a different set of
// requirements (notably: the existence of a default ctor) when implemented
// in that way. Careful SFINAE to avoid such pitfalls would be required.
Optional &operator=(const T &y) {
OptionalStorage &operator=(const T &y) {
if (hasVal)
**this = y;
*getPointer() = y;
else {
new (storage.buffer) T(y);
hasVal = true;
}
return *this;
}
Optional &operator=(const Optional &O) {
if (!O)
OptionalStorage &operator=(const OptionalStorage &O) {
if (!O.hasVal)
reset();
else
*this = *O;
*this = *O.getPointer();
return *this;
}
~OptionalStorage() { reset(); }
void reset() {
if (hasVal) {
(**this).~T();
(*getPointer()).~T();
hasVal = false;
}
}
const T *getPointer() const {
assert(hasVal);
return reinterpret_cast<const T *>(storage.buffer);
}
T *getPointer() {
assert(hasVal);
return reinterpret_cast<T *>(storage.buffer);
}
const T &getValue() const LLVM_LVALUE_FUNCTION {
const T *getPointer() const {
assert(hasVal);
return *getPointer();
return reinterpret_cast<const T *>(storage.buffer);
}
T &getValue() LLVM_LVALUE_FUNCTION {
assert(hasVal);
return *getPointer();
};
#if !defined(__GNUC__) || defined(__clang__) // GCC up to GCC7 miscompiles this.
/// Storage for trivially copyable types only.
template <typename T> struct OptionalStorage<T, true> {
AlignedCharArrayUnion<T> storage;
bool hasVal = false;
OptionalStorage() = default;
OptionalStorage(const T &y) : hasVal(true) { new (storage.buffer) T(y); }
OptionalStorage &operator=(const T &y) {
*reinterpret_cast<T *>(storage.buffer) = y;
hasVal = true;
return *this;
}
explicit operator bool() const { return hasVal; }
bool hasValue() const { return hasVal; }
void reset() { hasVal = false; }
};
#endif
} // namespace optional_detail
template <typename T> class Optional {
optional_detail::OptionalStorage<T, isPodLike<T>::value> Storage;
public:
using value_type = T;
constexpr Optional() {}
constexpr Optional(NoneType) {}
Optional(const T &y) : Storage(y) {}
Optional(const Optional &O) = default;
Optional(T &&y) : Storage(std::forward<T>(y)) {}
Optional(Optional &&O) = default;
Optional &operator=(T &&y) {
Storage = std::move(y);
return *this;
}
Optional &operator=(Optional &&O) = default;
/// Create a new object by constructing it in place with the given arguments.
template <typename... ArgTypes> void emplace(ArgTypes &&... Args) {
reset();
Storage.hasVal = true;
new (getPointer()) T(std::forward<ArgTypes>(Args)...);
}
static inline Optional create(const T *y) {
return y ? Optional(*y) : Optional();
}
Optional &operator=(const T &y) {
Storage = y;
return *this;
}
Optional &operator=(const Optional &O) = default;
void reset() { Storage.reset(); }
const T *getPointer() const {
assert(Storage.hasVal);
return reinterpret_cast<const T *>(Storage.storage.buffer);
}
T *getPointer() {
assert(Storage.hasVal);
return reinterpret_cast<T *>(Storage.storage.buffer);
}
const T &getValue() const LLVM_LVALUE_FUNCTION { return *getPointer(); }
T &getValue() LLVM_LVALUE_FUNCTION { return *getPointer(); }
explicit operator bool() const { return Storage.hasVal; }
bool hasValue() const { return Storage.hasVal; }
const T *operator->() const { return getPointer(); }
T *operator->() { return getPointer(); }
const T &operator*() const LLVM_LVALUE_FUNCTION {
assert(hasVal);
return *getPointer();
}
T &operator*() LLVM_LVALUE_FUNCTION {
assert(hasVal);
return *getPointer();
}
const T &operator*() const LLVM_LVALUE_FUNCTION { return *getPointer(); }
T &operator*() LLVM_LVALUE_FUNCTION { return *getPointer(); }
template <typename U>
constexpr T getValueOr(U &&value) const LLVM_LVALUE_FUNCTION {
@ -152,14 +192,8 @@ public:
}
#if LLVM_HAS_RVALUE_REFERENCE_THIS
T &&getValue() && {
assert(hasVal);
return std::move(*getPointer());
}
T &&operator*() && {
assert(hasVal);
return std::move(*getPointer());
}
T &&getValue() && { return std::move(*getPointer()); }
T &&operator*() && { return std::move(*getPointer()); }
template <typename U>
T getValueOr(U &&value) && {

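The refactoring above only moves the storage details into optional_detail::OptionalStorage; the client-facing Optional interface is unchanged. A brief sketch of that interface (values are illustrative):

#include "llvm/ADT/Optional.h"

llvm::Optional<int> parseLevel(bool Ok) {
  if (!Ok)
    return llvm::None;        // disengaged
  return 3;                   // engaged
}

void demoOptional() {
  llvm::Optional<int> L = parseLevel(true);
  if (L)                      // explicit operator bool
    (void)*L;                 // operator* / getValue()
  int V = L.getValueOr(0);    // falls back to 0 when disengaged
  L.emplace(7);               // construct a new value in place
  L.reset();                  // back to disengaged
  (void)V;
}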
View File

@ -65,7 +65,7 @@ protected:
}
};
/// \brief Store a vector of values using a specific number of bits for each
/// Store a vector of values using a specific number of bits for each
/// value. Both signed and unsigned types can be used, e.g
/// @code
/// PackedVector<signed, 2> vec;

View File

@ -346,6 +346,12 @@ struct PointerLikeTypeTraits<PointerUnion3<PT1, PT2, PT3>> {
};
};
template <typename PT1, typename PT2, typename PT3>
bool operator<(PointerUnion3<PT1, PT2, PT3> lhs,
PointerUnion3<PT1, PT2, PT3> rhs) {
return lhs.getOpaqueValue() < rhs.getOpaqueValue();
}
/// A pointer union of four pointer types. See documentation for PointerUnion
/// for usage.
template <typename PT1, typename PT2, typename PT3, typename PT4>

View File

@ -33,7 +33,7 @@
namespace llvm {
/// \brief Enumerate the SCCs of a directed graph in reverse topological order
/// Enumerate the SCCs of a directed graph in reverse topological order
/// of the SCC DAG.
///
/// This is implemented using Tarjan's DFS algorithm using an internal stack to
@ -104,7 +104,7 @@ public:
}
static scc_iterator end(const GraphT &) { return scc_iterator(); }
/// \brief Direct loop termination test which is more efficient than
/// Direct loop termination test which is more efficient than
/// comparison with \c end().
bool isAtEnd() const {
assert(!CurrentSCC.empty() || VisitStack.empty());
@ -125,7 +125,7 @@ public:
return CurrentSCC;
}
/// \brief Test if the current SCC has a loop.
/// Test if the current SCC has a loop.
///
/// If the SCC has more than one node, this is trivially true. If not, it may
/// still contain a loop if the node has an edge back to itself.
@ -222,12 +222,12 @@ bool scc_iterator<GraphT, GT>::hasLoop() const {
return false;
}
/// \brief Construct the begin iterator for a deduced graph type T.
/// Construct the begin iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_begin(const T &G) {
return scc_iterator<T>::begin(G);
}
/// \brief Construct the end iterator for a deduced graph type T.
/// Construct the end iterator for a deduced graph type T.
template <class T> scc_iterator<T> scc_end(const T &G) {
return scc_iterator<T>::end(G);
}
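For reference, a sketch of the iteration idiom this header supports, here over a function's CFG; this assumes the GraphTraits specialization from llvm/IR/CFG.h, which is not part of this file:

#include "llvm/ADT/SCCIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"

void visitSCCs(llvm::Function &F) {
  for (auto I = llvm::scc_begin(&F); !I.isAtEnd(); ++I) {
    // One SCC at a time, in reverse topological order of the SCC DAG.
    const std::vector<llvm::BasicBlock *> &SCC = *I;
    bool InCycle = I.hasLoop();  // multi-block SCC, or a single block with a self-loop
    (void)SCC;
    (void)InCycle;
  }
}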

View File

@ -36,6 +36,10 @@
#include <type_traits>
#include <utility>
#ifdef EXPENSIVE_CHECKS
#include <random> // for std::mt19937
#endif
namespace llvm {
// Only used by compiler if both template types are the same. Useful when
@ -53,6 +57,19 @@ using ValueOfRange = typename std::remove_reference<decltype(
} // end namespace detail
//===----------------------------------------------------------------------===//
// Extra additions to <type_traits>
//===----------------------------------------------------------------------===//
template <typename T>
struct negation : std::integral_constant<bool, !bool(T::value)> {};
template <typename...> struct conjunction : std::true_type {};
template <typename B1> struct conjunction<B1> : B1 {};
template <typename B1, typename... Bn>
struct conjunction<B1, Bn...>
: std::conditional<bool(B1::value), conjunction<Bn...>, B1>::type {};
//===----------------------------------------------------------------------===//
// Extra additions to <functional>
//===----------------------------------------------------------------------===//
@ -101,6 +118,7 @@ class function_ref<Ret(Params...)> {
public:
function_ref() = default;
function_ref(std::nullptr_t) {}
template <typename Callable>
function_ref(Callable &&callable,
@ -266,60 +284,121 @@ auto reverse(
/// auto R = make_filter_range(A, [](int N) { return N % 2 == 1; });
/// // R contains { 1, 3 }.
/// \endcode
template <typename WrappedIteratorT, typename PredicateT>
class filter_iterator
///
/// Note: filter_iterator_base implements support for forward iteration.
/// filter_iterator_impl exists to provide support for bidirectional iteration,
/// conditional on whether the wrapped iterator supports it.
template <typename WrappedIteratorT, typename PredicateT, typename IterTag>
class filter_iterator_base
: public iterator_adaptor_base<
filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
WrappedIteratorT,
typename std::common_type<
std::forward_iterator_tag,
typename std::iterator_traits<
WrappedIteratorT>::iterator_category>::type> {
IterTag, typename std::iterator_traits<
WrappedIteratorT>::iterator_category>::type> {
using BaseT = iterator_adaptor_base<
filter_iterator<WrappedIteratorT, PredicateT>, WrappedIteratorT,
filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>,
WrappedIteratorT,
typename std::common_type<
std::forward_iterator_tag,
typename std::iterator_traits<WrappedIteratorT>::iterator_category>::
type>;
IterTag, typename std::iterator_traits<
WrappedIteratorT>::iterator_category>::type>;
struct PayloadType {
WrappedIteratorT End;
PredicateT Pred;
};
Optional<PayloadType> Payload;
protected:
WrappedIteratorT End;
PredicateT Pred;
void findNextValid() {
assert(Payload && "Payload should be engaged when findNextValid is called");
while (this->I != Payload->End && !Payload->Pred(*this->I))
while (this->I != End && !Pred(*this->I))
BaseT::operator++();
}
// Construct the begin iterator. The begin iterator requires to know where end
// is, so that it can properly stop when it hits end.
filter_iterator(WrappedIteratorT Begin, WrappedIteratorT End, PredicateT Pred)
: BaseT(std::move(Begin)),
Payload(PayloadType{std::move(End), std::move(Pred)}) {
// Construct the iterator. The begin iterator needs to know where the end
// is, so that it can properly stop when it gets there. The end iterator only
// needs the predicate to support bidirectional iteration.
filter_iterator_base(WrappedIteratorT Begin, WrappedIteratorT End,
PredicateT Pred)
: BaseT(Begin), End(End), Pred(Pred) {
findNextValid();
}
// Construct the end iterator. It's not incrementable, so Payload doesn't
// have to be engaged.
filter_iterator(WrappedIteratorT End) : BaseT(End) {}
public:
using BaseT::operator++;
filter_iterator &operator++() {
filter_iterator_base &operator++() {
BaseT::operator++();
findNextValid();
return *this;
}
template <typename RT, typename PT>
friend iterator_range<filter_iterator<detail::IterOfRange<RT>, PT>>
make_filter_range(RT &&, PT);
};
/// Specialization of filter_iterator_base for forward iteration only.
template <typename WrappedIteratorT, typename PredicateT,
typename IterTag = std::forward_iterator_tag>
class filter_iterator_impl
: public filter_iterator_base<WrappedIteratorT, PredicateT, IterTag> {
using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT, IterTag>;
public:
filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
PredicateT Pred)
: BaseT(Begin, End, Pred) {}
};
/// Specialization of filter_iterator_base for bidirectional iteration.
template <typename WrappedIteratorT, typename PredicateT>
class filter_iterator_impl<WrappedIteratorT, PredicateT,
std::bidirectional_iterator_tag>
: public filter_iterator_base<WrappedIteratorT, PredicateT,
std::bidirectional_iterator_tag> {
using BaseT = filter_iterator_base<WrappedIteratorT, PredicateT,
std::bidirectional_iterator_tag>;
void findPrevValid() {
while (!this->Pred(*this->I))
BaseT::operator--();
}
public:
using BaseT::operator--;
filter_iterator_impl(WrappedIteratorT Begin, WrappedIteratorT End,
PredicateT Pred)
: BaseT(Begin, End, Pred) {}
filter_iterator_impl &operator--() {
BaseT::operator--();
findPrevValid();
return *this;
}
};
namespace detail {
template <bool is_bidirectional> struct fwd_or_bidi_tag_impl {
using type = std::forward_iterator_tag;
};
template <> struct fwd_or_bidi_tag_impl<true> {
using type = std::bidirectional_iterator_tag;
};
/// Helper which sets its type member to forward_iterator_tag if the category
/// of \p IterT does not derive from bidirectional_iterator_tag, and to
/// bidirectional_iterator_tag otherwise.
template <typename IterT> struct fwd_or_bidi_tag {
using type = typename fwd_or_bidi_tag_impl<std::is_base_of<
std::bidirectional_iterator_tag,
typename std::iterator_traits<IterT>::iterator_category>::value>::type;
};
} // namespace detail
/// Defines filter_iterator to a suitable specialization of
/// filter_iterator_impl, based on the underlying iterator's category.
template <typename WrappedIteratorT, typename PredicateT>
using filter_iterator = filter_iterator_impl<
WrappedIteratorT, PredicateT,
typename detail::fwd_or_bidi_tag<WrappedIteratorT>::type>;
/// Convenience function that takes a range of elements and a predicate,
/// and return a new filter_iterator range.
///
@ -332,10 +411,11 @@ iterator_range<filter_iterator<detail::IterOfRange<RangeT>, PredicateT>>
make_filter_range(RangeT &&Range, PredicateT Pred) {
using FilterIteratorT =
filter_iterator<detail::IterOfRange<RangeT>, PredicateT>;
return make_range(FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
std::end(std::forward<RangeT>(Range)),
std::move(Pred)),
FilterIteratorT(std::end(std::forward<RangeT>(Range))));
return make_range(
FilterIteratorT(std::begin(std::forward<RangeT>(Range)),
std::end(std::forward<RangeT>(Range)), Pred),
FilterIteratorT(std::end(std::forward<RangeT>(Range)),
std::end(std::forward<RangeT>(Range)), Pred));
}
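A brief sketch of what the bidirectional specialization buys, reusing the doc comment's example data (the demo function is illustrative):

#include "llvm/ADT/STLExtras.h"
#include <vector>

void demoFilterRange() {
  std::vector<int> A = {0, 1, 2, 3, 4};
  auto R = llvm::make_filter_range(A, [](int N) { return N % 2 == 1; });
  // Forward iteration over R yields 1, 3. std::vector's iterators are
  // bidirectional, so the filtered iterators now support operator-- too:
  auto It = R.end();
  --It;             // steps past 4 (predicate fails) and stops at 3
  int Last = *It;   // 3
  (void)Last;
}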
// forward declarations required by zip_shortest/zip_first
@ -644,7 +724,7 @@ detail::concat_range<ValueT, RangeTs...> concat(RangeTs &&... Ranges) {
// Extra additions to <utility>
//===----------------------------------------------------------------------===//
/// \brief Function object to check whether the first component of a std::pair
/// Function object to check whether the first component of a std::pair
/// compares less than the first component of another std::pair.
struct less_first {
template <typename T> bool operator()(const T &lhs, const T &rhs) const {
@ -652,7 +732,7 @@ struct less_first {
}
};
/// \brief Function object to check whether the second component of a std::pair
/// Function object to check whether the second component of a std::pair
/// compares less than the second component of another std::pair.
struct less_second {
template <typename T> bool operator()(const T &lhs, const T &rhs) const {
@ -662,14 +742,14 @@ struct less_second {
// A subset of N3658. More stuff can be added as-needed.
/// \brief Represents a compile-time sequence of integers.
/// Represents a compile-time sequence of integers.
template <class T, T... I> struct integer_sequence {
using value_type = T;
static constexpr size_t size() { return sizeof...(I); }
};
/// \brief Alias for the common case of a sequence of size_ts.
/// Alias for the common case of a sequence of size_ts.
template <size_t... I>
struct index_sequence : integer_sequence<std::size_t, I...> {};
@ -678,7 +758,7 @@ struct build_index_impl : build_index_impl<N - 1, N - 1, I...> {};
template <std::size_t... I>
struct build_index_impl<0, I...> : index_sequence<I...> {};
/// \brief Creates a compile-time integer sequence for a parameter pack.
/// Creates a compile-time integer sequence for a parameter pack.
template <class... Ts>
struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
@ -687,7 +767,7 @@ struct index_sequence_for : build_index_impl<sizeof...(Ts)> {};
template <int N> struct rank : rank<N - 1> {};
template <> struct rank<0> {};
/// \brief traits class for checking whether type T is one of any of the given
/// traits class for checking whether type T is one of any of the given
/// types in the variadic list.
template <typename T, typename... Ts> struct is_one_of {
static const bool value = false;
@ -699,7 +779,7 @@ struct is_one_of<T, U, Ts...> {
std::is_same<T, U>::value || is_one_of<T, Ts...>::value;
};
/// \brief traits class for checking whether type T is a base class for all
/// traits class for checking whether type T is a base class for all
/// the given types in the variadic list.
template <typename T, typename... Ts> struct are_base_of {
static const bool value = true;
@ -761,6 +841,10 @@ inline void array_pod_sort(IteratorTy Start, IteratorTy End) {
// behavior with an empty sequence.
auto NElts = End - Start;
if (NElts <= 1) return;
#ifdef EXPENSIVE_CHECKS
std::mt19937 Generator(std::random_device{}());
std::shuffle(Start, End, Generator);
#endif
qsort(&*Start, NElts, sizeof(*Start), get_array_pod_sort_comparator(*Start));
}
@ -774,10 +858,34 @@ inline void array_pod_sort(
// behavior with an empty sequence.
auto NElts = End - Start;
if (NElts <= 1) return;
#ifdef EXPENSIVE_CHECKS
std::mt19937 Generator(std::random_device{}());
std::shuffle(Start, End, Generator);
#endif
qsort(&*Start, NElts, sizeof(*Start),
reinterpret_cast<int (*)(const void *, const void *)>(Compare));
}
// Provide wrappers to std::sort which shuffle the elements before sorting
// to help uncover non-deterministic behavior (PR35135).
template <typename IteratorTy>
inline void sort(IteratorTy Start, IteratorTy End) {
#ifdef EXPENSIVE_CHECKS
std::mt19937 Generator(std::random_device{}());
std::shuffle(Start, End, Generator);
#endif
std::sort(Start, End);
}
template <typename IteratorTy, typename Compare>
inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
#ifdef EXPENSIVE_CHECKS
std::mt19937 Generator(std::random_device{}());
std::shuffle(Start, End, Generator);
#endif
std::sort(Start, End, Comp);
}
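A short sketch of the kind of bug the pre-sort shuffle is meant to expose (the data is illustrative): when a comparator orders only part of the key, the relative order of "equal" elements is unspecified, and under EXPENSIVE_CHECKS it now varies from run to run instead of silently staying stable.

#include "llvm/ADT/STLExtras.h"
#include <utility>
#include <vector>

void demoShuffledSort() {
  std::vector<std::pair<int, char>> V = {{1, 'a'}, {1, 'b'}, {2, 'c'}};
  // Orders by first only, so {1, 'a'} and {1, 'b'} compare equal. Code that
  // depends on which of them ends up first is non-deterministic, and the
  // EXPENSIVE_CHECKS shuffle above makes that visible.
  llvm::sort(V.begin(), V.end(),
             [](const std::pair<int, char> &L, const std::pair<int, char> &R) {
               return L.first < R.first;
             });
}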
//===----------------------------------------------------------------------===//
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//
@ -861,6 +969,11 @@ OutputIt copy_if(R &&Range, OutputIt Out, UnaryPredicate P) {
return std::copy_if(adl_begin(Range), adl_end(Range), Out, P);
}
template <typename R, typename OutputIt>
OutputIt copy(R &&Range, OutputIt Out) {
return std::copy(adl_begin(Range), adl_end(Range), Out);
}
/// Wrapper function around std::find to detect if an element exists
/// in a container.
template <typename R, typename E>
@ -905,7 +1018,7 @@ auto lower_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range)) {
return std::lower_bound(adl_begin(Range), adl_end(Range), I);
}
/// \brief Given a range of type R, iterate the entire range and return a
/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
template <unsigned Size, typename R>
@ -926,13 +1039,25 @@ void erase_if(Container &C, UnaryPredicate P) {
C.erase(remove_if(C, P), C.end());
}
/// Get the size of a range. This is a wrapper function around std::distance
/// which is only enabled when the operation is O(1).
template <typename R>
auto size(R &&Range, typename std::enable_if<
std::is_same<typename std::iterator_traits<decltype(
Range.begin())>::iterator_category,
std::random_access_iterator_tag>::value,
void>::type * = nullptr)
-> decltype(std::distance(Range.begin(), Range.end())) {
return std::distance(Range.begin(), Range.end());
}
//===----------------------------------------------------------------------===//
// Extra additions to <memory>
//===----------------------------------------------------------------------===//
// Implement make_unique according to N3656.
/// \brief Constructs a `new T()` with the given args and returns a
/// Constructs a `new T()` with the given args and returns a
/// `unique_ptr<T>` which owns the object.
///
/// Example:
@ -945,7 +1070,7 @@ make_unique(Args &&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/// \brief Constructs a `new T[n]` with the given args and returns a
/// Constructs a `new T[n]` with the given args and returns a
/// `unique_ptr<T[]>` which owns the object.
///
/// \param n size of the new array.

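The example promised in the make_unique comment is cut off by the hunk boundary; a representative sketch of both overloads (the string contents are illustrative):

#include "llvm/ADT/STLExtras.h"
#include <string>

void demoMakeUnique() {
  // Single-object form: forwards the arguments to T's constructor.
  auto S = llvm::make_unique<std::string>(3, 'x');   // "xxx"
  // Array form: value-initializes n elements of T.
  auto A = llvm::make_unique<std::string[]>(4);
  A[0] = *S;
}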
View File

@ -25,14 +25,26 @@ namespace detail {
template <typename Callable> class scope_exit {
Callable ExitFunction;
bool Engaged = true; // False once moved-from or release()d.
public:
template <typename Fp>
explicit scope_exit(Fp &&F) : ExitFunction(std::forward<Fp>(F)) {}
scope_exit(scope_exit &&Rhs) : ExitFunction(std::move(Rhs.ExitFunction)) {}
scope_exit(scope_exit &&Rhs)
: ExitFunction(std::move(Rhs.ExitFunction)), Engaged(Rhs.Engaged) {
Rhs.release();
}
scope_exit(const scope_exit &) = delete;
scope_exit &operator=(scope_exit &&) = delete;
scope_exit &operator=(const scope_exit &) = delete;
~scope_exit() { ExitFunction(); }
void release() { Engaged = false; }
~scope_exit() {
if (Engaged)
ExitFunction();
}
};
} // end namespace detail
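These objects are normally created through the make_scope_exit helper later in this header (not shown in this hunk); a sketch of how the new release() interacts with it:

#include "llvm/ADT/ScopeExit.h"
#include <cstdio>

void demoScopeExit(bool Commit) {
  auto Cleanup = llvm::make_scope_exit([] { std::puts("rolled back"); });
  if (Commit)
    Cleanup.release();  // disengage: the exit function will not run
  // On scope exit, "rolled back" is printed only when Commit is false.
}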

View File

@ -31,7 +31,7 @@
namespace llvm {
/// \brief A vector that has set insertion semantics.
/// A vector that has set insertion semantics.
///
/// This adapter class provides a way to keep a set of things that also has the
/// property of a deterministic iteration order. The order of iteration is the
@ -52,10 +52,10 @@ public:
using const_reverse_iterator = typename vector_type::const_reverse_iterator;
using size_type = typename vector_type::size_type;
/// \brief Construct an empty SetVector
/// Construct an empty SetVector
SetVector() = default;
/// \brief Initialize a SetVector with a range of elements
/// Initialize a SetVector with a range of elements
template<typename It>
SetVector(It Start, It End) {
insert(Start, End);
@ -69,75 +69,75 @@ public:
return std::move(vector_);
}
/// \brief Determine if the SetVector is empty or not.
/// Determine if the SetVector is empty or not.
bool empty() const {
return vector_.empty();
}
/// \brief Determine the number of elements in the SetVector.
/// Determine the number of elements in the SetVector.
size_type size() const {
return vector_.size();
}
/// \brief Get an iterator to the beginning of the SetVector.
/// Get an iterator to the beginning of the SetVector.
iterator begin() {
return vector_.begin();
}
/// \brief Get a const_iterator to the beginning of the SetVector.
/// Get a const_iterator to the beginning of the SetVector.
const_iterator begin() const {
return vector_.begin();
}
/// \brief Get an iterator to the end of the SetVector.
/// Get an iterator to the end of the SetVector.
iterator end() {
return vector_.end();
}
/// \brief Get a const_iterator to the end of the SetVector.
/// Get a const_iterator to the end of the SetVector.
const_iterator end() const {
return vector_.end();
}
/// \brief Get a reverse_iterator to the end of the SetVector.
/// Get a reverse_iterator to the end of the SetVector.
reverse_iterator rbegin() {
return vector_.rbegin();
}
/// \brief Get a const_reverse_iterator to the end of the SetVector.
/// Get a const_reverse_iterator to the end of the SetVector.
const_reverse_iterator rbegin() const {
return vector_.rbegin();
}
/// \brief Get a reverse_iterator to the beginning of the SetVector.
/// Get a reverse_iterator to the beginning of the SetVector.
reverse_iterator rend() {
return vector_.rend();
}
/// \brief Get a const_reverse_iterator to the beginning of the SetVector.
/// Get a const_reverse_iterator to the beginning of the SetVector.
const_reverse_iterator rend() const {
return vector_.rend();
}
/// \brief Return the first element of the SetVector.
/// Return the first element of the SetVector.
const T &front() const {
assert(!empty() && "Cannot call front() on empty SetVector!");
return vector_.front();
}
/// \brief Return the last element of the SetVector.
/// Return the last element of the SetVector.
const T &back() const {
assert(!empty() && "Cannot call back() on empty SetVector!");
return vector_.back();
}
/// \brief Index into the SetVector.
/// Index into the SetVector.
const_reference operator[](size_type n) const {
assert(n < vector_.size() && "SetVector access out of range!");
return vector_[n];
}
/// \brief Insert a new element into the SetVector.
/// Insert a new element into the SetVector.
/// \returns true if the element was inserted into the SetVector.
bool insert(const value_type &X) {
bool result = set_.insert(X).second;
@ -146,7 +146,7 @@ public:
return result;
}
/// \brief Insert a range of elements into the SetVector.
/// Insert a range of elements into the SetVector.
template<typename It>
void insert(It Start, It End) {
for (; Start != End; ++Start)
@ -154,7 +154,7 @@ public:
vector_.push_back(*Start);
}
/// \brief Remove an item from the set vector.
/// Remove an item from the set vector.
bool remove(const value_type& X) {
if (set_.erase(X)) {
typename vector_type::iterator I = find(vector_, X);
@ -183,7 +183,7 @@ public:
return vector_.erase(NI);
}
/// \brief Remove items from the set vector based on a predicate function.
/// Remove items from the set vector based on a predicate function.
///
/// This is intended to be equivalent to the following code, if we could
/// write it:
@ -206,19 +206,19 @@ public:
return true;
}
/// \brief Count the number of elements of a given key in the SetVector.
/// Count the number of elements of a given key in the SetVector.
/// \returns 0 if the element is not in the SetVector, 1 if it is.
size_type count(const key_type &key) const {
return set_.count(key);
}
/// \brief Completely clear the SetVector
/// Completely clear the SetVector
void clear() {
set_.clear();
vector_.clear();
}
/// \brief Remove the last element of the SetVector.
/// Remove the last element of the SetVector.
void pop_back() {
assert(!empty() && "Cannot remove an element from an empty SetVector!");
set_.erase(back());
@ -239,7 +239,7 @@ public:
return vector_ != that.vector_;
}
/// \brief Compute This := This u S, return whether 'This' changed.
/// Compute This := This u S, return whether 'This' changed.
/// TODO: We should be able to use set_union from SetOperations.h, but
/// SetVector interface is inconsistent with DenseSet.
template <class STy>
@ -254,7 +254,7 @@ public:
return Changed;
}
/// \brief Compute This := This - B
/// Compute This := This - B
/// TODO: We should be able to use set_subtract from SetOperations.h, but
/// SetVector interface is inconsistent with DenseSet.
template <class STy>
@ -265,7 +265,7 @@ public:
}
private:
/// \brief A wrapper predicate designed for use with std::remove_if.
/// A wrapper predicate designed for use with std::remove_if.
///
/// This predicate wraps a predicate suitable for use with std::remove_if to
/// call set_.erase(x) on each element which is slated for removal.
@ -292,7 +292,7 @@ private:
vector_type vector_; ///< The vector.
};
/// \brief A SetVector that performs no allocations if smaller than
/// A SetVector that performs no allocations if smaller than
/// a certain size.
template <typename T, unsigned N>
class SmallSetVector
@ -300,7 +300,7 @@ class SmallSetVector
public:
SmallSetVector() = default;
/// \brief Initialize a SmallSetVector with a range of elements
/// Initialize a SmallSetVector with a range of elements
template<typename It>
SmallSetVector(It Start, It End) {
this->insert(Start, End);

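A brief usage sketch of the container documented above (values are illustrative):

#include "llvm/ADT/SetVector.h"

void demoSetVector() {
  llvm::SetVector<int> SV;
  SV.insert(3);               // true: newly inserted
  SV.insert(1);
  SV.insert(3);               // false: duplicate, order unchanged
  // Iteration order is insertion order: 3, then 1.
  for (int V : SV)
    (void)V;
  SV.remove_if([](int V) { return V > 2; });  // drops 3, keeps 1
  bool Had1 = SV.remove(1);   // true
  (void)Had1;
}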
View File

@ -335,7 +335,7 @@ struct RoundUpToPowerOfTwo {
enum { Val = RoundUpToPowerOfTwoH<N, (N&(N-1)) == 0>::Val };
};
/// \brief A templated base class for \c SmallPtrSet which provides the
/// A templated base class for \c SmallPtrSet which provides the
/// typesafe interface that is common across all small sizes.
///
/// This is particularly useful for passing around between interface boundaries

View File

@ -17,21 +17,120 @@
#include "llvm/ADT/None.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <cstddef>
#include <functional>
#include <set>
#include <type_traits>
#include <utility>
namespace llvm {
/// SmallSetIterator - This class implements a const_iterator for SmallSet by
/// delegating to the underlying SmallVector or Set iterators.
template <typename T, unsigned N, typename C>
class SmallSetIterator
: public iterator_facade_base<SmallSetIterator<T, N, C>,
std::forward_iterator_tag, T> {
private:
using SetIterTy = typename std::set<T, C>::const_iterator;
using VecIterTy = typename SmallVector<T, N>::const_iterator;
using SelfTy = SmallSetIterator<T, N, C>;
/// Iterators to the parts of the SmallSet containing the data. They are set
/// depending on isSmall.
union {
SetIterTy SetIter;
VecIterTy VecIter;
};
bool isSmall;
public:
SmallSetIterator(SetIterTy SetIter) : SetIter(SetIter), isSmall(false) {}
SmallSetIterator(VecIterTy VecIter) : VecIter(VecIter), isSmall(true) {}
// Spell out destructor, copy/move constructor and assignment operators for
// MSVC STL, where set<T>::const_iterator is not trivially copy constructible.
~SmallSetIterator() {
if (isSmall)
VecIter.~VecIterTy();
else
SetIter.~SetIterTy();
}
SmallSetIterator(const SmallSetIterator &Other) : isSmall(Other.isSmall) {
if (isSmall)
VecIter = Other.VecIter;
else
// Use placement new, to make sure SetIter is properly constructed, even
// if it is not trivially copy-able (e.g. in MSVC).
new (&SetIter) SetIterTy(Other.SetIter);
}
SmallSetIterator(SmallSetIterator &&Other) : isSmall(Other.isSmall) {
if (isSmall)
VecIter = std::move(Other.VecIter);
else
// Use placement new, to make sure SetIter is properly constructed, even
// if it is not trivially copy-able (e.g. in MSVC).
new (&SetIter) SetIterTy(std::move(Other.SetIter));
}
SmallSetIterator& operator=(const SmallSetIterator& Other) {
// Call destructor for SetIter, so it gets properly destroyed if it is
// not trivially destructible in case we are setting VecIter.
if (!isSmall)
SetIter.~SetIterTy();
isSmall = Other.isSmall;
if (isSmall)
VecIter = Other.VecIter;
else
new (&SetIter) SetIterTy(Other.SetIter);
return *this;
}
SmallSetIterator& operator=(SmallSetIterator&& Other) {
// Call destructor for SetIter, so it gets properly destroyed if it is
// not trivially destructible in case we are setting VecIter.
if (!isSmall)
SetIter.~SetIterTy();
isSmall = Other.isSmall;
if (isSmall)
VecIter = std::move(Other.VecIter);
else
new (&SetIter) SetIterTy(std::move(Other.SetIter));
return *this;
}
bool operator==(const SmallSetIterator &RHS) const {
if (isSmall != RHS.isSmall)
return false;
if (isSmall)
return VecIter == RHS.VecIter;
return SetIter == RHS.SetIter;
}
SmallSetIterator &operator++() { // Preincrement
if (isSmall)
VecIter++;
else
SetIter++;
return *this;
}
const T &operator*() const { return isSmall ? *VecIter : *SetIter; }
};
/// SmallSet - This maintains a set of unique values, optimizing for the case
/// when the set is small (less than N). In this case, the set can be
/// maintained with no mallocs. If the set gets large, we expand to using an
/// std::set to maintain reasonable lookup times.
///
/// Note that this set does not provide a way to iterate over members in the
/// set.
template <typename T, unsigned N, typename C = std::less<T>>
class SmallSet {
/// Use a SmallVector to hold the elements here (even though it will never
@ -50,6 +149,7 @@ class SmallSet {
public:
using size_type = size_t;
using const_iterator = SmallSetIterator<T, N, C>;
SmallSet() = default;
@ -121,6 +221,18 @@ public:
Set.clear();
}
const_iterator begin() const {
if (isSmall())
return {Vector.begin()};
return {Set.begin()};
}
const_iterator end() const {
if (isSmall())
return {Vector.end()};
return {Set.end()};
}
private:
bool isSmall() const { return Set.empty(); }

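With the new SmallSetIterator and begin()/end(), a SmallSet can now be iterated (superseding the older note above that it provides no way to iterate). A short sketch; contents are illustrative, and iteration order is unspecified since it depends on whether the small representation is in use:

#include "llvm/ADT/SmallSet.h"

void demoSmallSet() {
  llvm::SmallSet<int, 4> S;
  S.insert(1);
  S.insert(2);
  S.insert(2);                  // duplicate, ignored
  bool Has2 = S.count(2) == 1;  // true
  for (int V : S)               // const_iterator from SmallSetIterator above
    (void)V;
  (void)Has2;
}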
View File

@ -18,6 +18,7 @@
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemAlloc.h"
#include "llvm/Support/type_traits.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
@ -37,28 +38,42 @@ namespace llvm {
/// This is all the non-templated stuff common to all SmallVectors.
class SmallVectorBase {
protected:
void *BeginX, *EndX, *CapacityX;
void *BeginX;
unsigned Size = 0, Capacity;
protected:
SmallVectorBase(void *FirstEl, size_t Size)
: BeginX(FirstEl), EndX(FirstEl), CapacityX((char*)FirstEl+Size) {}
SmallVectorBase() = delete;
SmallVectorBase(void *FirstEl, size_t Capacity)
: BeginX(FirstEl), Capacity(Capacity) {}
/// This is an implementation of the grow() method which only works
/// on POD-like data types and is out of line to reduce code duplication.
void grow_pod(void *FirstEl, size_t MinSizeInBytes, size_t TSize);
void grow_pod(void *FirstEl, size_t MinCapacity, size_t TSize);
public:
/// This returns size()*sizeof(T).
size_t size_in_bytes() const {
return size_t((char*)EndX - (char*)BeginX);
}
size_t size() const { return Size; }
size_t capacity() const { return Capacity; }
/// capacity_in_bytes - This returns capacity()*sizeof(T).
size_t capacity_in_bytes() const {
return size_t((char*)CapacityX - (char*)BeginX);
}
LLVM_NODISCARD bool empty() const { return !Size; }
LLVM_NODISCARD bool empty() const { return BeginX == EndX; }
/// Set the array size to \p N, which the current array must have enough
/// capacity for.
///
/// This does not construct or destroy any elements in the vector.
///
/// Clients can use this in conjunction with capacity() to write past the end
/// of the buffer when they know that more elements are available, and only
/// update the size later. This avoids the cost of value initializing elements
/// which will only be overwritten.
void set_size(size_t Size) {
assert(Size <= capacity());
this->Size = Size;
}
};
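A sketch of the write-past-the-end pattern that the set_size() comment describes; the snprintf use is illustrative and relies on reserve() having guaranteed the capacity and on char needing no construction:

#include "llvm/ADT/SmallVector.h"
#include <cstdio>

void formatInto(llvm::SmallVectorImpl<char> &Buf, int Value) {
  Buf.clear();
  Buf.reserve(32);  // ensure capacity without value-initializing elements
  int N = std::snprintf(Buf.data(), Buf.capacity(), "%d", Value);
  if (N > 0 && static_cast<size_t>(N) < Buf.capacity())
    Buf.set_size(static_cast<size_t>(N));  // publish what snprintf wrote
}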
/// Figure out the offset of the first element.
template <class T, typename = void> struct SmallVectorAlignmentAndSize {
AlignedCharArrayUnion<SmallVectorBase> Base;
AlignedCharArrayUnion<T> FirstEl;
};
/// This is the part of SmallVectorTemplateBase which does not depend on whether
@ -66,36 +81,34 @@ public:
/// to avoid unnecessarily requiring T to be complete.
template <typename T, typename = void>
class SmallVectorTemplateCommon : public SmallVectorBase {
private:
template <typename, unsigned> friend struct SmallVectorStorage;
// Allocate raw space for N elements of type T. If T has a ctor or dtor, we
// don't want it to be automatically run, so we need to represent the space as
// something else. Use an array of char of sufficient alignment.
using U = AlignedCharArrayUnion<T>;
U FirstEl;
/// Find the address of the first element. For this pointer math to be valid
/// with small-size of 0 for T with lots of alignment, it's important that
/// SmallVectorStorage is properly-aligned even for small-size of 0.
void *getFirstEl() const {
return const_cast<void *>(reinterpret_cast<const void *>(
reinterpret_cast<const char *>(this) +
offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
}
// Space after 'FirstEl' is clobbered, do not add any instance vars after it.
protected:
SmallVectorTemplateCommon(size_t Size) : SmallVectorBase(&FirstEl, Size) {}
SmallVectorTemplateCommon(size_t Size)
: SmallVectorBase(getFirstEl(), Size) {}
void grow_pod(size_t MinSizeInBytes, size_t TSize) {
SmallVectorBase::grow_pod(&FirstEl, MinSizeInBytes, TSize);
void grow_pod(size_t MinCapacity, size_t TSize) {
SmallVectorBase::grow_pod(getFirstEl(), MinCapacity, TSize);
}
/// Return true if this is a smallvector which has not had dynamic
/// memory allocated for it.
bool isSmall() const {
return BeginX == static_cast<const void*>(&FirstEl);
}
bool isSmall() const { return BeginX == getFirstEl(); }
/// Put this vector in a state of being small.
void resetToSmall() {
BeginX = EndX = CapacityX = &FirstEl;
BeginX = getFirstEl();
Size = Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
}
void setEnd(T *P) { this->EndX = P; }
public:
using size_type = size_t;
using difference_type = ptrdiff_t;
@ -117,27 +130,20 @@ public:
LLVM_ATTRIBUTE_ALWAYS_INLINE
const_iterator begin() const { return (const_iterator)this->BeginX; }
LLVM_ATTRIBUTE_ALWAYS_INLINE
iterator end() { return (iterator)this->EndX; }
iterator end() { return begin() + size(); }
LLVM_ATTRIBUTE_ALWAYS_INLINE
const_iterator end() const { return (const_iterator)this->EndX; }
const_iterator end() const { return begin() + size(); }
protected:
iterator capacity_ptr() { return (iterator)this->CapacityX; }
const_iterator capacity_ptr() const { return (const_iterator)this->CapacityX;}
public:
// reverse iterator creation methods.
reverse_iterator rbegin() { return reverse_iterator(end()); }
const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
reverse_iterator rend() { return reverse_iterator(begin()); }
const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
LLVM_ATTRIBUTE_ALWAYS_INLINE
size_type size() const { return end()-begin(); }
size_type size_in_bytes() const { return size() * sizeof(T); }
size_type max_size() const { return size_type(-1) / sizeof(T); }
/// Return the total number of elements in the currently allocated buffer.
size_t capacity() const { return capacity_ptr() - begin(); }
size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
/// Return a pointer to the vector's buffer, even if empty().
pointer data() { return pointer(begin()); }
@ -210,21 +216,21 @@ protected:
public:
void push_back(const T &Elt) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
if (LLVM_UNLIKELY(this->size() >= this->capacity()))
this->grow();
::new ((void*) this->end()) T(Elt);
this->setEnd(this->end()+1);
this->set_size(this->size() + 1);
}
void push_back(T &&Elt) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
if (LLVM_UNLIKELY(this->size() >= this->capacity()))
this->grow();
::new ((void*) this->end()) T(::std::move(Elt));
this->setEnd(this->end()+1);
this->set_size(this->size() + 1);
}
void pop_back() {
this->setEnd(this->end()-1);
this->set_size(this->size() - 1);
this->end()->~T();
}
};
@ -232,15 +238,13 @@ public:
// Define this out-of-line to dissuade the C++ compiler from inlining it.
template <typename T, bool isPodLike>
void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
size_t CurCapacity = this->capacity();
size_t CurSize = this->size();
if (MinSize > UINT32_MAX)
report_bad_alloc_error("SmallVector capacity overflow during allocation");
// Always grow, even from zero.
size_t NewCapacity = size_t(NextPowerOf2(CurCapacity+2));
if (NewCapacity < MinSize)
NewCapacity = MinSize;
T *NewElts = static_cast<T*>(malloc(NewCapacity*sizeof(T)));
if (NewElts == nullptr)
report_bad_alloc_error("Allocation of SmallVector element failed.");
size_t NewCapacity = size_t(NextPowerOf2(this->capacity() + 2));
NewCapacity = std::min(std::max(NewCapacity, MinSize), size_t(UINT32_MAX));
T *NewElts = static_cast<T*>(llvm::safe_malloc(NewCapacity*sizeof(T)));
// Move the elements over.
this->uninitialized_move(this->begin(), this->end(), NewElts);
@ -252,9 +256,8 @@ void SmallVectorTemplateBase<T, isPodLike>::grow(size_t MinSize) {
if (!this->isSmall())
free(this->begin());
this->setEnd(NewElts+CurSize);
this->BeginX = NewElts;
this->CapacityX = this->begin()+NewCapacity;
this->Capacity = NewCapacity;
}
@ -301,21 +304,17 @@ protected:
/// Double the size of the allocated memory, guaranteeing space for at
/// least one more element or MinSize if specified.
void grow(size_t MinSize = 0) {
this->grow_pod(MinSize*sizeof(T), sizeof(T));
}
void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
public:
void push_back(const T &Elt) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
if (LLVM_UNLIKELY(this->size() >= this->capacity()))
this->grow();
memcpy(this->end(), &Elt, sizeof(T));
this->setEnd(this->end()+1);
this->set_size(this->size() + 1);
}
void pop_back() {
this->setEnd(this->end()-1);
}
void pop_back() { this->set_size(this->size() - 1); }
};
/// This class consists of common code factored out of the SmallVector class to
@ -332,16 +331,13 @@ public:
protected:
// Default ctor - Initialize to empty.
explicit SmallVectorImpl(unsigned N)
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N*sizeof(T)) {
}
: SmallVectorTemplateBase<T, isPodLike<T>::value>(N) {}
public:
SmallVectorImpl(const SmallVectorImpl &) = delete;
~SmallVectorImpl() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
// Subclass has already destructed this vector's elements.
// If this wasn't grown from the inline copy, deallocate the old space.
if (!this->isSmall())
free(this->begin());
@ -349,31 +345,31 @@ public:
void clear() {
this->destroy_range(this->begin(), this->end());
this->EndX = this->BeginX;
this->Size = 0;
}
void resize(size_type N) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
this->set_size(N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
new (&*I) T();
this->setEnd(this->begin()+N);
this->set_size(N);
}
}
void resize(size_type N, const T &NV) {
if (N < this->size()) {
this->destroy_range(this->begin()+N, this->end());
this->setEnd(this->begin()+N);
this->set_size(N);
} else if (N > this->size()) {
if (this->capacity() < N)
this->grow(N);
std::uninitialized_fill(this->end(), this->begin()+N, NV);
this->setEnd(this->begin()+N);
this->set_size(N);
}
}
@ -398,23 +394,23 @@ public:
void append(in_iter in_start, in_iter in_end) {
size_type NumInputs = std::distance(in_start, in_end);
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
if (NumInputs > this->capacity() - this->size())
this->grow(this->size()+NumInputs);
// Copy the new elements over.
this->uninitialized_copy(in_start, in_end, this->end());
this->setEnd(this->end() + NumInputs);
this->set_size(this->size() + NumInputs);
}
/// Add the specified range to the end of the SmallVector.
void append(size_type NumInputs, const T &Elt) {
// Grow allocated space if needed.
if (NumInputs > size_type(this->capacity_ptr()-this->end()))
if (NumInputs > this->capacity() - this->size())
this->grow(this->size()+NumInputs);
// Copy the new elements over.
std::uninitialized_fill_n(this->end(), NumInputs, Elt);
this->setEnd(this->end() + NumInputs);
this->set_size(this->size() + NumInputs);
}
void append(std::initializer_list<T> IL) {
@ -428,7 +424,7 @@ public:
clear();
if (this->capacity() < NumElts)
this->grow(NumElts);
this->setEnd(this->begin()+NumElts);
this->set_size(NumElts);
std::uninitialized_fill(this->begin(), this->end(), Elt);
}
@ -475,7 +471,7 @@ public:
iterator I = std::move(E, this->end(), S);
// Drop the last elts.
this->destroy_range(I, this->end());
this->setEnd(I);
this->set_size(I - this->begin());
return(N);
}
@ -488,7 +484,7 @@ public:
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
if (this->size() >= this->capacity()) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
@ -497,12 +493,12 @@ public:
::new ((void*) this->end()) T(::std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
this->set_size(this->size() + 1);
// If we just moved the element we're inserting, be sure to update
// the reference.
T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
if (I <= EltPtr && EltPtr < this->end())
++EltPtr;
*I = ::std::move(*EltPtr);
@ -518,7 +514,7 @@ public:
assert(I >= this->begin() && "Insertion iterator is out of bounds.");
assert(I <= this->end() && "Inserting past the end of the vector.");
if (this->EndX >= this->CapacityX) {
if (this->size() >= this->capacity()) {
size_t EltNo = I-this->begin();
this->grow();
I = this->begin()+EltNo;
@ -526,12 +522,12 @@ public:
::new ((void*) this->end()) T(std::move(this->back()));
// Push everything else over.
std::move_backward(I, this->end()-1, this->end());
this->setEnd(this->end()+1);
this->set_size(this->size() + 1);
// If we just moved the element we're inserting, be sure to update
// the reference.
const T *EltPtr = &Elt;
if (I <= EltPtr && EltPtr < this->EndX)
if (I <= EltPtr && EltPtr < this->end())
++EltPtr;
*I = *EltPtr;
@ -577,7 +573,7 @@ public:
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
this->set_size(this->size() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
@ -634,7 +630,7 @@ public:
// Move over the elements that we're about to overwrite.
T *OldEnd = this->end();
this->setEnd(this->end() + NumToInsert);
this->set_size(this->size() + NumToInsert);
size_t NumOverwritten = OldEnd-I;
this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
@ -654,10 +650,10 @@ public:
}
template <typename... ArgTypes> void emplace_back(ArgTypes &&... Args) {
if (LLVM_UNLIKELY(this->EndX >= this->CapacityX))
if (LLVM_UNLIKELY(this->size() >= this->capacity()))
this->grow();
::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
this->setEnd(this->end() + 1);
this->set_size(this->size() + 1);
}
SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
@ -676,20 +672,6 @@ public:
return std::lexicographical_compare(this->begin(), this->end(),
RHS.begin(), RHS.end());
}
/// Set the array size to \p N, which the current array must have enough
/// capacity for.
///
/// This does not construct or destroy any elements in the vector.
///
/// Clients can use this in conjunction with capacity() to write past the end
/// of the buffer when they know that more elements are available, and only
/// update the size later. This avoids the cost of value initializing elements
/// which will only be overwritten.
void set_size(size_type N) {
assert(N <= this->capacity());
this->setEnd(this->begin() + N);
}
};
template <typename T>
@ -699,8 +681,8 @@ void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
// We can only avoid copying elements if neither vector is small.
if (!this->isSmall() && !RHS.isSmall()) {
std::swap(this->BeginX, RHS.BeginX);
std::swap(this->EndX, RHS.EndX);
std::swap(this->CapacityX, RHS.CapacityX);
std::swap(this->Size, RHS.Size);
std::swap(this->Capacity, RHS.Capacity);
return;
}
if (RHS.size() > this->capacity())
@ -718,15 +700,15 @@ void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
if (this->size() > RHS.size()) {
size_t EltDiff = this->size() - RHS.size();
this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
RHS.setEnd(RHS.end()+EltDiff);
RHS.set_size(RHS.size() + EltDiff);
this->destroy_range(this->begin()+NumShared, this->end());
this->setEnd(this->begin()+NumShared);
this->set_size(NumShared);
} else if (RHS.size() > this->size()) {
size_t EltDiff = RHS.size() - this->size();
this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
this->setEnd(this->end() + EltDiff);
this->set_size(this->size() + EltDiff);
this->destroy_range(RHS.begin()+NumShared, RHS.end());
RHS.setEnd(RHS.begin()+NumShared);
RHS.set_size(NumShared);
}
}
@ -752,7 +734,7 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::
this->destroy_range(NewEnd, this->end());
// Trim.
this->setEnd(NewEnd);
this->set_size(RHSSize);
return *this;
}
@ -762,7 +744,7 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
this->set_size(0);
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
@ -775,7 +757,7 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
this->set_size(RHSSize);
return *this;
}
@ -789,8 +771,8 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
this->destroy_range(this->begin(), this->end());
if (!this->isSmall()) free(this->begin());
this->BeginX = RHS.BeginX;
this->EndX = RHS.EndX;
this->CapacityX = RHS.CapacityX;
this->Size = RHS.Size;
this->Capacity = RHS.Capacity;
RHS.resetToSmall();
return *this;
}
@ -807,7 +789,7 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
// Destroy excess elements and trim the bounds.
this->destroy_range(NewEnd, this->end());
this->setEnd(NewEnd);
this->set_size(RHSSize);
// Clear the RHS.
RHS.clear();
@ -822,7 +804,7 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
if (this->capacity() < RHSSize) {
// Destroy current elements.
this->destroy_range(this->begin(), this->end());
this->setEnd(this->begin());
this->set_size(0);
CurSize = 0;
this->grow(RHSSize);
} else if (CurSize) {
@ -835,22 +817,23 @@ SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
this->begin()+CurSize);
// Set end.
this->setEnd(this->begin()+RHSSize);
this->set_size(RHSSize);
RHS.clear();
return *this;
}
/// Storage for the SmallVector elements which aren't contained in
/// SmallVectorTemplateCommon. There are 'N-1' elements here. The remaining '1'
/// element is in the base class. This is specialized for the N=1 and N=0 cases
/// Storage for the SmallVector elements. This is specialized for the N=0 case
/// to avoid allocating unnecessary storage.
template <typename T, unsigned N>
struct SmallVectorStorage {
typename SmallVectorTemplateCommon<T>::U InlineElts[N - 1];
AlignedCharArrayUnion<T> InlineElts[N];
};
template <typename T> struct SmallVectorStorage<T, 1> {};
template <typename T> struct SmallVectorStorage<T, 0> {};
/// We need the storage to be properly aligned even for small-size of 0 so that
/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
/// well-defined.
template <typename T> struct alignas(alignof(T)) SmallVectorStorage<T, 0> {};
/// This is a 'vector' (really, a variable-sized array), optimized
/// for the case when the array is small. It contains some number of elements
@ -861,13 +844,15 @@ template <typename T> struct SmallVectorStorage<T, 0> {};
/// Note that this does not attempt to be exception safe.
///
template <typename T, unsigned N>
class SmallVector : public SmallVectorImpl<T> {
/// Inline space for elements which aren't stored in the base class.
SmallVectorStorage<T, N> Storage;
class SmallVector : public SmallVectorImpl<T>, SmallVectorStorage<T, N> {
public:
SmallVector() : SmallVectorImpl<T>(N) {}
~SmallVector() {
// Destroy the constructed elements in the vector.
this->destroy_range(this->begin(), this->end());
}
explicit SmallVector(size_t Size, const T &Value = T())
: SmallVectorImpl<T>(N) {
this->assign(Size, Value);
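The hunks above replace SmallVector's old BeginX/EndX/CapacityX pointer triple with a Size/Capacity pair and fold the inline storage into a SmallVectorStorage base class; the public interface is untouched. A minimal sketch of client code that is unaffected by the layout change (illustrative only, not part of this patch):

#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  // Inline space for 4 elements; pushing more spills to the heap.
  llvm::SmallVector<int, 4> V;
  for (int I = 0; I < 8; ++I)
    V.push_back(I);

  // erase() and insert() behave exactly as before; only the internal
  // size/capacity bookkeeping changed.
  V.erase(V.begin(), V.begin() + 2);
  V.insert(V.begin(), 42);

  for (int X : V)
    llvm::outs() << X << ' ';
  llvm::outs() << '\n';
  return 0;
}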

View File

@ -211,7 +211,7 @@ public:
// The Sparse array doesn't actually need to be initialized, so malloc
// would be enough here, but that will cause tools like valgrind to
// complain about branching on uninitialized data.
Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
Universe = U;
}

View File

@ -22,6 +22,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Allocator.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
@ -163,7 +164,7 @@ public:
// The Sparse array doesn't actually need to be initialized, so malloc
// would be enough here, but that will cause tools like valgrind to
// complain about branching on uninitialized data.
Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
Sparse = static_cast<SparseT*>(safe_calloc(U, sizeof(SparseT)));
Universe = U;
}
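Both SparseSet and SparseMultiSet now allocate the sparse array through safe_calloc instead of raw calloc, so allocation failure terminates with a fatal error rather than returning nullptr (which is also why a plain static_cast suffices). A rough stand-in for what such a wrapper does, written as a sketch under that assumption rather than copied from the Support library:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for safe_calloc: never returns null to the caller.
static void *safe_calloc_sketch(std::size_t Count, std::size_t Sz) {
  void *Result = std::calloc(Count, Sz);
  if (Result == nullptr && Count != 0 && Sz != 0) {
    std::fprintf(stderr, "allocation failed\n");
    std::abort();
  }
  return Result;
}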

View File

@ -26,15 +26,24 @@
#ifndef LLVM_ADT_STATISTIC_H
#define LLVM_ADT_STATISTIC_H
#include "llvm/Support/Atomic.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Compiler.h"
#include <atomic>
#include <memory>
#include <vector>
// Determine whether statistics should be enabled. We must do it here rather
// than in CMake because multi-config generators cannot determine this at
// configure time.
#if !defined(NDEBUG) || LLVM_FORCE_ENABLE_STATS
#define LLVM_ENABLE_STATS 1
#endif
namespace llvm {
class raw_ostream;
class raw_fd_ostream;
class StringRef;
class Statistic {
public:
@ -42,7 +51,7 @@ public:
const char *Name;
const char *Desc;
std::atomic<unsigned> Value;
bool Initialized;
std::atomic<bool> Initialized;
unsigned getValue() const { return Value.load(std::memory_order_relaxed); }
const char *getDebugType() const { return DebugType; }
@ -61,7 +70,7 @@ public:
// Allow use of this class as the value itself.
operator unsigned() const { return getValue(); }
#if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
#if LLVM_ENABLE_STATS
const Statistic &operator=(unsigned Val) {
Value.store(Val, std::memory_order_relaxed);
return init();
@ -143,14 +152,12 @@ public:
void updateMax(unsigned V) {}
#endif // !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
#endif // LLVM_ENABLE_STATS
protected:
Statistic &init() {
bool tmp = Initialized;
sys::MemoryFence();
if (!tmp) RegisterStatistic();
TsanHappensAfter(this);
if (!Initialized.load(std::memory_order_acquire))
RegisterStatistic();
return *this;
}
@ -160,21 +167,21 @@ protected:
// STATISTIC - A macro to make definition of statistics really simple. This
// automatically passes the DEBUG_TYPE of the file into the statistic.
#define STATISTIC(VARNAME, DESC) \
static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, false}
static llvm::Statistic VARNAME = {DEBUG_TYPE, #VARNAME, DESC, {0}, {false}}
/// \brief Enable the collection and printing of statistics.
/// Enable the collection and printing of statistics.
void EnableStatistics(bool PrintOnExit = true);
/// \brief Check if statistics are enabled.
/// Check if statistics are enabled.
bool AreStatisticsEnabled();
/// \brief Return a file stream to print our output on.
/// Return a file stream to print our output on.
std::unique_ptr<raw_fd_ostream> CreateInfoOutputFile();
/// \brief Print statistics to the file returned by CreateInfoOutputFile().
/// Print statistics to the file returned by CreateInfoOutputFile().
void PrintStatistics();
/// \brief Print statistics to the given output stream.
/// Print statistics to the given output stream.
void PrintStatistics(raw_ostream &OS);
/// Print statistics in JSON format. This does include all global timers (\see
@ -183,6 +190,30 @@ void PrintStatistics(raw_ostream &OS);
/// PrintStatisticsJSON().
void PrintStatisticsJSON(raw_ostream &OS);
/// Get the statistics. This can be used to look up the value of
/// statistics without needing to parse JSON.
///
/// This function does not prevent statistics from being updated by other
/// threads during its execution. It will return the value at the point that it is
/// read. However, it will prevent new statistics from registering until it
/// completes.
const std::vector<std::pair<StringRef, unsigned>> GetStatistics();
/// Reset the statistics. This can be used to zero and de-register the
/// statistics in order to measure a compilation.
///
/// When this function begins to call destructors prior to returning, all
/// statistics will be zero and unregistered. However, that might not remain the
/// case by the time this function finishes returning. Whether updates from other
/// threads are lost or merely deferred until after the function returns is
/// timing sensitive.
///
/// Callers who intend to use this to measure statistics for a single
/// compilation should ensure that no compilations are in progress at the point
/// this function is called and that only one compilation executes until calling
/// GetStatistics().
void ResetStatistics();
} // end namespace llvm
#endif // LLVM_ADT_STATISTIC_H
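A short sketch of how the new GetStatistics()/ResetStatistics() entry points might be driven, assuming a build in which LLVM_ENABLE_STATS is in effect; the counter and driver below are hypothetical, not taken from the patch:

#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "demo"
STATISTIC(NumWidgets, "Number of widgets processed");

int main() {
  llvm::EnableStatistics(/*PrintOnExit=*/false);

  for (int I = 0; I < 3; ++I)
    ++NumWidgets; // registers the statistic on first use

  // New in this patch: read the counters back without parsing JSON output.
  for (const auto &Stat : llvm::GetStatistics())
    llvm::outs() << Stat.first << " = " << Stat.second << '\n';

  // Also new: zero and de-register everything before the next measurement.
  llvm::ResetStatistics();
  return 0;
}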

View File

@ -39,6 +39,16 @@ inline char hexdigit(unsigned X, bool LowerCase = false) {
return X < 10 ? '0' + X : HexChar + X - 10;
}
/// Given an array of c-style strings terminated by a null pointer, construct
/// a vector of StringRefs representing the same strings without the terminating
/// null pointer.
inline std::vector<StringRef> toStringRefArray(const char *const *Strings) {
std::vector<StringRef> Result;
while (*Strings)
Result.push_back(*Strings++);
return Result;
}
/// Construct a string ref from a boolean.
inline StringRef toStringRef(bool B) { return StringRef(B ? "true" : "false"); }
@ -78,6 +88,26 @@ inline bool isAlpha(char C) {
/// lowercase letter as classified by "C" locale.
inline bool isAlnum(char C) { return isAlpha(C) || isDigit(C); }
/// Checks whether character \p C is valid ASCII (high bit is zero).
inline bool isASCII(char C) { return static_cast<unsigned char>(C) <= 127; }
/// Checks whether all characters in S are ASCII.
inline bool isASCII(llvm::StringRef S) {
for (char C : S)
if (LLVM_UNLIKELY(!isASCII(C)))
return false;
return true;
}
/// Checks whether character \p C is printable.
///
/// Locale-independent version of the C standard library isprint whose results
/// may differ on different platforms.
inline bool isPrint(char C) {
unsigned char UC = static_cast<unsigned char>(C);
return (0x20 <= UC) && (UC <= 0x7E);
}
/// Returns the corresponding lowercase character if \p x is uppercase.
inline char toLower(char x) {
if (x >= 'A' && x <= 'Z')
@ -157,7 +187,7 @@ inline std::string fromHex(StringRef Input) {
return Output;
}
/// \brief Convert the string \p S to an integer of the specified type using
/// Convert the string \p S to an integer of the specified type using
/// the radix \p Base. If \p Base is 0, auto-detects the radix.
/// Returns true if the number was successfully converted, false otherwise.
template <typename N> bool to_integer(StringRef S, N &Num, unsigned Base = 0) {
@ -232,19 +262,6 @@ void SplitString(StringRef Source,
SmallVectorImpl<StringRef> &OutFragments,
StringRef Delimiters = " \t\n\v\f\r");
/// HashString - Hash function for strings.
///
/// This is the Bernstein hash function.
//
// FIXME: Investigate whether a modified bernstein hash function performs
// better: http://eternallyconfuzzled.com/tuts/algorithms/jsw_tut_hashing.aspx
// X*33+c -> X*33^c
inline unsigned HashString(StringRef Str, unsigned Result = 0) {
for (StringRef::size_type i = 0, e = Str.size(); i != e; ++i)
Result = Result * 33 + (unsigned char)Str[i];
return Result;
}
/// Returns the English suffix for an ordinal integer (-st, -nd, -rd, -th).
inline StringRef getOrdinalSuffix(unsigned Val) {
// It is critically important that we do this perfectly for
@ -264,9 +281,13 @@ inline StringRef getOrdinalSuffix(unsigned Val) {
}
}
/// PrintEscapedString - Print each character of the specified string, escaping
/// it if it is not printable or if it is an escape char.
void PrintEscapedString(StringRef Name, raw_ostream &Out);
/// Print each character of the specified string, escaping it if it is not
/// printable or if it is an escape char.
void printEscapedString(StringRef Name, raw_ostream &Out);
/// Print each character of the specified string, escaping HTML special
/// characters.
void printHTMLEscaped(StringRef String, raw_ostream &Out);
/// printLowerCase - Print each character as lowercase if it is uppercase.
void printLowerCase(StringRef String, raw_ostream &Out);
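A small sketch of the newly added helpers (toStringRefArray, isASCII, isPrint) in use; the program is illustrative only:

#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main(int argc, char **argv) {
  // argv is a null-pointer-terminated array of C strings, so it can be
  // wrapped directly by toStringRefArray().
  for (StringRef Arg : toStringRefArray(argv))
    outs() << Arg << (isASCII(Arg) ? "" : " (non-ASCII)") << '\n';

  // isPrint() is locale-independent, unlike the C library's isprint().
  outs() << "isPrint('\\t') = " << isPrint('\t') << '\n'; // prints 0
  outs() << "isPrint('a') = " << isPrint('a') << '\n';    // prints 1
  return 0;
}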

View File

@ -37,12 +37,12 @@ template<typename ValueTy> class StringMapKeyIterator;
/// StringMapEntryBase - Shared base class of StringMapEntry instances.
class StringMapEntryBase {
unsigned StrLen;
size_t StrLen;
public:
explicit StringMapEntryBase(unsigned Len) : StrLen(Len) {}
explicit StringMapEntryBase(size_t Len) : StrLen(Len) {}
unsigned getKeyLength() const { return StrLen; }
size_t getKeyLength() const { return StrLen; }
};
/// StringMapImpl - This is the base class of StringMap that is shared among
@ -127,10 +127,10 @@ class StringMapEntry : public StringMapEntryBase {
public:
ValueTy second;
explicit StringMapEntry(unsigned strLen)
explicit StringMapEntry(size_t strLen)
: StringMapEntryBase(strLen), second() {}
template <typename... InitTy>
StringMapEntry(unsigned strLen, InitTy &&... InitVals)
StringMapEntry(size_t strLen, InitTy &&... InitVals)
: StringMapEntryBase(strLen), second(std::forward<InitTy>(InitVals)...) {}
StringMapEntry(StringMapEntry &E) = delete;
@ -155,19 +155,16 @@ public:
template <typename AllocatorTy, typename... InitTy>
static StringMapEntry *Create(StringRef Key, AllocatorTy &Allocator,
InitTy &&... InitVals) {
unsigned KeyLength = Key.size();
size_t KeyLength = Key.size();
// Allocate a new item with space for the string at the end and a null
// terminator.
unsigned AllocSize = static_cast<unsigned>(sizeof(StringMapEntry))+
KeyLength+1;
unsigned Alignment = alignof(StringMapEntry);
size_t AllocSize = sizeof(StringMapEntry) + KeyLength + 1;
size_t Alignment = alignof(StringMapEntry);
StringMapEntry *NewItem =
static_cast<StringMapEntry*>(Allocator.Allocate(AllocSize,Alignment));
if (NewItem == nullptr)
report_bad_alloc_error("Allocation of StringMap entry failed.");
assert(NewItem && "Unhandled out-of-memory");
// Construct the value.
new (NewItem) StringMapEntry(KeyLength, std::forward<InitTy>(InitVals)...);
@ -203,8 +200,7 @@ public:
template<typename AllocatorTy>
void Destroy(AllocatorTy &Allocator) {
// Free memory referenced by the item.
unsigned AllocSize =
static_cast<unsigned>(sizeof(StringMapEntry)) + getKeyLength() + 1;
size_t AllocSize = sizeof(StringMapEntry) + getKeyLength() + 1;
this->~StringMapEntry();
Allocator.Deallocate(static_cast<void *>(this), AllocSize);
}

View File

@ -201,7 +201,7 @@ namespace llvm {
LLVM_NODISCARD
int compare_numeric(StringRef RHS) const;
/// \brief Determine the edit distance between this string and another
/// Determine the edit distance between this string and another
/// string.
///
/// \param Other the string to compare this string against.
@ -725,10 +725,7 @@ namespace llvm {
/// \returns The split substrings.
LLVM_NODISCARD
std::pair<StringRef, StringRef> split(char Separator) const {
size_t Idx = find(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
return split(StringRef(&Separator, 1));
}
/// Split into two substrings around the first occurrence of a separator
@ -749,6 +746,24 @@ namespace llvm {
return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
}
/// Split into two substrings around the last occurrence of a separator
/// string.
///
/// If \p Separator is in the string, then the result is a pair (LHS, RHS)
/// such that (*this == LHS + Separator + RHS) is true and RHS is
/// minimal. If \p Separator is not in the string, then the result is a
/// pair (LHS, RHS) where (*this == LHS) and (RHS == "").
///
/// \param Separator - The string to split on.
/// \return - The split substrings.
LLVM_NODISCARD
std::pair<StringRef, StringRef> rsplit(StringRef Separator) const {
size_t Idx = rfind(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx + Separator.size(), npos));
}
/// Split into substrings around the occurrences of a separator string.
///
/// Each substring is stored in \p A. If \p MaxSplit is >= 0, at most
@ -796,10 +811,7 @@ namespace llvm {
/// \return - The split substrings.
LLVM_NODISCARD
std::pair<StringRef, StringRef> rsplit(char Separator) const {
size_t Idx = rfind(Separator);
if (Idx == npos)
return std::make_pair(*this, StringRef());
return std::make_pair(slice(0, Idx), slice(Idx+1, npos));
return rsplit(StringRef(&Separator, 1));
}
/// Return string with consecutive \p Char characters starting from the
@ -855,6 +867,10 @@ namespace llvm {
/// constexpr StringLiteral S("test");
///
class StringLiteral : public StringRef {
private:
constexpr StringLiteral(const char *Str, size_t N) : StringRef(Str, N) {
}
public:
template <size_t N>
constexpr StringLiteral(const char (&Str)[N])
@ -867,6 +883,12 @@ namespace llvm {
#endif
: StringRef(Str, N - 1) {
}
// Explicit construction for strings like "foo\0bar".
template <size_t N>
static constexpr StringLiteral withInnerNUL(const char (&Str)[N]) {
return StringLiteral(Str, N - 1);
}
};
/// @name StringRef Comparison Operators
@ -902,7 +924,7 @@ namespace llvm {
/// @}
/// \brief Compute a hash_code for a StringRef.
/// Compute a hash_code for a StringRef.
LLVM_NODISCARD
hash_code hash_value(StringRef S);
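A brief sketch exercising the new rsplit(StringRef) overload and the StringLiteral::withInnerNUL factory (illustrative only):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  StringRef Path = "a::b::c";

  // New overload: split on a multi-character separator, from the right.
  auto Split = Path.rsplit("::");
  outs() << Split.first << " | " << Split.second << '\n'; // a::b | c

  // withInnerNUL keeps the embedded NUL that the ordinary StringLiteral
  // constructor is guarded against (on compilers that check it).
  constexpr StringLiteral Mixed = StringLiteral::withInnerNUL("foo\0bar");
  outs() << Mixed.size() << '\n'; // 7
  return 0;
}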

View File

@ -20,7 +20,7 @@
namespace llvm {
/// \brief A switch()-like statement whose cases are string literals.
/// A switch()-like statement whose cases are string literals.
///
/// The StringSwitch class is a simple form of a switch() statement that
/// determines whether the given string matches one of the given string
@ -41,216 +41,176 @@ namespace llvm {
/// \endcode
template<typename T, typename R = T>
class StringSwitch {
/// \brief The string we are matching.
StringRef Str;
/// The string we are matching.
const StringRef Str;
/// \brief The pointer to the result of this switch statement, once known,
/// The pointer to the result of this switch statement, once known,
/// null before that.
const T *Result;
Optional<T> Result;
public:
LLVM_ATTRIBUTE_ALWAYS_INLINE
explicit StringSwitch(StringRef S)
: Str(S), Result(nullptr) { }
: Str(S), Result() { }
// StringSwitch is not copyable.
StringSwitch(const StringSwitch &) = delete;
void operator=(const StringSwitch &) = delete;
StringSwitch(StringSwitch &&other) {
*this = std::move(other);
}
StringSwitch &operator=(StringSwitch &&other) {
Str = other.Str;
Result = other.Result;
return *this;
}
// StringSwitch is not assignable due to 'Str' being 'const'.
void operator=(const StringSwitch &) = delete;
void operator=(StringSwitch &&other) = delete;
StringSwitch(StringSwitch &&other)
: Str(other.Str), Result(std::move(other.Result)) { }
~StringSwitch() = default;
// Case-sensitive case matchers
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& Case(const char (&S)[N], const T& Value) {
assert(N);
if (!Result && N-1 == Str.size() &&
(N == 1 || std::memcmp(S, Str.data(), N-1) == 0)) {
Result = &Value;
StringSwitch &Case(StringLiteral S, T Value) {
if (!Result && Str == S) {
Result = std::move(Value);
}
return *this;
}
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& EndsWith(const char (&S)[N], const T &Value) {
assert(N);
if (!Result && Str.size() >= N-1 &&
(N == 1 || std::memcmp(S, Str.data() + Str.size() + 1 - N, N-1) == 0)) {
Result = &Value;
StringSwitch& EndsWith(StringLiteral S, T Value) {
if (!Result && Str.endswith(S)) {
Result = std::move(Value);
}
return *this;
}
template<unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch& StartsWith(const char (&S)[N], const T &Value) {
assert(N);
if (!Result && Str.size() >= N-1 &&
(N == 1 || std::memcmp(S, Str.data(), N-1) == 0)) {
Result = &Value;
StringSwitch& StartsWith(StringLiteral S, T Value) {
if (!Result && Str.startswith(S)) {
Result = std::move(Value);
}
return *this;
}
template<unsigned N0, unsigned N1>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const T& Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, T Value) {
return Case(S0, Value).Case(S1, Value);
}
template<unsigned N0, unsigned N1, unsigned N2>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const T& Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
T Value) {
return Case(S0, Value).Cases(S1, S2, Value);
}
template<unsigned N0, unsigned N1, unsigned N2, unsigned N3>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const T& Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, T Value) {
return Case(S0, Value).Cases(S1, S2, S3, Value);
}
template<unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const T& Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, StringLiteral S4, T Value) {
return Case(S0, Value).Cases(S1, S2, S3, S4, Value);
}
template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
unsigned N5>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const char (&S5)[N5],
const T &Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, StringLiteral S4, StringLiteral S5,
T Value) {
return Case(S0, Value).Cases(S1, S2, S3, S4, S5, Value);
}
template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
unsigned N5, unsigned N6>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const char (&S5)[N5],
const char (&S6)[N6], const T &Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, StringLiteral S4, StringLiteral S5,
StringLiteral S6, T Value) {
return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, Value);
}
template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
unsigned N5, unsigned N6, unsigned N7>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const char (&S5)[N5],
const char (&S6)[N6], const char (&S7)[N7],
const T &Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, StringLiteral S4, StringLiteral S5,
StringLiteral S6, StringLiteral S7, T Value) {
return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, Value);
}
template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
unsigned N5, unsigned N6, unsigned N7, unsigned N8>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const char (&S5)[N5],
const char (&S6)[N6], const char (&S7)[N7],
const char (&S8)[N8], const T &Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, StringLiteral S4, StringLiteral S5,
StringLiteral S6, StringLiteral S7, StringLiteral S8,
T Value) {
return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, Value);
}
template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4,
unsigned N5, unsigned N6, unsigned N7, unsigned N8, unsigned N9>
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &Cases(const char (&S0)[N0], const char (&S1)[N1],
const char (&S2)[N2], const char (&S3)[N3],
const char (&S4)[N4], const char (&S5)[N5],
const char (&S6)[N6], const char (&S7)[N7],
const char (&S8)[N8], const char (&S9)[N9],
const T &Value) {
StringSwitch &Cases(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, StringLiteral S4, StringLiteral S5,
StringLiteral S6, StringLiteral S7, StringLiteral S8,
StringLiteral S9, T Value) {
return Case(S0, Value).Cases(S1, S2, S3, S4, S5, S6, S7, S8, S9, Value);
}
// Case-insensitive case matchers.
template <unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &CaseLower(const char (&S)[N],
const T &Value) {
if (!Result && Str.equals_lower(StringRef(S, N - 1)))
Result = &Value;
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &CaseLower(StringLiteral S, T Value) {
if (!Result && Str.equals_lower(S))
Result = std::move(Value);
return *this;
}
template <unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &EndsWithLower(const char (&S)[N],
const T &Value) {
if (!Result && Str.endswith_lower(StringRef(S, N - 1)))
Result = &Value;
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &EndsWithLower(StringLiteral S, T Value) {
if (!Result && Str.endswith_lower(S))
Result = Value;
return *this;
}
template <unsigned N>
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &StartsWithLower(const char (&S)[N],
const T &Value) {
if (!Result && Str.startswith_lower(StringRef(S, N - 1)))
Result = &Value;
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &StartsWithLower(StringLiteral S, T Value) {
if (!Result && Str.startswith_lower(S))
Result = std::move(Value);
return *this;
}
template <unsigned N0, unsigned N1>
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
CasesLower(const char (&S0)[N0], const char (&S1)[N1], const T &Value) {
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, T Value) {
return CaseLower(S0, Value).CaseLower(S1, Value);
}
template <unsigned N0, unsigned N1, unsigned N2>
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
CasesLower(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2],
const T &Value) {
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
T Value) {
return CaseLower(S0, Value).CasesLower(S1, S2, Value);
}
template <unsigned N0, unsigned N1, unsigned N2, unsigned N3>
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
CasesLower(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2],
const char (&S3)[N3], const T &Value) {
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, T Value) {
return CaseLower(S0, Value).CasesLower(S1, S2, S3, Value);
}
template <unsigned N0, unsigned N1, unsigned N2, unsigned N3, unsigned N4>
LLVM_ATTRIBUTE_ALWAYS_INLINE StringSwitch &
CasesLower(const char (&S0)[N0], const char (&S1)[N1], const char (&S2)[N2],
const char (&S3)[N3], const char (&S4)[N4], const T &Value) {
LLVM_ATTRIBUTE_ALWAYS_INLINE
StringSwitch &CasesLower(StringLiteral S0, StringLiteral S1, StringLiteral S2,
StringLiteral S3, StringLiteral S4, T Value) {
return CaseLower(S0, Value).CasesLower(S1, S2, S3, S4, Value);
}
LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
R Default(const T &Value) const {
R Default(T Value) {
if (Result)
return *Result;
return std::move(*Result);
return Value;
}
LLVM_NODISCARD
LLVM_ATTRIBUTE_ALWAYS_INLINE
operator R() const {
operator R() {
assert(Result && "Fell off the end of a string-switch");
return *Result;
return std::move(*Result);
}
};
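With the switch to StringLiteral parameters and an Optional<T> result, typical call sites read the same as before. A small sketch (illustrative only):

#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

enum class Color { Red, Green, Blue, Unknown };

static Color parseColor(StringRef S) {
  return StringSwitch<Color>(S)
      .Case("red", Color::Red)
      .Case("green", Color::Green)
      .CaseLower("BLUE", Color::Blue) // case-insensitive matcher
      .Default(Color::Unknown);
}

int main() {
  outs() << (parseColor("Blue") == Color::Blue) << '\n';     // 1
  outs() << (parseColor("mauve") == Color::Unknown) << '\n'; // 1
  return 0;
}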

View File

@ -108,6 +108,12 @@ public:
return *this;
}
TinyPtrVector(std::initializer_list<EltTy> IL)
: Val(IL.size() == 0
? PtrUnion()
: IL.size() == 1 ? PtrUnion(*IL.begin())
: PtrUnion(new VecTy(IL.begin(), IL.end()))) {}
/// Constructor from an ArrayRef.
///
/// This also is a constructor for individual array elements due to the single

View File

@ -101,6 +101,7 @@ public:
enum SubArchType {
NoSubArch,
ARMSubArch_v8_4a,
ARMSubArch_v8_3a,
ARMSubArch_v8_2a,
ARMSubArch_v8_1a,
@ -144,7 +145,8 @@ public:
AMD,
Mesa,
SUSE,
LastVendorType = SUSE
OpenEmbedded,
LastVendorType = OpenEmbedded
};
enum OSType {
UnknownOS,
@ -202,9 +204,7 @@ public:
MSVC,
Itanium,
Cygnus,
AMDOpenCL,
CoreCLR,
OpenCL,
Simulator, // Simulator variants of other systems, e.g., Apple's iOS
LastEnvironmentType = Simulator
};
@ -660,9 +660,29 @@ public:
return getArch() == Triple::aarch64 || getArch() == Triple::aarch64_be;
}
/// Tests wether the target supports comdat
/// Tests whether the target is MIPS 32-bit (little and big endian).
bool isMIPS32() const {
return getArch() == Triple::mips || getArch() == Triple::mipsel;
}
/// Tests whether the target is MIPS 64-bit (little and big endian).
bool isMIPS64() const {
return getArch() == Triple::mips64 || getArch() == Triple::mips64el;
}
/// Tests whether the target is MIPS (little and big endian, 32- or 64-bit).
bool isMIPS() const {
return isMIPS32() || isMIPS64();
}
/// Tests whether the target supports comdat
bool supportsCOMDAT() const {
return !isOSBinFormatMachO() && !isOSBinFormatWasm();
return !isOSBinFormatMachO();
}
/// Tests whether the target uses emulated TLS as default.
bool hasDefaultEmulatedTLS() const {
return isAndroid() || isOSOpenBSD() || isWindowsCygwinEnvironment();
}
/// @}
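A sketch of the new MIPS and emulated-TLS predicates on Triple (illustrative only):

#include "llvm/ADT/Triple.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  Triple T("mips64el-unknown-linux-gnu");

  // New in this patch: isMIPS32/isMIPS64/isMIPS cover all four MIPS arches.
  outs() << "isMIPS:   " << T.isMIPS() << '\n';   // 1
  outs() << "isMIPS64: " << T.isMIPS64() << '\n'; // 1
  outs() << "isMIPS32: " << T.isMIPS32() << '\n'; // 0

  // Also new: one predicate for targets that default to emulated TLS.
  outs() << "default emulated TLS: " << T.hasDefaultEmulatedTLS() << '\n'; // 0
  return 0;
}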

View File

@ -72,16 +72,16 @@ public:
return Vector[ID - 1];
}
/// \brief Return an iterator to the start of the vector.
/// Return an iterator to the start of the vector.
iterator begin() { return Vector.begin(); }
/// \brief Return an iterator to the start of the vector.
/// Return an iterator to the start of the vector.
const_iterator begin() const { return Vector.begin(); }
/// \brief Return an iterator to the end of the vector.
/// Return an iterator to the end of the vector.
iterator end() { return Vector.end(); }
/// \brief Return an iterator to the end of the vector.
/// Return an iterator to the end of the vector.
const_iterator end() const { return Vector.end(); }
/// size - Returns the number of entries in the vector.

View File

@ -53,7 +53,7 @@ namespace llvm {
#define LLVM_COMMA_JOIN31(x) LLVM_COMMA_JOIN30(x), x ## 30
#define LLVM_COMMA_JOIN32(x) LLVM_COMMA_JOIN31(x), x ## 31
/// \brief Class which can simulate a type-safe variadic function.
/// Class which can simulate a type-safe variadic function.
///
/// The VariadicFunction class template makes it easy to define
/// type-safe variadic functions where all arguments have the same

View File

@ -22,7 +22,7 @@
namespace llvm {
/// \brief Determine the edit distance between two sequences.
/// Determine the edit distance between two sequences.
///
/// \param FromArray the first sequence to compare.
///

View File

@ -84,21 +84,11 @@ template <typename NodeTy>
struct ilist_node_traits : ilist_alloc_traits<NodeTy>,
ilist_callback_traits<NodeTy> {};
/// Default template traits for intrusive list.
///
/// By inheriting from this, you can easily use default implementations for all
/// common operations.
///
/// TODO: Remove this customization point. Specializing ilist_traits is
/// already fully general.
template <typename NodeTy>
struct ilist_default_traits : public ilist_node_traits<NodeTy> {};
/// Template traits for intrusive list.
///
/// Customize callbacks and allocation semantics.
template <typename NodeTy>
struct ilist_traits : public ilist_default_traits<NodeTy> {};
struct ilist_traits : public ilist_node_traits<NodeTy> {};
/// Const traits should never be instantiated.
template <typename Ty> struct ilist_traits<const Ty> {};
@ -178,9 +168,6 @@ template <class IntrusiveListT, class TraitsT>
class iplist_impl : public TraitsT, IntrusiveListT {
typedef IntrusiveListT base_list_type;
protected:
typedef iplist_impl iplist_impl_type;
public:
typedef typename base_list_type::pointer pointer;
typedef typename base_list_type::const_pointer const_pointer;
@ -369,26 +356,26 @@ public:
using base_list_type::sort;
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
pointer getPrevNode(reference N) const {
auto I = N.getIterator();
if (I == begin())
return nullptr;
return &*std::prev(I);
}
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
const_pointer getPrevNode(const_reference N) const {
return getPrevNode(const_cast<reference >(N));
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
pointer getNextNode(reference N) const {
auto Next = std::next(N.getIterator());
if (Next == end())
return nullptr;
return &*Next;
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
const_pointer getNextNode(const_reference N) const {
return getNextNode(const_cast<reference >(N));
}
@ -402,7 +389,7 @@ public:
template <class T, class... Options>
class iplist
: public iplist_impl<simple_ilist<T, Options...>, ilist_traits<T>> {
typedef typename iplist::iplist_impl_type iplist_impl_type;
using iplist_impl_type = typename iplist::iplist_impl;
public:
iplist() = default;

View File

@ -271,7 +271,7 @@ private:
public:
/// @name Adjacent Node Accessors
/// @{
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
NodeTy *getPrevNode() {
// Should be separated to a reused function, but then we couldn't use auto
// (and would need the type of the list).
@ -280,12 +280,12 @@ public:
return List.getPrevNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the previous node, or \c nullptr for the list head.
/// Get the previous node, or \c nullptr for the list head.
const NodeTy *getPrevNode() const {
return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
NodeTy *getNextNode() {
// Should be separated to a reused function, but then we couldn't use auto
// (and would need the type of the list).
@ -294,7 +294,7 @@ public:
return List.getNextNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the next node, or \c nullptr for the list tail.
/// Get the next node, or \c nullptr for the list tail.
const NodeTy *getNextNode() const {
return const_cast<ilist_node_with_parent *>(this)->getNextNode();
}

View File

@ -11,7 +11,6 @@
#define LLVM_ADT_ILIST_NODE_OPTIONS_H
#include "llvm/Config/abi-breaking.h"
#include "llvm/Config/llvm-config.h"
#include <type_traits>

View File

@ -19,7 +19,7 @@
namespace llvm {
/// \brief CRTP base class which implements the entire standard iterator facade
/// CRTP base class which implements the entire standard iterator facade
/// in terms of a minimal subset of the interface.
///
/// Use this when it is reasonable to implement most of the iterator
@ -183,7 +183,7 @@ public:
}
};
/// \brief CRTP base class for adapting an iterator to a different type.
/// CRTP base class for adapting an iterator to a different type.
///
/// This class can be used through CRTP to adapt one iterator into another.
/// Typically this is done through providing in the derived class a custom \c
@ -274,7 +274,7 @@ public:
ReferenceT operator*() const { return *I; }
};
/// \brief An iterator type that allows iterating over the pointees via some
/// An iterator type that allows iterating over the pointees via some
/// other iterator.
///
/// The typical usage of this is to expose a type that iterates over Ts, but
@ -288,7 +288,7 @@ template <typename WrappedIteratorT,
decltype(**std::declval<WrappedIteratorT>())>::type>
struct pointee_iterator
: iterator_adaptor_base<
pointee_iterator<WrappedIteratorT>, WrappedIteratorT,
pointee_iterator<WrappedIteratorT, T>, WrappedIteratorT,
typename std::iterator_traits<WrappedIteratorT>::iterator_category,
T> {
pointee_iterator() = default;
@ -311,7 +311,7 @@ make_pointee_range(RangeT &&Range) {
template <typename WrappedIteratorT,
typename T = decltype(&*std::declval<WrappedIteratorT>())>
class pointer_iterator
: public iterator_adaptor_base<pointer_iterator<WrappedIteratorT>,
: public iterator_adaptor_base<pointer_iterator<WrappedIteratorT, T>,
WrappedIteratorT, T> {
mutable T Ptr;
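The change above threads the explicit T parameter through to the CRTP base so that pointee_iterator and pointer_iterator instantiated with a non-default T name the correct base class; ordinary use is unaffected. A small sketch (illustrative only):

#include "llvm/ADT/iterator.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <vector>

int main() {
  std::vector<std::unique_ptr<int>> V;
  V.push_back(std::make_unique<int>(1));
  V.push_back(std::make_unique<int>(2));

  // Iterate over the pointees (int&) rather than over the unique_ptrs.
  for (int X : llvm::make_pointee_range(V))
    llvm::outs() << X << '\n';
  return 0;
}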

View File

@ -24,7 +24,7 @@
namespace llvm {
/// \brief A range adaptor for a pair of iterators.
/// A range adaptor for a pair of iterators.
///
/// This just wraps two iterators into a range-compatible interface. Nothing
/// fancy at all.
@ -47,7 +47,7 @@ public:
IteratorT end() const { return end_iterator; }
};
/// \brief Convenience function for iterating over sub-ranges.
/// Convenience function for iterating over sub-ranges.
///
/// This provides a bit of syntactic sugar to make using sub-ranges
/// in for loops a bit easier. Analogous to std::make_pair().
@ -59,9 +59,10 @@ template <typename T> iterator_range<T> make_range(std::pair<T, T> p) {
return iterator_range<T>(std::move(p.first), std::move(p.second));
}
template<typename T>
iterator_range<decltype(begin(std::declval<T>()))> drop_begin(T &&t, int n) {
return make_range(std::next(begin(t), n), end(t));
template <typename T>
iterator_range<decltype(adl_begin(std::declval<T>()))> drop_begin(T &&t,
int n) {
return make_range(std::next(adl_begin(t), n), adl_end(t));
}
}
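drop_begin() now locates begin/end through ADL (adl_begin/adl_end), so it also works for ranges whose begin/end are free functions; usage is unchanged. A short sketch (illustrative only):

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>

int main() {
  std::vector<int> V = {1, 2, 3, 4, 5};

  // Skip the first two elements without copying the container.
  for (int X : llvm::drop_begin(V, 2))
    llvm::outs() << X << ' '; // 3 4 5
  llvm::outs() << '\n';
  return 0;
}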

View File

@ -76,7 +76,7 @@ class Value;
///
/// See docs/AliasAnalysis.html for more information on the specific meanings
/// of these values.
enum AliasResult {
enum AliasResult : uint8_t {
/// The two locations do not alias at all.
///
/// This value is arranged to convert to false, while all other values
@ -91,13 +91,16 @@ enum AliasResult {
MustAlias,
};
/// << operator for AliasResult.
raw_ostream &operator<<(raw_ostream &OS, AliasResult AR);
/// Flags indicating whether a memory access modifies or references memory.
///
/// This is no access at all, a modification, a reference, or both
/// a modification and a reference. These are specifically structured such that
/// they form a three bit matrix and bit-tests for 'mod' or 'ref' or 'must'
/// work with any of the possible values.
enum class ModRefInfo {
enum class ModRefInfo : uint8_t {
/// Must is provided for completeness, but no routines will return only
/// Must today. See definition of Must below.
Must = 0,
@ -325,8 +328,8 @@ public:
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
/// A convenience wrapper around the primary \c alias interface.
AliasResult alias(const Value *V1, uint64_t V1Size, const Value *V2,
uint64_t V2Size) {
AliasResult alias(const Value *V1, LocationSize V1Size, const Value *V2,
LocationSize V2Size) {
return alias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
@ -343,8 +346,8 @@ public:
}
/// A convenience wrapper around the \c isNoAlias helper interface.
bool isNoAlias(const Value *V1, uint64_t V1Size, const Value *V2,
uint64_t V2Size) {
bool isNoAlias(const Value *V1, LocationSize V1Size, const Value *V2,
LocationSize V2Size) {
return isNoAlias(MemoryLocation(V1, V1Size), MemoryLocation(V2, V2Size));
}
@ -501,7 +504,7 @@ public:
/// getModRefInfo (for call sites) - A convenience wrapper.
ModRefInfo getModRefInfo(ImmutableCallSite CS, const Value *P,
uint64_t Size) {
LocationSize Size) {
return getModRefInfo(CS, MemoryLocation(P, Size));
}
@ -512,7 +515,8 @@ public:
}
/// getModRefInfo (for calls) - A convenience wrapper.
ModRefInfo getModRefInfo(const CallInst *C, const Value *P, uint64_t Size) {
ModRefInfo getModRefInfo(const CallInst *C, const Value *P,
LocationSize Size) {
return getModRefInfo(C, MemoryLocation(P, Size));
}
@ -523,7 +527,8 @@ public:
}
/// getModRefInfo (for invokes) - A convenience wrapper.
ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P, uint64_t Size) {
ModRefInfo getModRefInfo(const InvokeInst *I, const Value *P,
LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@ -532,7 +537,8 @@ public:
ModRefInfo getModRefInfo(const LoadInst *L, const MemoryLocation &Loc);
/// getModRefInfo (for loads) - A convenience wrapper.
ModRefInfo getModRefInfo(const LoadInst *L, const Value *P, uint64_t Size) {
ModRefInfo getModRefInfo(const LoadInst *L, const Value *P,
LocationSize Size) {
return getModRefInfo(L, MemoryLocation(P, Size));
}
@ -541,7 +547,8 @@ public:
ModRefInfo getModRefInfo(const StoreInst *S, const MemoryLocation &Loc);
/// getModRefInfo (for stores) - A convenience wrapper.
ModRefInfo getModRefInfo(const StoreInst *S, const Value *P, uint64_t Size) {
ModRefInfo getModRefInfo(const StoreInst *S, const Value *P,
LocationSize Size) {
return getModRefInfo(S, MemoryLocation(P, Size));
}
@ -550,7 +557,8 @@ public:
ModRefInfo getModRefInfo(const FenceInst *S, const MemoryLocation &Loc);
/// getModRefInfo (for fences) - A convenience wrapper.
ModRefInfo getModRefInfo(const FenceInst *S, const Value *P, uint64_t Size) {
ModRefInfo getModRefInfo(const FenceInst *S, const Value *P,
LocationSize Size) {
return getModRefInfo(S, MemoryLocation(P, Size));
}
@ -580,7 +588,8 @@ public:
ModRefInfo getModRefInfo(const VAArgInst *I, const MemoryLocation &Loc);
/// getModRefInfo (for va_args) - A convenience wrapper.
ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P, uint64_t Size) {
ModRefInfo getModRefInfo(const VAArgInst *I, const Value *P,
LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@ -590,7 +599,7 @@ public:
/// getModRefInfo (for catchpads) - A convenience wrapper.
ModRefInfo getModRefInfo(const CatchPadInst *I, const Value *P,
uint64_t Size) {
LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@ -600,7 +609,7 @@ public:
/// getModRefInfo (for catchrets) - A convenience wrapper.
ModRefInfo getModRefInfo(const CatchReturnInst *I, const Value *P,
uint64_t Size) {
LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@ -646,7 +655,7 @@ public:
/// A convenience wrapper for constructing the memory location.
ModRefInfo getModRefInfo(const Instruction *I, const Value *P,
uint64_t Size) {
LocationSize Size) {
return getModRefInfo(I, MemoryLocation(P, Size));
}
@ -659,7 +668,7 @@ public:
/// http://llvm.org/docs/AliasAnalysis.html#ModRefInfo
ModRefInfo getModRefInfo(ImmutableCallSite CS1, ImmutableCallSite CS2);
/// \brief Return information about whether a particular call site modifies
/// Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction ordering queries inside the BasicBlock containing \p I.
@ -669,9 +678,9 @@ public:
const MemoryLocation &MemLoc, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr);
/// \brief A convenience wrapper to synthesize a memory location.
/// A convenience wrapper to synthesize a memory location.
ModRefInfo callCapturesBefore(const Instruction *I, const Value *P,
uint64_t Size, DominatorTree *DT,
LocationSize Size, DominatorTree *DT,
OrderedBasicBlock *OBB = nullptr) {
return callCapturesBefore(I, MemoryLocation(P, Size), DT, OBB);
}
@ -687,7 +696,7 @@ public:
/// A convenience wrapper synthesizing a memory location.
bool canBasicBlockModify(const BasicBlock &BB, const Value *P,
uint64_t Size) {
LocationSize Size) {
return canBasicBlockModify(BB, MemoryLocation(P, Size));
}
@ -702,7 +711,7 @@ public:
/// A convenience wrapper synthesizing a memory location.
bool canInstructionRangeModRef(const Instruction &I1, const Instruction &I2,
const Value *Ptr, uint64_t Size,
const Value *Ptr, LocationSize Size,
const ModRefInfo Mode) {
return canInstructionRangeModRef(I1, I2, MemoryLocation(Ptr, Size), Mode);
}
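The convenience wrappers above now take their size as LocationSize (an alias for uint64_t at this point), and AliasResult gained a raw_ostream printer. A hypothetical helper using both; the AAResults reference is assumed to come from the usual analysis machinery:

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical helper: classify and print the relationship between two
// fixed-size memory accesses via the LocationSize-based wrappers.
static bool accessesMayAlias(AAResults &AA, const Value *P1, LocationSize S1,
                             const Value *P2, LocationSize S2) {
  AliasResult AR = AA.alias(P1, S1, P2, S2);
  errs() << "alias result: " << AR << '\n'; // uses the new operator<<
  return AR != NoAlias;
}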

View File

@ -56,7 +56,7 @@ public:
}
~AAEvaluator();
/// \brief Run the pass over the function.
/// Run the pass over the function.
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
private:

View File

@ -37,8 +37,8 @@ namespace llvm {
class AliasSetTracker;
class BasicBlock;
class LoadInst;
class MemSetInst;
class MemTransferInst;
class AnyMemSetInst;
class AnyMemTransferInst;
class raw_ostream;
class StoreInst;
class VAArgInst;
@ -52,7 +52,7 @@ class AliasSet : public ilist_node<AliasSet> {
PointerRec **PrevInList = nullptr;
PointerRec *NextInList = nullptr;
AliasSet *AS = nullptr;
uint64_t Size = 0;
LocationSize Size = 0;
AAMDNodes AAInfo;
public:
@ -69,7 +69,7 @@ class AliasSet : public ilist_node<AliasSet> {
return &NextInList;
}
bool updateSizeAndAAInfo(uint64_t NewSize, const AAMDNodes &NewAAInfo) {
bool updateSizeAndAAInfo(LocationSize NewSize, const AAMDNodes &NewAAInfo) {
bool SizeChanged = false;
if (NewSize > Size) {
Size = NewSize;
@ -91,7 +91,7 @@ class AliasSet : public ilist_node<AliasSet> {
return SizeChanged;
}
uint64_t getSize() const { return Size; }
LocationSize getSize() const { return Size; }
/// Return the AAInfo, or null if there is no information or conflicting
/// information.
@ -247,7 +247,7 @@ public:
value_type *operator->() const { return &operator*(); }
Value *getPointer() const { return CurNode->getValue(); }
uint64_t getSize() const { return CurNode->getSize(); }
LocationSize getSize() const { return CurNode->getSize(); }
AAMDNodes getAAInfo() const { return CurNode->getAAInfo(); }
iterator& operator++() { // Preincrement
@ -287,9 +287,8 @@ private:
void removeFromTracker(AliasSetTracker &AST);
void addPointer(AliasSetTracker &AST, PointerRec &Entry, uint64_t Size,
const AAMDNodes &AAInfo,
bool KnownMustAlias = false);
void addPointer(AliasSetTracker &AST, PointerRec &Entry, LocationSize Size,
const AAMDNodes &AAInfo, bool KnownMustAlias = false);
void addUnknownInst(Instruction *I, AliasAnalysis &AA);
void removeUnknownInst(AliasSetTracker &AST, Instruction *I) {
@ -309,8 +308,8 @@ private:
public:
/// Return true if the specified pointer "may" (or must) alias one of the
/// members in the set.
bool aliasesPointer(const Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo,
AliasAnalysis &AA) const;
bool aliasesPointer(const Value *Ptr, LocationSize Size,
const AAMDNodes &AAInfo, AliasAnalysis &AA) const;
bool aliasesUnknownInst(const Instruction *Inst, AliasAnalysis &AA) const;
};
@ -364,12 +363,12 @@ public:
/// These methods return true if inserting the instruction resulted in the
/// addition of a new alias set (i.e., the pointer did not alias anything).
///
void add(Value *Ptr, uint64_t Size, const AAMDNodes &AAInfo); // Add a loc.
void add(Value *Ptr, LocationSize Size, const AAMDNodes &AAInfo); // Add a loc
void add(LoadInst *LI);
void add(StoreInst *SI);
void add(VAArgInst *VAAI);
void add(MemSetInst *MSI);
void add(MemTransferInst *MTI);
void add(AnyMemSetInst *MSI);
void add(AnyMemTransferInst *MTI);
void add(Instruction *I); // Dispatch to one of the other add methods...
void add(BasicBlock &BB); // Add all instructions in basic block
void add(const AliasSetTracker &AST); // Add alias relations from another AST
@ -384,12 +383,12 @@ public:
/// argument is non-null, this method sets the value to true if a new alias
/// set is created to contain the pointer (because the pointer didn't alias
/// anything).
AliasSet &getAliasSetForPointer(Value *P, uint64_t Size,
AliasSet &getAliasSetForPointer(Value *P, LocationSize Size,
const AAMDNodes &AAInfo);
/// Return the alias set containing the location specified if one exists,
/// otherwise return null.
AliasSet *getAliasSetForPointerIfExists(const Value *P, uint64_t Size,
AliasSet *getAliasSetForPointerIfExists(const Value *P, LocationSize Size,
const AAMDNodes &AAInfo) {
return mergeAliasSetsForPointer(P, Size, AAInfo);
}
@ -446,9 +445,9 @@ private:
return *Entry;
}
AliasSet &addPointer(Value *P, uint64_t Size, const AAMDNodes &AAInfo,
AliasSet &addPointer(Value *P, LocationSize Size, const AAMDNodes &AAInfo,
AliasSet::AccessLattice E);
AliasSet *mergeAliasSetsForPointer(const Value *Ptr, uint64_t Size,
AliasSet *mergeAliasSetsForPointer(const Value *Ptr, LocationSize Size,
const AAMDNodes &AAInfo);
/// Merge all alias sets into a single set that is considered to alias any

View File

@ -32,20 +32,20 @@ class Function;
class raw_ostream;
class Value;
/// \brief A cache of @llvm.assume calls within a function.
/// A cache of \@llvm.assume calls within a function.
///
/// This cache provides fast lookup of assumptions within a function by caching
/// them and amortizing the cost of scanning for them across all queries. Passes
/// that create new assumptions are required to call registerAssumption() to
/// register any new @llvm.assume calls that they create. Deletions of
/// @llvm.assume calls do not require special handling.
/// register any new \@llvm.assume calls that they create. Deletions of
/// \@llvm.assume calls do not require special handling.
class AssumptionCache {
/// \brief The function for which this cache is handling assumptions.
/// The function for which this cache is handling assumptions.
///
/// We track this to lazily populate our assumptions.
Function &F;
/// \brief Vector of weak value handles to calls of the @llvm.assume
/// Vector of weak value handles to calls of the \@llvm.assume
/// intrinsic.
SmallVector<WeakTrackingVH, 4> AssumeHandles;
@ -64,7 +64,7 @@ class AssumptionCache {
friend AffectedValueCallbackVH;
/// \brief A map of values about which an assumption might be providing
/// A map of values about which an assumption might be providing
/// information to the relevant set of assumptions.
using AffectedValuesMap =
DenseMap<AffectedValueCallbackVH, SmallVector<WeakTrackingVH, 1>,
@ -77,17 +77,17 @@ class AssumptionCache {
/// Copy affected values in the cache for OV to be affected values for NV.
void copyAffectedValuesInCache(Value *OV, Value *NV);
/// \brief Flag tracking whether we have scanned the function yet.
/// Flag tracking whether we have scanned the function yet.
///
/// We want to be as lazy about this as possible, and so we scan the function
/// at the last moment.
bool Scanned = false;
/// \brief Scan the function for assumptions and add them to the cache.
/// Scan the function for assumptions and add them to the cache.
void scanFunction();
public:
/// \brief Construct an AssumptionCache from a function by scanning all of
/// Construct an AssumptionCache from a function by scanning all of
/// its instructions.
AssumptionCache(Function &F) : F(F) {}
@ -98,17 +98,17 @@ public:
return false;
}
/// \brief Add an @llvm.assume intrinsic to this function's cache.
/// Add an \@llvm.assume intrinsic to this function's cache.
///
/// The call passed in must be an instruction within this function and must
/// not already be in the cache.
void registerAssumption(CallInst *CI);
/// \brief Update the cache of values being affected by this assumption (i.e.
/// Update the cache of values being affected by this assumption (i.e.
/// the values about which this assumption provides information).
void updateAffectedValues(CallInst *CI);
/// \brief Clear the cache of @llvm.assume intrinsics for a function.
/// Clear the cache of \@llvm.assume intrinsics for a function.
///
/// It will be re-scanned the next time it is requested.
void clear() {
@ -117,7 +117,7 @@ public:
Scanned = false;
}
/// \brief Access the list of assumption handles currently tracked for this
/// Access the list of assumption handles currently tracked for this
/// function.
///
/// Note that these produce weak handles that may be null. The caller must
@ -131,7 +131,7 @@ public:
return AssumeHandles;
}
/// \brief Access the list of assumptions which affect this value.
/// Access the list of assumptions which affect this value.
MutableArrayRef<WeakTrackingVH> assumptionsFor(const Value *V) {
if (!Scanned)
scanFunction();
@ -144,7 +144,7 @@ public:
}
};
/// \brief A function analysis which provides an \c AssumptionCache.
/// A function analysis which provides an \c AssumptionCache.
///
/// This analysis is intended for use with the new pass manager and will vend
/// assumption caches for a given function.
@ -161,7 +161,7 @@ public:
}
};
/// \brief Printer pass for the \c AssumptionAnalysis results.
/// Printer pass for the \c AssumptionAnalysis results.
class AssumptionPrinterPass : public PassInfoMixin<AssumptionPrinterPass> {
raw_ostream &OS;
@ -171,7 +171,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief An immutable pass that tracks lazily created \c AssumptionCache
/// An immutable pass that tracks lazily created \c AssumptionCache
/// objects.
///
/// This is essentially a workaround for the legacy pass manager's weaknesses
@ -203,7 +203,7 @@ class AssumptionCacheTracker : public ImmutablePass {
FunctionCallsMap AssumptionCaches;
public:
/// \brief Get the cached assumptions for a function.
/// Get the cached assumptions for a function.
///
/// If no assumptions are cached, this will scan the function. Otherwise, the
/// existing cache will be returned.

View File

@ -55,26 +55,27 @@ class BasicAAResult : public AAResultBase<BasicAAResult> {
friend AAResultBase<BasicAAResult>;
const DataLayout &DL;
const Function &F;
const TargetLibraryInfo &TLI;
AssumptionCache &AC;
DominatorTree *DT;
LoopInfo *LI;
public:
BasicAAResult(const DataLayout &DL, const TargetLibraryInfo &TLI,
AssumptionCache &AC, DominatorTree *DT = nullptr,
LoopInfo *LI = nullptr)
: AAResultBase(), DL(DL), TLI(TLI), AC(AC), DT(DT), LI(LI) {}
BasicAAResult(const DataLayout &DL, const Function &F,
const TargetLibraryInfo &TLI, AssumptionCache &AC,
DominatorTree *DT = nullptr, LoopInfo *LI = nullptr)
: AAResultBase(), DL(DL), F(F), TLI(TLI), AC(AC), DT(DT), LI(LI) {}
BasicAAResult(const BasicAAResult &Arg)
: AAResultBase(Arg), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC), DT(Arg.DT),
LI(Arg.LI) {}
BasicAAResult(BasicAAResult &&Arg)
: AAResultBase(std::move(Arg)), DL(Arg.DL), TLI(Arg.TLI), AC(Arg.AC),
: AAResultBase(Arg), DL(Arg.DL), F(Arg.F), TLI(Arg.TLI), AC(Arg.AC),
DT(Arg.DT), LI(Arg.LI) {}
BasicAAResult(BasicAAResult &&Arg)
: AAResultBase(std::move(Arg)), DL(Arg.DL), F(Arg.F), TLI(Arg.TLI),
AC(Arg.AC), DT(Arg.DT), LI(Arg.LI) {}
/// Handle invalidation events in the new pass manager.
bool invalidate(Function &F, const PreservedAnalyses &PA,
bool invalidate(Function &Fn, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv);
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB);
@ -94,7 +95,7 @@ public:
/// Returns the behavior when calling the given function. For use when the
/// call site is not known.
FunctionModRefBehavior getModRefBehavior(const Function *F);
FunctionModRefBehavior getModRefBehavior(const Function *Fn);
private:
// A linear transformation of a Value; this class represents ZExt(SExt(V,
@ -171,9 +172,9 @@ private:
static bool isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
uint64_t ObjectAccessSize);
LocationSize ObjectAccessSize);
/// \brief A Heuristic for aliasGEP that searches for a constant offset
/// A Heuristic for aliasGEP that searches for a constant offset
/// between the variables.
///
/// GetLinearExpression has some limitations, as generally zext(%x + 1)
@ -183,31 +184,33 @@ private:
/// the addition overflows.
bool
constantOffsetHeuristic(const SmallVectorImpl<VariableGEPIndex> &VarIndices,
uint64_t V1Size, uint64_t V2Size, int64_t BaseOffset,
AssumptionCache *AC, DominatorTree *DT);
LocationSize V1Size, LocationSize V2Size,
int64_t BaseOffset, AssumptionCache *AC,
DominatorTree *DT);
bool isValueEqualInPotentialCycles(const Value *V1, const Value *V2);
void GetIndexDifference(SmallVectorImpl<VariableGEPIndex> &Dest,
const SmallVectorImpl<VariableGEPIndex> &Src);
AliasResult aliasGEP(const GEPOperator *V1, uint64_t V1Size,
AliasResult aliasGEP(const GEPOperator *V1, LocationSize V1Size,
const AAMDNodes &V1AAInfo, const Value *V2,
uint64_t V2Size, const AAMDNodes &V2AAInfo,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
const Value *UnderlyingV1, const Value *UnderlyingV2);
AliasResult aliasPHI(const PHINode *PN, uint64_t PNSize,
AliasResult aliasPHI(const PHINode *PN, LocationSize PNSize,
const AAMDNodes &PNAAInfo, const Value *V2,
uint64_t V2Size, const AAMDNodes &V2AAInfo,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
const Value *UnderV2);
AliasResult aliasSelect(const SelectInst *SI, uint64_t SISize,
AliasResult aliasSelect(const SelectInst *SI, LocationSize SISize,
const AAMDNodes &SIAAInfo, const Value *V2,
uint64_t V2Size, const AAMDNodes &V2AAInfo,
LocationSize V2Size, const AAMDNodes &V2AAInfo,
const Value *UnderV2);
AliasResult aliasCheck(const Value *V1, uint64_t V1Size, AAMDNodes V1AATag,
const Value *V2, uint64_t V2Size, AAMDNodes V2AATag,
AliasResult aliasCheck(const Value *V1, LocationSize V1Size,
AAMDNodes V1AATag, const Value *V2,
LocationSize V2Size, AAMDNodes V2AATag,
const Value *O1 = nullptr, const Value *O2 = nullptr);
};
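Since BasicAAResult now captures the enclosing Function and takes its sizes as LocationSize, here is a minimal, hedged sketch of how a client usually issues the query, going through AAResults rather than constructing BasicAAResult by hand; AA, LoadA and LoadB are assumed to exist and are not part of this patch.

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Sketch only: AA is an already-populated AAResults for the function that
// contains both loads.
static AliasResult queryLoads(AAResults &AA, const LoadInst *LoadA,
                              const LoadInst *LoadB) {
  // MemoryLocation bundles the pointer, its LocationSize and the AA metadata
  // that BasicAAResult::alias() consumes.
  MemoryLocation LocA = MemoryLocation::get(LoadA);
  MemoryLocation LocB = MemoryLocation::get(LoadB);
  return AA.alias(LocA, LocB);
}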

View File

@ -65,17 +65,17 @@ public:
/// floating points.
BlockFrequency getBlockFreq(const BasicBlock *BB) const;
/// \brief Returns the estimated profile count of \p BB.
/// Returns the estimated profile count of \p BB.
/// This computes the relative block frequency of \p BB and multiplies it by
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getBlockProfileCount(const BasicBlock *BB) const;
/// \brief Returns the estimated profile count of \p Freq.
/// Returns the estimated profile count of \p Freq.
/// This uses the frequency \p Freq and multiplies it by
/// the enclosing function's count (if available) and returns the value.
Optional<uint64_t> getProfileCountFromFreq(uint64_t Freq) const;
/// \brief Returns true if \p BB is an irreducible loop header
/// Returns true if \p BB is an irreducible loop header
/// block. Otherwise false.
bool isIrrLoopHeader(const BasicBlock *BB);
@ -105,7 +105,7 @@ public:
void print(raw_ostream &OS) const;
};
/// \brief Analysis pass which computes \c BlockFrequencyInfo.
/// Analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyAnalysis
: public AnalysisInfoMixin<BlockFrequencyAnalysis> {
friend AnalysisInfoMixin<BlockFrequencyAnalysis>;
@ -113,14 +113,14 @@ class BlockFrequencyAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = BlockFrequencyInfo;
/// \brief Run the analysis pass over a function and produce BFI.
/// Run the analysis pass over a function and produce BFI.
Result run(Function &F, FunctionAnalysisManager &AM);
};
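For context, a hedged sketch of how BlockFrequencyAnalysis is consumed from a new-pass-manager function pass; PrintBlockCountsPass is a made-up name and not part of this patch.

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

struct PrintBlockCountsPass : PassInfoMixin<PrintBlockCountsPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    BlockFrequencyInfo &BFI = AM.getResult<BlockFrequencyAnalysis>(F);
    for (BasicBlock &BB : F) {
      // getBlockProfileCount() only yields a value when the enclosing
      // function has an entry count (e.g. from PGO).
      if (Optional<uint64_t> Count = BFI.getBlockProfileCount(&BB))
        errs() << BB.getName() << ": " << *Count << "\n";
    }
    return PreservedAnalyses::all();
  }
};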
/// \brief Printer pass for the \c BlockFrequencyInfo results.
/// Printer pass for the \c BlockFrequencyInfo results.
class BlockFrequencyPrinterPass
: public PassInfoMixin<BlockFrequencyPrinterPass> {
raw_ostream &OS;
@ -131,7 +131,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c BlockFrequencyInfo.
/// Legacy analysis pass which computes \c BlockFrequencyInfo.
class BlockFrequencyInfoWrapperPass : public FunctionPass {
BlockFrequencyInfo BFI;

View File

@ -66,7 +66,7 @@ struct IrreducibleGraph;
// This is part of a workaround for a GCC 4.7 crash on lambdas.
template <class BT> struct BlockEdgesAdder;
/// \brief Mass of a block.
/// Mass of a block.
///
/// This class implements a sort of fixed-point fraction always between 0.0 and
/// 1.0. getMass() == std::numeric_limits<uint64_t>::max() indicates a value of
@ -100,7 +100,7 @@ public:
bool operator!() const { return isEmpty(); }
/// \brief Add another mass.
/// Add another mass.
///
/// Adds another mass, saturating at \a isFull() rather than overflowing.
BlockMass &operator+=(BlockMass X) {
@ -109,7 +109,7 @@ public:
return *this;
}
/// \brief Subtract another mass.
/// Subtract another mass.
///
/// Subtracts another mass, saturating at \a isEmpty() rather than
/// underflowing.
@ -131,7 +131,7 @@ public:
bool operator<(BlockMass X) const { return Mass < X.Mass; }
bool operator>(BlockMass X) const { return Mass > X.Mass; }
/// \brief Convert to scaled number.
/// Convert to scaled number.
///
/// Convert to \a ScaledNumber. \a isFull() gives 1.0, while \a isEmpty()
/// gives slightly above 0.0.
@ -164,7 +164,7 @@ template <> struct isPodLike<bfi_detail::BlockMass> {
static const bool value = true;
};
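The saturating arithmetic described above can be illustrated with a small sketch; it assumes BlockMass's getFullMass()/getEmptyMass() factories and the isFull()/isEmpty() predicates, which are part of the class but not all shown in this excerpt.

#include <cassert>

#include "llvm/Analysis/BlockFrequencyInfoImpl.h"

using llvm::bfi_detail::BlockMass;

static void saturationDemo() {
  BlockMass M = BlockMass::getFullMass();
  M += BlockMass::getFullMass(); // saturates at isFull() rather than wrapping
  assert(M.isFull());
  M -= BlockMass::getFullMass();
  M -= BlockMass::getFullMass(); // saturates at isEmpty() rather than wrapping
  assert(M.isEmpty());
}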
/// \brief Base class for BlockFrequencyInfoImpl
/// Base class for BlockFrequencyInfoImpl
///
/// BlockFrequencyInfoImplBase has supporting data structures and some
/// algorithms for BlockFrequencyInfoImplBase. Only algorithms that depend on
@ -177,7 +177,7 @@ public:
using Scaled64 = ScaledNumber<uint64_t>;
using BlockMass = bfi_detail::BlockMass;
/// \brief Representative of a block.
/// Representative of a block.
///
/// This is a simple wrapper around an index into the reverse-post-order
/// traversal of the blocks.
@ -206,13 +206,13 @@ public:
}
};
/// \brief Stats about a block itself.
/// Stats about a block itself.
struct FrequencyData {
Scaled64 Scaled;
uint64_t Integer;
};
/// \brief Data about a loop.
/// Data about a loop.
///
/// Contains the data necessary to represent a loop as a pseudo-node once it's
/// packaged.
@ -270,7 +270,7 @@ public:
}
};
/// \brief Index of loop information.
/// Index of loop information.
struct WorkingData {
BlockNode Node; ///< This node.
LoopData *Loop = nullptr; ///< The loop this block is inside.
@ -293,7 +293,7 @@ public:
return Loop->Parent->Parent;
}
/// \brief Resolve a node to its representative.
/// Resolve a node to its representative.
///
/// Get the node currently representing Node, which could be a containing
/// loop.
@ -320,7 +320,7 @@ public:
return L;
}
/// \brief Get the appropriate mass for a node.
/// Get the appropriate mass for a node.
///
/// Get appropriate mass for Node. If Node is a loop-header (whose loop
/// has been packaged), returns the mass of its pseudo-node. If it's a
@ -333,19 +333,19 @@ public:
return Loop->Parent->Mass;
}
/// \brief Has ContainingLoop been packaged up?
/// Has ContainingLoop been packaged up?
bool isPackaged() const { return getResolvedNode() != Node; }
/// \brief Has Loop been packaged up?
/// Has Loop been packaged up?
bool isAPackage() const { return isLoopHeader() && Loop->IsPackaged; }
/// \brief Has Loop been packaged up twice?
/// Has Loop been packaged up twice?
bool isADoublePackage() const {
return isDoubleLoopHeader() && Loop->Parent->IsPackaged;
}
};
/// \brief Unscaled probability weight.
/// Unscaled probability weight.
///
/// Probability weight for an edge in the graph (including the
/// successor/target node).
@ -369,7 +369,7 @@ public:
: Type(Type), TargetNode(TargetNode), Amount(Amount) {}
};
/// \brief Distribution of unscaled probability weight.
/// Distribution of unscaled probability weight.
///
/// Distribution of unscaled probability weight to a set of successors.
///
@ -398,7 +398,7 @@ public:
add(Node, Amount, Weight::Backedge);
}
/// \brief Normalize the distribution.
/// Normalize the distribution.
///
/// Combines multiple edges to the same \a Weight::TargetNode and scales
/// down so that \a Total fits into 32-bits.
@ -413,26 +413,26 @@ public:
void add(const BlockNode &Node, uint64_t Amount, Weight::DistType Type);
};
/// \brief Data about each block. This is used downstream.
/// Data about each block. This is used downstream.
std::vector<FrequencyData> Freqs;
/// \brief Whether each block is an irreducible loop header.
/// Whether each block is an irreducible loop header.
/// This is used downstream.
SparseBitVector<> IsIrrLoopHeader;
/// \brief Loop data: see initializeLoops().
/// Loop data: see initializeLoops().
std::vector<WorkingData> Working;
/// \brief Indexed information about loops.
/// Indexed information about loops.
std::list<LoopData> Loops;
/// \brief Virtual destructor.
/// Virtual destructor.
///
/// Need a virtual destructor to mask the compiler warning about
/// getBlockName().
virtual ~BlockFrequencyInfoImplBase() = default;
/// \brief Add all edges out of a packaged loop to the distribution.
/// Add all edges out of a packaged loop to the distribution.
///
/// Adds all edges from LocalLoopHead to Dist. Calls addToDist() to add each
/// successor edge.
@ -441,7 +441,7 @@ public:
bool addLoopSuccessorsToDist(const LoopData *OuterLoop, LoopData &Loop,
Distribution &Dist);
/// \brief Add an edge to the distribution.
/// Add an edge to the distribution.
///
/// Adds an edge to Succ to Dist. If \c LoopHead.isValid(), then whether the
/// edge is local/exit/backedge is in the context of LoopHead. Otherwise,
@ -457,7 +457,7 @@ public:
return *Working[Head.Index].Loop;
}
/// \brief Analyze irreducible SCCs.
/// Analyze irreducible SCCs.
///
/// Separate irreducible SCCs from \c G, which is an explicit graph of \c
/// OuterLoop (or the top-level function, if \c OuterLoop is \c nullptr).
@ -468,7 +468,7 @@ public:
analyzeIrreducible(const bfi_detail::IrreducibleGraph &G, LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
/// \brief Update a loop after packaging irreducible SCCs inside of it.
/// Update a loop after packaging irreducible SCCs inside of it.
///
/// Update \c OuterLoop. Before finding irreducible control flow, it was
/// partway through \a computeMassInLoop(), so \a LoopData::Exits and \a
@ -476,7 +476,7 @@ public:
/// up need to be removed from \a OuterLoop::Nodes.
void updateLoopWithIrreducible(LoopData &OuterLoop);
/// \brief Distribute mass according to a distribution.
/// Distribute mass according to a distribution.
///
/// Distributes the mass in Source according to Dist. If LoopHead.isValid(),
/// backedges and exits are stored in its entry in Loops.
@ -485,7 +485,7 @@ public:
void distributeMass(const BlockNode &Source, LoopData *OuterLoop,
Distribution &Dist);
/// \brief Compute the loop scale for a loop.
/// Compute the loop scale for a loop.
void computeLoopScale(LoopData &Loop);
/// Adjust the mass of all headers in an irreducible loop.
@ -500,19 +500,19 @@ public:
void distributeIrrLoopHeaderMass(Distribution &Dist);
/// \brief Package up a loop.
/// Package up a loop.
void packageLoop(LoopData &Loop);
/// \brief Unwrap loops.
/// Unwrap loops.
void unwrapLoops();
/// \brief Finalize frequency metrics.
/// Finalize frequency metrics.
///
/// Calculates final frequencies and cleans up no-longer-needed data
/// structures.
void finalizeMetrics();
/// \brief Clear all memory.
/// Clear all memory.
void clear();
virtual std::string getBlockName(const BlockNode &Node) const;
@ -560,7 +560,7 @@ template <> struct TypeMap<MachineBasicBlock> {
using LoopInfoT = MachineLoopInfo;
};
/// \brief Get the name of a MachineBasicBlock.
/// Get the name of a MachineBasicBlock.
///
/// Get the name of a MachineBasicBlock. It's templated so that including from
/// CodeGen is unnecessary (that would be a layering issue).
@ -574,13 +574,13 @@ template <class BlockT> std::string getBlockName(const BlockT *BB) {
return (MachineName + "[" + BB->getName() + "]").str();
return MachineName.str();
}
/// \brief Get the name of a BasicBlock.
/// Get the name of a BasicBlock.
template <> inline std::string getBlockName(const BasicBlock *BB) {
assert(BB && "Unexpected nullptr");
return BB->getName().str();
}
/// \brief Graph of irreducible control flow.
/// Graph of irreducible control flow.
///
/// This graph is used for determining the SCCs in a loop (or top-level
/// function) that has irreducible control flow.
@ -619,7 +619,7 @@ struct IrreducibleGraph {
std::vector<IrrNode> Nodes;
SmallDenseMap<uint32_t, IrrNode *, 4> Lookup;
/// \brief Construct an explicit graph containing irreducible control flow.
/// Construct an explicit graph containing irreducible control flow.
///
/// Construct an explicit graph of the control flow in \c OuterLoop (or the
/// top-level function, if \c OuterLoop is \c nullptr). Uses \c
@ -687,7 +687,7 @@ void IrreducibleGraph::addEdges(const BlockNode &Node,
} // end namespace bfi_detail
/// \brief Shared implementation for block frequency analysis.
/// Shared implementation for block frequency analysis.
///
/// This is a shared implementation of BlockFrequencyInfo and
/// MachineBlockFrequencyInfo, and calculates the relative frequencies of
@ -878,12 +878,12 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
return RPOT[Node.Index];
}
/// \brief Run (and save) a post-order traversal.
/// Run (and save) a post-order traversal.
///
/// Saves a reverse post-order traversal of all the nodes in \a F.
void initializeRPOT();
/// \brief Initialize loop data.
/// Initialize loop data.
///
/// Build up \a Loops using \a LoopInfo. \a LoopInfo gives us a mapping from
/// each block to the deepest loop it's in, but we need the inverse. For each
@ -892,7 +892,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// the loop that are not in sub-loops.
void initializeLoops();
/// \brief Propagate to a block's successors.
/// Propagate to a block's successors.
///
/// In the context of distributing mass through \c OuterLoop, divide the mass
/// currently assigned to \c Node between its successors.
@ -900,7 +900,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \return \c true unless there's an irreducible backedge.
bool propagateMassToSuccessors(LoopData *OuterLoop, const BlockNode &Node);
/// \brief Compute mass in a particular loop.
/// Compute mass in a particular loop.
///
/// Assign mass to \c Loop's header, and then for each block in \c Loop in
/// reverse post-order, distribute mass to its successors. Only visits nodes
@ -910,7 +910,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \return \c true unless there's an irreducible backedge.
bool computeMassInLoop(LoopData &Loop);
/// \brief Try to compute mass in the top-level function.
/// Try to compute mass in the top-level function.
///
/// Assign mass to the entry block, and then for each block in reverse
/// post-order, distribute mass to its successors. Skips nodes that have
@ -920,7 +920,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \return \c true unless there's an irreducible backedge.
bool tryToComputeMassInFunction();
/// \brief Compute mass in (and package up) irreducible SCCs.
/// Compute mass in (and package up) irreducible SCCs.
///
/// Find the irreducible SCCs in \c OuterLoop, add them to \a Loops (in front
/// of \c Insert), and call \a computeMassInLoop() on each of them.
@ -935,7 +935,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
void computeIrreducibleMass(LoopData *OuterLoop,
std::list<LoopData>::iterator Insert);
/// \brief Compute mass in all loops.
/// Compute mass in all loops.
///
/// For each loop bottom-up, call \a computeMassInLoop().
///
@ -946,7 +946,7 @@ template <class BT> class BlockFrequencyInfoImpl : BlockFrequencyInfoImplBase {
/// \post \a computeMassInLoop() has returned \c true for every loop.
void computeMassInLoops();
/// \brief Compute mass in the top-level function.
/// Compute mass in the top-level function.
///
/// Uses \a tryToComputeMassInFunction() and \a computeIrreducibleMass() to
/// compute mass in the top-level function.
@ -994,7 +994,7 @@ public:
const BranchProbabilityInfoT &getBPI() const { return *BPI; }
/// \brief Print the frequencies for the current function.
/// Print the frequencies for the current function.
///
/// Prints the frequencies for the blocks in the current function.
///
@ -1030,8 +1030,9 @@ void BlockFrequencyInfoImpl<BT>::calculate(const FunctionT &F,
Nodes.clear();
// Initialize.
DEBUG(dbgs() << "\nblock-frequency: " << F.getName() << "\n================="
<< std::string(F.getName().size(), '=') << "\n");
LLVM_DEBUG(dbgs() << "\nblock-frequency: " << F.getName()
<< "\n================="
<< std::string(F.getName().size(), '=') << "\n");
initializeRPOT();
initializeLoops();
@ -1067,10 +1068,11 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
assert(RPOT.size() - 1 <= BlockNode::getMaxIndex() &&
"More nodes in function than Block Frequency Info supports");
DEBUG(dbgs() << "reverse-post-order-traversal\n");
LLVM_DEBUG(dbgs() << "reverse-post-order-traversal\n");
for (rpot_iterator I = rpot_begin(), E = rpot_end(); I != E; ++I) {
BlockNode Node = getNode(I);
DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node) << "\n");
LLVM_DEBUG(dbgs() << " - " << getIndex(I) << ": " << getBlockName(Node)
<< "\n");
Nodes[*I] = Node;
}
@ -1081,7 +1083,7 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeRPOT() {
}
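Most of the changes in this file replace the old DEBUG macro with LLVM_DEBUG. A hedged sketch of the idiom, with an illustrative DEBUG_TYPE and helper that are not taken from this patch:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "block-freq"

static void reportNode(llvm::StringRef Name) {
  // Compiled out of NDEBUG builds; otherwise printed only when the tool is
  // run with -debug or -debug-only=block-freq.
  LLVM_DEBUG(llvm::dbgs() << " - node: " << Name << "\n");
}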
template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
DEBUG(dbgs() << "loop-detection\n");
LLVM_DEBUG(dbgs() << "loop-detection\n");
if (LI->empty())
return;
@ -1099,7 +1101,7 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
Loops.emplace_back(Parent, Header);
Working[Header.Index].Loop = &Loops.back();
DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header) << "\n");
for (const LoopT *L : *Loop)
Q.emplace_back(L, &Loops.back());
@ -1128,8 +1130,8 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::initializeLoops() {
Working[Index].Loop = HeaderData.Loop;
HeaderData.Loop->Nodes.push_back(Index);
DEBUG(dbgs() << " - loop = " << getBlockName(Header)
<< ": member = " << getBlockName(Index) << "\n");
LLVM_DEBUG(dbgs() << " - loop = " << getBlockName(Header)
<< ": member = " << getBlockName(Index) << "\n");
}
}
@ -1150,10 +1152,10 @@ template <class BT> void BlockFrequencyInfoImpl<BT>::computeMassInLoops() {
template <class BT>
bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
// Compute mass in loop.
DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
LLVM_DEBUG(dbgs() << "compute-mass-in-loop: " << getLoopName(Loop) << "\n");
if (Loop.isIrreducible()) {
DEBUG(dbgs() << "isIrreducible = true\n");
LLVM_DEBUG(dbgs() << "isIrreducible = true\n");
Distribution Dist;
unsigned NumHeadersWithWeight = 0;
Optional<uint64_t> MinHeaderWeight;
@ -1165,14 +1167,14 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
IsIrrLoopHeader.set(Loop.Nodes[H].Index);
Optional<uint64_t> HeaderWeight = Block->getIrrLoopHeaderWeight();
if (!HeaderWeight) {
DEBUG(dbgs() << "Missing irr loop header metadata on "
<< getBlockName(HeaderNode) << "\n");
LLVM_DEBUG(dbgs() << "Missing irr loop header metadata on "
<< getBlockName(HeaderNode) << "\n");
HeadersWithoutWeight.insert(H);
continue;
}
DEBUG(dbgs() << getBlockName(HeaderNode)
<< " has irr loop header weight " << HeaderWeight.getValue()
<< "\n");
LLVM_DEBUG(dbgs() << getBlockName(HeaderNode)
<< " has irr loop header weight "
<< HeaderWeight.getValue() << "\n");
NumHeadersWithWeight++;
uint64_t HeaderWeightValue = HeaderWeight.getValue();
if (!MinHeaderWeight || HeaderWeightValue < MinHeaderWeight)
@ -1194,8 +1196,8 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
assert(!getBlock(HeaderNode)->getIrrLoopHeaderWeight() &&
"Shouldn't have a weight metadata");
uint64_t MinWeight = MinHeaderWeight.getValue();
DEBUG(dbgs() << "Giving weight " << MinWeight
<< " to " << getBlockName(HeaderNode) << "\n");
LLVM_DEBUG(dbgs() << "Giving weight " << MinWeight << " to "
<< getBlockName(HeaderNode) << "\n");
if (MinWeight)
Dist.addLocal(HeaderNode, MinWeight);
}
@ -1224,7 +1226,7 @@ bool BlockFrequencyInfoImpl<BT>::computeMassInLoop(LoopData &Loop) {
template <class BT>
bool BlockFrequencyInfoImpl<BT>::tryToComputeMassInFunction() {
// Compute mass in function.
DEBUG(dbgs() << "compute-mass-in-function\n");
LLVM_DEBUG(dbgs() << "compute-mass-in-function\n");
assert(!Working.empty() && "no blocks in function");
assert(!Working[0].isLoopHeader() && "entry block is a loop header");
@ -1276,9 +1278,10 @@ template <class BT> struct BlockEdgesAdder {
template <class BT>
void BlockFrequencyInfoImpl<BT>::computeIrreducibleMass(
LoopData *OuterLoop, std::list<LoopData>::iterator Insert) {
DEBUG(dbgs() << "analyze-irreducible-in-";
if (OuterLoop) dbgs() << "loop: " << getLoopName(*OuterLoop) << "\n";
else dbgs() << "function\n");
LLVM_DEBUG(dbgs() << "analyze-irreducible-in-";
if (OuterLoop) dbgs()
<< "loop: " << getLoopName(*OuterLoop) << "\n";
else dbgs() << "function\n");
using namespace bfi_detail;
@ -1304,7 +1307,7 @@ template <class BT>
bool
BlockFrequencyInfoImpl<BT>::propagateMassToSuccessors(LoopData *OuterLoop,
const BlockNode &Node) {
DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
LLVM_DEBUG(dbgs() << " - node: " << getBlockName(Node) << "\n");
// Calculate probability for successors.
Distribution Dist;
if (auto *Loop = Working[Node.Index].getPackagedLoop()) {

View File

@ -38,7 +38,7 @@ class raw_ostream;
class TargetLibraryInfo;
class Value;
/// \brief Analysis providing branch probability information.
/// Analysis providing branch probability information.
///
/// This is a function analysis which provides information on the relative
/// probabilities of each "edge" in the function's CFG where such an edge is
@ -79,7 +79,7 @@ public:
void print(raw_ostream &OS) const;
/// \brief Get an edge's probability, relative to other out-edges of the Src.
/// Get an edge's probability, relative to other out-edges of the Src.
///
/// This routine provides access to the fractional probability between zero
/// (0%) and one (100%) of this edge executing, relative to other edges
@ -88,7 +88,7 @@ public:
BranchProbability getEdgeProbability(const BasicBlock *Src,
unsigned IndexInSuccessors) const;
/// \brief Get the probability of going from Src to Dst.
/// Get the probability of going from Src to Dst.
///
/// It returns the sum of all probabilities for edges from Src to Dst.
BranchProbability getEdgeProbability(const BasicBlock *Src,
@ -97,19 +97,19 @@ public:
BranchProbability getEdgeProbability(const BasicBlock *Src,
succ_const_iterator Dst) const;
/// \brief Test if an edge is hot relative to other out-edges of the Src.
/// Test if an edge is hot relative to other out-edges of the Src.
///
/// Check whether this edge out of the source block is 'hot'. We define hot
/// as having a relative probability >= 80%.
bool isEdgeHot(const BasicBlock *Src, const BasicBlock *Dst) const;
/// \brief Retrieve the hot successor of a block if one exists.
/// Retrieve the hot successor of a block if one exists.
///
/// Given a basic block, look through its successors and if one exists for
/// which \see isEdgeHot would return true, return that successor block.
const BasicBlock *getHotSucc(const BasicBlock *BB) const;
/// \brief Print an edge's probability.
/// Print an edge's probability.
///
/// Retrieves an edge's probability similarly to \see getEdgeProbability, but
/// then prints that probability to the provided stream. That stream is then
@ -117,7 +117,7 @@ public:
raw_ostream &printEdgeProbability(raw_ostream &OS, const BasicBlock *Src,
const BasicBlock *Dst) const;
/// \brief Set the raw edge probability for the given edge.
/// Set the raw edge probability for the given edge.
///
/// This allows a pass to explicitly set the edge probability for an edge. It
/// can be used when updating the CFG to update and preserve the branch
@ -179,13 +179,13 @@ private:
DenseMap<Edge, BranchProbability> Probs;
/// \brief Track the last function we run over for printing.
/// Track the last function we run over for printing.
const Function *LastF;
/// \brief Track the set of blocks directly succeeded by a returning block.
/// Track the set of blocks directly succeeded by a returning block.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByUnreachable;
/// \brief Track the set of blocks that always lead to a cold call.
/// Track the set of blocks that always lead to a cold call.
SmallPtrSet<const BasicBlock *, 16> PostDominatedByColdCall;
void updatePostDominatedByUnreachable(const BasicBlock *BB);
@ -201,7 +201,7 @@ private:
bool calcInvokeHeuristics(const BasicBlock *BB);
};
/// \brief Analysis pass which computes \c BranchProbabilityInfo.
/// Analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityAnalysis
: public AnalysisInfoMixin<BranchProbabilityAnalysis> {
friend AnalysisInfoMixin<BranchProbabilityAnalysis>;
@ -209,14 +209,14 @@ class BranchProbabilityAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = BranchProbabilityInfo;
/// \brief Run the analysis pass over a function and produce BPI.
/// Run the analysis pass over a function and produce BPI.
BranchProbabilityInfo run(Function &F, FunctionAnalysisManager &AM);
};
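A hedged usage sketch for the query interface above; BPI and BB are assumed to come from an existing analysis run and are not part of this patch.

#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void dumpOutEdges(const BranchProbabilityInfo &BPI,
                         const BasicBlock *BB) {
  for (const BasicBlock *Succ : successors(BB)) {
    // Probability of BB -> Succ relative to BB's other out-edges.
    errs() << BB->getName() << " -> " << Succ->getName() << ": "
           << BPI.getEdgeProbability(BB, Succ)
           << (BPI.isEdgeHot(BB, Succ) ? "  (hot)\n" : "\n");
  }
}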
/// \brief Printer pass for the \c BranchProbabilityAnalysis results.
/// Printer pass for the \c BranchProbabilityAnalysis results.
class BranchProbabilityPrinterPass
: public PassInfoMixin<BranchProbabilityPrinterPass> {
raw_ostream &OS;
@ -227,7 +227,7 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Legacy analysis pass which computes \c BranchProbabilityInfo.
/// Legacy analysis pass which computes \c BranchProbabilityInfo.
class BranchProbabilityInfoWrapperPass : public FunctionPass {
BranchProbabilityInfo BPI;

View File

@ -49,7 +49,7 @@ unsigned GetSuccessorNumber(const BasicBlock *BB, const BasicBlock *Succ);
bool isCriticalEdge(const TerminatorInst *TI, unsigned SuccNum,
bool AllowIdenticalEdges = false);
/// \brief Determine whether instruction 'To' is reachable from 'From',
/// Determine whether instruction 'To' is reachable from 'From',
/// returning true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
@ -68,7 +68,7 @@ bool isPotentiallyReachable(const Instruction *From, const Instruction *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// \brief Determine whether block 'To' is reachable from 'From', returning
/// Determine whether block 'To' is reachable from 'From', returning
/// true if uncertain.
///
/// Determine whether there is a path from From to To within a single function.
@ -78,7 +78,7 @@ bool isPotentiallyReachable(const BasicBlock *From, const BasicBlock *To,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
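A minimal, hedged example of the instruction-level query; DT and LI are assumed to have already been computed for the enclosing function.

#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

static bool mayReach(const Instruction *From, const Instruction *To,
                     const DominatorTree &DT, const LoopInfo &LI) {
  // Conservative query: a 'true' result only means reachability could not
  // be disproved.
  return isPotentiallyReachable(From, To, &DT, &LI);
}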
/// \brief Determine whether there is at least one path from a block in
/// Determine whether there is at least one path from a block in
/// 'Worklist' to 'StopBB', returning true if uncertain.
///
/// Determine whether there is a path from at least one block in Worklist to
@ -89,6 +89,73 @@ bool isPotentiallyReachableFromMany(SmallVectorImpl<BasicBlock *> &Worklist,
BasicBlock *StopBB,
const DominatorTree *DT = nullptr,
const LoopInfo *LI = nullptr);
/// Return true if the control flow in \p RPOTraversal is irreducible.
///
/// This is a generic implementation to detect CFG irreducibility based on loop
/// info analysis. It can be used for any kind of CFG (Loop, MachineLoop,
/// Function, MachineFunction, etc.) by providing an RPO traversal (\p
/// RPOTraversal) and the loop info analysis (\p LI) of the CFG. This utility
/// function is only recommended when loop info analysis is available. If loop
/// info analysis isn't available, please, don't compute it explicitly for this
/// purpose. There are more efficient ways to detect CFG irreducibility that
/// don't require recomputing loop info analysis (e.g., T1/T2 or Tarjan's
/// algorithm).
///
/// Requirements:
/// 1) GraphTraits must be implemented for NodeT type. It is used to access
/// NodeT successors.
/// 2) \p RPOTraversal must be a valid reverse post-order traversal of the
/// target CFG with begin()/end() iterator interfaces.
/// 3) \p LI must be a valid LoopInfoBase that contains up-to-date loop
/// analysis information of the CFG.
///
/// This algorithm uses the information about reducible loop back-edges already
/// computed in \p LI. When a back-edge is found during the RPO traversal, the
/// algorithm checks whether the back-edge is one of the reducible back-edges in
/// loop info. If it isn't, the CFG is irreducible. For example, for the CFG
/// below (canonical irreducible graph) loop info won't contain any loop, so the
/// algorithm will return that the CFG is irreducible when checking the B <-
/// -> C back-edge.
///
///   (A->B, A->C, B->C, C->B, C->D)
///      A
///     / \
///   B<- ->C
///         |
///         D
///
template <class NodeT, class RPOTraversalT, class LoopInfoT,
class GT = GraphTraits<NodeT>>
bool containsIrreducibleCFG(RPOTraversalT &RPOTraversal, const LoopInfoT &LI) {
/// Check whether the edge (\p Src, \p Dst) is a reducible loop backedge
/// according to LI. I.e., check if there exists a loop that contains Src and
/// where Dst is the loop header.
auto isProperBackedge = [&](NodeT Src, NodeT Dst) {
for (const auto *Lp = LI.getLoopFor(Src); Lp; Lp = Lp->getParentLoop()) {
if (Lp->getHeader() == Dst)
return true;
}
return false;
};
SmallPtrSet<NodeT, 32> Visited;
for (NodeT Node : RPOTraversal) {
Visited.insert(Node);
for (NodeT Succ : make_range(GT::child_begin(Node), GT::child_end(Node))) {
// Succ hasn't been visited yet
if (!Visited.count(Succ))
continue;
// We already visited Succ, thus Node->Succ must be a backedge. Check that
// the head matches what we have in the loop information. Otherwise, we
// have an irreducible graph.
if (!isProperBackedge(Node, Succ))
return true;
}
}
return false;
}
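A hedged usage sketch for containsIrreducibleCFG over an IR function, assuming LI is an up-to-date LoopInfo for F; this mirrors how callers typically instantiate the template but is not part of this patch.

#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Function.h"

using namespace llvm;

static bool hasIrreducibleControlFlow(const Function &F, const LoopInfo &LI) {
  // The utility only consumes an RPO traversal plus already-computed loop
  // info; it never builds new analyses itself.
  ReversePostOrderTraversal<const Function *> RPOT(&F);
  return containsIrreducibleCFG<const BasicBlock *>(RPOT, LI);
}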
} // End llvm namespace
#endif

View File

@ -56,7 +56,7 @@ public:
/// Evict the given function from cache
void evict(const Function *Fn);
/// \brief Get the alias summary for the given function
/// Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(const Function &);
@ -64,19 +64,19 @@ public:
AliasResult alias(const MemoryLocation &, const MemoryLocation &);
private:
/// \brief Ensures that the given function is available in the cache.
/// Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(const Function &);
/// \brief Inserts the given Function into the cache.
/// Inserts the given Function into the cache.
void scan(const Function &);
/// \brief Build summary for a given function
/// Build summary for a given function
FunctionInfo buildInfoFrom(const Function &);
const TargetLibraryInfo &TLI;
/// \brief Cached mapping of Functions to their StratifiedSets.
/// Cached mapping of Functions to their StratifiedSets.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernable from a function

View File

@ -55,16 +55,16 @@ public:
return false;
}
/// \brief Inserts the given Function into the cache.
/// Inserts the given Function into the cache.
void scan(Function *Fn);
void evict(Function *Fn);
/// \brief Ensures that the given function is available in the cache.
/// Ensures that the given function is available in the cache.
/// Returns the appropriate entry from the cache.
const Optional<FunctionInfo> &ensureCached(Function *Fn);
/// \brief Get the alias summary for the given function
/// Get the alias summary for the given function
/// Return nullptr if the summary is not found or not available
const cflaa::AliasSummary *getAliasSummary(Function &Fn);
@ -92,7 +92,7 @@ public:
private:
const TargetLibraryInfo &TLI;
/// \brief Cached mapping of Functions to their StratifiedSets.
/// Cached mapping of Functions to their StratifiedSets.
/// If a function's sets are currently being built, it is marked
/// in the cache as an Optional without a value. This way, if we
/// have any kind of recursion, it is discernable from a function

View File

@ -119,7 +119,7 @@ extern template class AllAnalysesOn<LazyCallGraph::SCC>;
extern template class AnalysisManager<LazyCallGraph::SCC, LazyCallGraph &>;
/// \brief The CGSCC analysis manager.
/// The CGSCC analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This type serves as a convenient way to refer to this
@ -140,7 +140,7 @@ PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &,
extern template class PassManager<LazyCallGraph::SCC, CGSCCAnalysisManager,
LazyCallGraph &, CGSCCUpdateResult &>;
/// \brief The CGSCC pass manager.
/// The CGSCC pass manager.
///
/// See the documentation for the PassManager template for details. It runs
/// a sequence of SCC passes over each SCC that the manager is run over. This
@ -175,10 +175,10 @@ public:
explicit Result(CGSCCAnalysisManager &InnerAM, LazyCallGraph &G)
: InnerAM(&InnerAM), G(&G) {}
/// \brief Accessor for the analysis manager.
/// Accessor for the analysis manager.
CGSCCAnalysisManager &getManager() { return *InnerAM; }
/// \brief Handler for invalidation of the Module.
/// Handler for invalidation of the Module.
///
/// If the proxy analysis itself is preserved, then we assume that the set of
/// SCCs in the Module hasn't changed. Thus any pointers to SCCs in the
@ -302,7 +302,7 @@ struct CGSCCUpdateResult {
&InlinedInternalEdges;
};
/// \brief The core module pass which does a post-order walk of the SCCs and
/// The core module pass which does a post-order walk of the SCCs and
/// runs a CGSCC pass over each one.
///
/// Designed to allow composition of a CGSCCPass(Manager) and
@ -338,7 +338,7 @@ public:
return *this;
}
/// \brief Runs the CGSCC pass across every SCC in the module.
/// Runs the CGSCC pass across every SCC in the module.
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM) {
// Setup the CGSCC analysis manager from its proxy.
CGSCCAnalysisManager &CGAM =
@ -387,17 +387,17 @@ public:
do {
LazyCallGraph::RefSCC *RC = RCWorklist.pop_back_val();
if (InvalidRefSCCSet.count(RC)) {
DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
LLVM_DEBUG(dbgs() << "Skipping an invalid RefSCC...\n");
continue;
}
assert(CWorklist.empty() &&
"Should always start with an empty SCC worklist");
DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
<< "\n");
LLVM_DEBUG(dbgs() << "Running an SCC pass across the RefSCC: " << *RC
<< "\n");
// Push the initial SCCs in reverse post-order as we'll pop off the the
// Push the initial SCCs in reverse post-order as we'll pop off the
// back and so see this in post-order.
for (LazyCallGraph::SCC &C : llvm::reverse(*RC))
CWorklist.insert(&C);
@ -409,12 +409,13 @@ public:
// other RefSCCs should be queued above, so we just need to skip both
// scenarios here.
if (InvalidSCCSet.count(C)) {
DEBUG(dbgs() << "Skipping an invalid SCC...\n");
LLVM_DEBUG(dbgs() << "Skipping an invalid SCC...\n");
continue;
}
if (&C->getOuterRefSCC() != RC) {
DEBUG(dbgs() << "Skipping an SCC that is now part of some other "
"RefSCC...\n");
LLVM_DEBUG(dbgs()
<< "Skipping an SCC that is now part of some other "
"RefSCC...\n");
continue;
}
@ -436,7 +437,8 @@ public:
// If the CGSCC pass wasn't able to provide a valid updated SCC,
// the current SCC may simply need to be skipped if invalid.
if (UR.InvalidatedSCCs.count(C)) {
DEBUG(dbgs() << "Skipping invalidated root or island SCC!\n");
LLVM_DEBUG(dbgs()
<< "Skipping invalidated root or island SCC!\n");
break;
}
// Check that we didn't miss any update scenario.
@ -464,9 +466,10 @@ public:
// FIXME: If we ever start having RefSCC passes, we'll want to
// iterate there too.
if (UR.UpdatedC)
DEBUG(dbgs() << "Re-running SCC passes after a refinement of the "
"current SCC: "
<< *UR.UpdatedC << "\n");
LLVM_DEBUG(dbgs()
<< "Re-running SCC passes after a refinement of the "
"current SCC: "
<< *UR.UpdatedC << "\n");
// Note that both `C` and `RC` may at this point refer to deleted,
// invalid SCC and RefSCCs respectively. But we will short circuit
@ -494,7 +497,7 @@ private:
CGSCCPassT Pass;
};
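Together with the createModuleToPostOrderCGSCCPassAdaptor and createCGSCCToFunctionPassAdaptor helpers declared further down in this header, the adaptor composes into a module-level pipeline. A hedged sketch; SimplifyCFGPass is just an arbitrary function pass used for illustration, not something this patch touches.

#include "llvm/Analysis/CGSCCPassManager.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Transforms/Scalar/SimplifyCFG.h"

using namespace llvm;

static ModulePassManager buildPipeline() {
  ModulePassManager MPM;
  // Module pass -> post-order CGSCC walk -> per-function pass.
  MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
      createCGSCCToFunctionPassAdaptor(SimplifyCFGPass())));
  return MPM;
}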
/// \brief A function to deduce a function pass type and wrap it in the
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename CGSCCPassT>
ModuleToPostOrderCGSCCPassAdaptor<CGSCCPassT>
@ -517,7 +520,7 @@ public:
public:
explicit Result(FunctionAnalysisManager &FAM) : FAM(&FAM) {}
/// \brief Accessor for the analysis manager.
/// Accessor for the analysis manager.
FunctionAnalysisManager &getManager() { return *FAM; }
bool invalidate(LazyCallGraph::SCC &C, const PreservedAnalyses &PA,
@ -552,7 +555,7 @@ LazyCallGraph::SCC &updateCGAndAnalysisManagerForFunctionPass(
LazyCallGraph &G, LazyCallGraph::SCC &C, LazyCallGraph::Node &N,
CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR);
/// \brief Adaptor that maps from a SCC to its functions.
/// Adaptor that maps from a SCC to its functions.
///
/// Designed to allow composition of a FunctionPass(Manager) and
/// a CGSCCPassManager. Note that if this pass is constructed with a pointer
@ -585,7 +588,7 @@ public:
return *this;
}
/// \brief Runs the function pass across every function in the module.
/// Runs the function pass across every function in the module.
PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
LazyCallGraph &CG, CGSCCUpdateResult &UR) {
// Setup the function analysis manager from its proxy.
@ -601,7 +604,8 @@ public:
// a pointer we can overwrite.
LazyCallGraph::SCC *CurrentC = &C;
DEBUG(dbgs() << "Running function passes across an SCC: " << C << "\n");
LLVM_DEBUG(dbgs() << "Running function passes across an SCC: " << C
<< "\n");
PreservedAnalyses PA = PreservedAnalyses::all();
for (LazyCallGraph::Node *N : Nodes) {
@ -652,7 +656,7 @@ private:
FunctionPassT Pass;
};
/// \brief A function to deduce a function pass type and wrap it in the
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename FunctionPassT>
CGSCCToFunctionPassAdaptor<FunctionPassT>
@ -757,9 +761,9 @@ public:
if (!F)
return false;
DEBUG(dbgs() << "Found devirtualized call from "
<< CS.getParent()->getParent()->getName() << " to "
<< F->getName() << "\n");
LLVM_DEBUG(dbgs() << "Found devirtualized call from "
<< CS.getParent()->getParent()->getName() << " to "
<< F->getName() << "\n");
// We now have a direct call where previously we had an indirect call,
// so iterate to process this devirtualization site.
@ -793,16 +797,18 @@ public:
// Otherwise, if we've already hit our max, we're done.
if (Iteration >= MaxIterations) {
DEBUG(dbgs() << "Found another devirtualization after hitting the max "
"number of repetitions ("
<< MaxIterations << ") on SCC: " << *C << "\n");
LLVM_DEBUG(
dbgs() << "Found another devirtualization after hitting the max "
"number of repetitions ("
<< MaxIterations << ") on SCC: " << *C << "\n");
PA.intersect(std::move(PassPA));
break;
}
DEBUG(dbgs()
<< "Repeating an SCC pass after finding a devirtualization in: "
<< *C << "\n");
LLVM_DEBUG(
dbgs()
<< "Repeating an SCC pass after finding a devirtualization in: " << *C
<< "\n");
// Move over the new call counts in preparation for iterating.
CallCounts = std::move(NewCallCounts);
@ -824,7 +830,7 @@ private:
int MaxIterations;
};
/// \brief A function to deduce a function pass type and wrap it in the
/// A function to deduce a function pass type and wrap it in the
/// templated adaptor.
template <typename PassT>
DevirtSCCRepeatedPass<PassT> createDevirtSCCRepeatedPass(PassT Pass,

View File

@ -66,7 +66,7 @@ class CallGraphNode;
class Module;
class raw_ostream;
/// \brief The basic data container for the call graph of a \c Module of IR.
/// The basic data container for the call graph of a \c Module of IR.
///
/// This class exposes the interface to the call graph for a module of IR.
///
@ -77,25 +77,25 @@ class CallGraph {
using FunctionMapTy =
std::map<const Function *, std::unique_ptr<CallGraphNode>>;
/// \brief A map from \c Function* to \c CallGraphNode*.
/// A map from \c Function* to \c CallGraphNode*.
FunctionMapTy FunctionMap;
/// \brief This node has edges to all external functions and those internal
/// This node has edges to all external functions and those internal
/// functions that have their address taken.
CallGraphNode *ExternalCallingNode;
/// \brief This node has edges to it from all functions making indirect calls
/// This node has edges to it from all functions making indirect calls
/// or calling an external function.
std::unique_ptr<CallGraphNode> CallsExternalNode;
/// \brief Replace the function represented by this node by another.
/// Replace the function represented by this node by another.
///
/// This does not rescan the body of the function, so it is suitable when
/// splicing the body of one function to another while also updating all
/// callers from the old function to the new.
void spliceFunction(const Function *From, const Function *To);
/// \brief Add a function to the call graph, and link the node to all of the
/// Add a function to the call graph, and link the node to all of the
/// functions that it calls.
void addToCallGraph(Function *F);
@ -110,7 +110,7 @@ public:
using iterator = FunctionMapTy::iterator;
using const_iterator = FunctionMapTy::const_iterator;
/// \brief Returns the module the call graph corresponds to.
/// Returns the module the call graph corresponds to.
Module &getModule() const { return M; }
inline iterator begin() { return FunctionMap.begin(); }
@ -118,21 +118,21 @@ public:
inline const_iterator begin() const { return FunctionMap.begin(); }
inline const_iterator end() const { return FunctionMap.end(); }
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) {
const_iterator I = FunctionMap.find(F);
assert(I != FunctionMap.end() && "Function not in callgraph!");
return I->second.get();
}
/// \brief Returns the \c CallGraphNode which is used to represent
/// Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const { return ExternalCallingNode; }
@ -145,7 +145,7 @@ public:
// modified.
//
/// \brief Unlink the function from this module, returning it.
/// Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
@ -153,25 +153,25 @@ public:
/// this is to dropAllReferences before calling this.
Function *removeFunctionFromModule(CallGraphNode *CGN);
/// \brief Similar to operator[], but this will insert a new CallGraphNode for
/// Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F);
};
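A hedged sketch of building the container described above for a module and walking it; not part of this patch.

#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void dumpCallGraph(Module &M) {
  CallGraph CG(M);
  for (auto &Entry : CG) {
    const Function *F = Entry.first;               // null for special nodes
    const CallGraphNode *Node = Entry.second.get();
    errs() << (F ? F->getName() : StringRef("<external>")) << " calls "
           << Node->size() << " function(s)\n";
  }
}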
/// \brief A node in the call graph for a module.
/// A node in the call graph for a module.
///
/// Typically represents a function in the call graph. There are also special
/// "null" nodes used to represent theoretical entries in the call graph.
class CallGraphNode {
public:
/// \brief A pair of the calling instruction (a call or invoke)
/// A pair of the calling instruction (a call or invoke)
/// and the call graph node being called.
using CallRecord = std::pair<WeakTrackingVH, CallGraphNode *>;
public:
using CalledFunctionsVector = std::vector<CallRecord>;
/// \brief Creates a node for the specified function.
/// Creates a node for the specified function.
inline CallGraphNode(Function *F) : F(F) {}
CallGraphNode(const CallGraphNode &) = delete;
@ -184,7 +184,7 @@ public:
using iterator = std::vector<CallRecord>::iterator;
using const_iterator = std::vector<CallRecord>::const_iterator;
/// \brief Returns the function that this call graph node represents.
/// Returns the function that this call graph node represents.
Function *getFunction() const { return F; }
inline iterator begin() { return CalledFunctions.begin(); }
@ -194,17 +194,17 @@ public:
inline bool empty() const { return CalledFunctions.empty(); }
inline unsigned size() const { return (unsigned)CalledFunctions.size(); }
/// \brief Returns the number of other CallGraphNodes in this CallGraph that
/// Returns the number of other CallGraphNodes in this CallGraph that
/// reference this node in their callee list.
unsigned getNumReferences() const { return NumReferences; }
/// \brief Returns the i'th called function.
/// Returns the i'th called function.
CallGraphNode *operator[](unsigned i) const {
assert(i < CalledFunctions.size() && "Invalid index");
return CalledFunctions[i].second;
}
/// \brief Print out this call graph node.
/// Print out this call graph node.
void dump() const;
void print(raw_ostream &OS) const;
@ -213,7 +213,7 @@ public:
// modified
//
/// \brief Removes all edges from this CallGraphNode to any functions it
/// Removes all edges from this CallGraphNode to any functions it
/// calls.
void removeAllCalledFunctions() {
while (!CalledFunctions.empty()) {
@ -222,14 +222,14 @@ public:
}
}
/// \brief Moves all the callee information from N to this node.
/// Moves all the callee information from N to this node.
void stealCalledFunctionsFrom(CallGraphNode *N) {
assert(CalledFunctions.empty() &&
"Cannot steal callsite information if I already have some");
std::swap(CalledFunctions, N->CalledFunctions);
}
/// \brief Adds a function to the list of functions called by this one.
/// Adds a function to the list of functions called by this one.
void addCalledFunction(CallSite CS, CallGraphNode *M) {
assert(!CS.getInstruction() || !CS.getCalledFunction() ||
!CS.getCalledFunction()->isIntrinsic() ||
@ -244,23 +244,23 @@ public:
CalledFunctions.pop_back();
}
/// \brief Removes the edge in the node for the specified call site.
/// Removes the edge in the node for the specified call site.
///
/// Note that this method takes linear time, so it should be used sparingly.
void removeCallEdgeFor(CallSite CS);
/// \brief Removes all call edges from this node to the specified callee
/// Removes all call edges from this node to the specified callee
/// function.
///
/// This takes more time to execute than removeCallEdgeTo, so it should not
/// be used unless necessary.
void removeAnyCallEdgeTo(CallGraphNode *Callee);
/// \brief Removes one edge associated with a null callsite from this node to
/// Removes one edge associated with a null callsite from this node to
/// the specified callee function.
void removeOneAbstractEdgeTo(CallGraphNode *Callee);
/// \brief Replaces the edge in the node for the specified call site with a
/// Replaces the edge in the node for the specified call site with a
/// new one.
///
/// Note that this method takes linear time, so it should be used sparingly.
@ -273,18 +273,18 @@ private:
std::vector<CallRecord> CalledFunctions;
/// \brief The number of times that this CallGraphNode occurs in the
/// The number of times that this CallGraphNode occurs in the
/// CalledFunctions array of this or other CallGraphNodes.
unsigned NumReferences = 0;
void DropRef() { --NumReferences; }
void AddRef() { ++NumReferences; }
/// \brief A special function that should only be used by the CallGraph class.
/// A special function that should only be used by the CallGraph class.
void allReferencesDropped() { NumReferences = 0; }
};
/// \brief An analysis pass to compute the \c CallGraph for a \c Module.
/// An analysis pass to compute the \c CallGraph for a \c Module.
///
/// This class implements the concept of an analysis pass used by the \c
/// ModuleAnalysisManager to run an analysis over a module and cache the
@ -295,16 +295,16 @@ class CallGraphAnalysis : public AnalysisInfoMixin<CallGraphAnalysis> {
static AnalysisKey Key;
public:
/// \brief A formulaic type to inform clients of the result type.
/// A formulaic type to inform clients of the result type.
using Result = CallGraph;
/// \brief Compute the \c CallGraph for the module \c M.
/// Compute the \c CallGraph for the module \c M.
///
/// The real work here is done in the \c CallGraph constructor.
CallGraph run(Module &M, ModuleAnalysisManager &) { return CallGraph(M); }
};
/// \brief Printer pass for the \c CallGraphAnalysis results.
/// Printer pass for the \c CallGraphAnalysis results.
class CallGraphPrinterPass : public PassInfoMixin<CallGraphPrinterPass> {
raw_ostream &OS;
@ -314,7 +314,7 @@ public:
PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// \brief The \c ModulePass which wraps up a \c CallGraph and the logic to
/// The \c ModulePass which wraps up a \c CallGraph and the logic to
/// build it.
///
/// This class exposes both the interface to the call graph container and the
@ -330,7 +330,7 @@ public:
CallGraphWrapperPass();
~CallGraphWrapperPass() override;
/// \brief The internal \c CallGraph around which the rest of this interface
/// The internal \c CallGraph around which the rest of this interface
/// is wrapped.
const CallGraph &getCallGraph() const { return *G; }
CallGraph &getCallGraph() { return *G; }
@ -338,7 +338,7 @@ public:
using iterator = CallGraph::iterator;
using const_iterator = CallGraph::const_iterator;
/// \brief Returns the module the call graph corresponds to.
/// Returns the module the call graph corresponds to.
Module &getModule() const { return G->getModule(); }
inline iterator begin() { return G->begin(); }
@ -346,15 +346,15 @@ public:
inline const_iterator begin() const { return G->begin(); }
inline const_iterator end() const { return G->end(); }
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline const CallGraphNode *operator[](const Function *F) const {
return (*G)[F];
}
/// \brief Returns the call graph node for the provided function.
/// Returns the call graph node for the provided function.
inline CallGraphNode *operator[](const Function *F) { return (*G)[F]; }
/// \brief Returns the \c CallGraphNode which is used to represent
/// Returns the \c CallGraphNode which is used to represent
/// undetermined calls into the callgraph.
CallGraphNode *getExternalCallingNode() const {
return G->getExternalCallingNode();
@ -369,7 +369,7 @@ public:
// modified.
//
/// \brief Unlink the function from this module, returning it.
/// Unlink the function from this module, returning it.
///
/// Because this removes the function from the module, the call graph node is
/// destroyed. This is only valid if the function does not call any other
@ -379,7 +379,7 @@ public:
return G->removeFunctionFromModule(CGN);
}
/// \brief Similar to operator[], but this will insert a new CallGraphNode for
/// Similar to operator[], but this will insert a new CallGraphNode for
/// \c F if one does not already exist.
CallGraphNode *getOrInsertFunction(const Function *F) {
return G->getOrInsertFunction(F);
@ -426,12 +426,14 @@ template <> struct GraphTraits<CallGraphNode *> {
template <> struct GraphTraits<const CallGraphNode *> {
using NodeRef = const CallGraphNode *;
using CGNPairTy = CallGraphNode::CallRecord;
using EdgeRef = const CallGraphNode::CallRecord &;
static NodeRef getEntryNode(const CallGraphNode *CGN) { return CGN; }
static const CallGraphNode *CGNGetValue(CGNPairTy P) { return P.second; }
using ChildIteratorType =
mapped_iterator<CallGraphNode::const_iterator, decltype(&CGNGetValue)>;
using ChildEdgeIteratorType = CallGraphNode::const_iterator;
static ChildIteratorType child_begin(NodeRef N) {
return ChildIteratorType(N->begin(), &CGNGetValue);
@ -440,6 +442,13 @@ template <> struct GraphTraits<const CallGraphNode *> {
static ChildIteratorType child_end(NodeRef N) {
return ChildIteratorType(N->end(), &CGNGetValue);
}
static ChildEdgeIteratorType child_edge_begin(NodeRef N) {
return N->begin();
}
static ChildEdgeIteratorType child_edge_end(NodeRef N) { return N->end(); }
static NodeRef edge_dest(EdgeRef E) { return E.second; }
};
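These specializations let the generic graph algorithms in ADT walk the call graph. A hedged sketch using depth_first() starting at the external calling node:

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static void printReachableFunctions(CallGraph &CG) {
  // Follows call edges transitively from the node that models calls coming
  // from outside the module.
  for (const CallGraphNode *Node : depth_first(CG.getExternalCallingNode()))
    if (const Function *F = Node->getFunction())
      errs() << "reachable: " << F->getName() << "\n";
}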
template <>

View File

@ -46,7 +46,7 @@ namespace llvm {
/// to speed up capture-tracker queries.
bool PointerMayBeCapturedBefore(const Value *V, bool ReturnCaptures,
bool StoreCaptures, const Instruction *I,
DominatorTree *DT, bool IncludeI = false,
const DominatorTree *DT, bool IncludeI = false,
OrderedBasicBlock *OBB = nullptr);
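A hedged sketch of the query above; DT is assumed to be a valid dominator tree for the function containing I.

#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"

using namespace llvm;

static bool capturedBefore(const Value *Ptr, const Instruction *I,
                           const DominatorTree &DT) {
  // Treat returning or storing the pointer as a capture; I itself is not
  // considered because IncludeI defaults to false.
  return PointerMayBeCapturedBefore(Ptr, /*ReturnCaptures=*/true,
                                    /*StoreCaptures=*/true, I, &DT);
}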
/// This callback is used in conjunction with PointerMayBeCaptured. In

View File

@ -29,7 +29,7 @@ class DataLayout;
class TargetTransformInfo;
class Value;
/// \brief Check whether a call will lower to something small.
/// Check whether a call will lower to something small.
///
/// This test checks whether this callsite will lower to something
/// significantly cheaper than a traditional call, often a single
@ -37,64 +37,64 @@ class Value;
/// return true, so will this function.
bool callIsSmall(ImmutableCallSite CS);
/// \brief Utility to calculate the size and a few similar metrics for a set
/// Utility to calculate the size and a few similar metrics for a set
/// of basic blocks.
struct CodeMetrics {
/// \brief True if this function contains a call to setjmp or other functions
/// True if this function contains a call to setjmp or other functions
/// with attribute "returns twice" without having the attribute itself.
bool exposesReturnsTwice = false;
/// \brief True if this function calls itself.
/// True if this function calls itself.
bool isRecursive = false;
/// \brief True if this function cannot be duplicated.
/// True if this function cannot be duplicated.
///
/// True if this function contains one or more indirect branches, or it contains
/// one or more 'noduplicate' instructions.
bool notDuplicatable = false;
/// \brief True if this function contains a call to a convergent function.
/// True if this function contains a call to a convergent function.
bool convergent = false;
/// \brief True if this function calls alloca (in the C sense).
/// True if this function calls alloca (in the C sense).
bool usesDynamicAlloca = false;
/// \brief Number of instructions in the analyzed blocks.
/// Number of instructions in the analyzed blocks.
unsigned NumInsts = false;
/// \brief Number of analyzed blocks.
/// Number of analyzed blocks.
unsigned NumBlocks = false;
/// \brief Keeps track of basic block code size estimates.
/// Keeps track of basic block code size estimates.
DenseMap<const BasicBlock *, unsigned> NumBBInsts;
/// \brief Keep track of the number of calls to 'big' functions.
/// Keep track of the number of calls to 'big' functions.
unsigned NumCalls = false;
/// \brief The number of calls to internal functions with a single caller.
/// The number of calls to internal functions with a single caller.
///
/// These are likely targets for future inlining, likely exposed by
/// interleaved devirtualization.
unsigned NumInlineCandidates = 0;
/// \brief How many instructions produce vector values.
/// How many instructions produce vector values.
///
/// The inliner is more aggressive with inlining vector kernels.
unsigned NumVectorInsts = 0;
/// \brief How many 'ret' instructions the blocks contain.
/// How many 'ret' instructions the blocks contain.
unsigned NumRets = 0;
/// \brief Add information about a block to the current state.
/// Add information about a block to the current state.
void analyzeBasicBlock(const BasicBlock *BB, const TargetTransformInfo &TTI,
const SmallPtrSetImpl<const Value*> &EphValues);
/// \brief Collect a loop's ephemeral values (those used only by an assume
/// Collect a loop's ephemeral values (those used only by an assume
/// or similar intrinsics in the loop).
static void collectEphemeralValues(const Loop *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);
/// \brief Collect a function's ephemeral values (those used only by an
/// Collect a function's ephemeral values (those used only by an
/// assume or similar intrinsics in the function).
static void collectEphemeralValues(const Function *L, AssumptionCache *AC,
SmallPtrSetImpl<const Value *> &EphValues);
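A hedged sketch of driving these metrics over a whole function; TTI and AC are assumed to come from the usual analyses and are not part of this patch.

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"

using namespace llvm;

static unsigned countInstructions(const Function &F,
                                  const TargetTransformInfo &TTI,
                                  AssumptionCache &AC) {
  // Ephemeral values (those only feeding llvm.assume) are excluded from the
  // per-block counts.
  SmallPtrSet<const Value *, 32> EphValues;
  CodeMetrics::collectEphemeralValues(&F, &AC, EphValues);

  CodeMetrics Metrics;
  for (const BasicBlock &BB : F)
    Metrics.analyzeBasicBlock(&BB, TTI, EphValues);
  return Metrics.NumInsts;
}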

View File

@ -73,19 +73,19 @@ ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS,
Constant *RHS, const DataLayout &DL,
const TargetLibraryInfo *TLI = nullptr);
/// \brief Attempt to constant fold a binary operation with the specified
/// Attempt to constant fold a binary operation with the specified
/// operands. If it fails, it returns a constant expression of the specified
/// operands.
Constant *ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
Constant *RHS, const DataLayout &DL);
/// \brief Attempt to constant fold a select instruction with the specified
/// Attempt to constant fold a select instruction with the specified
/// operands. The constant result is returned if successful; if not, null is
/// returned.
Constant *ConstantFoldSelectInstruction(Constant *Cond, Constant *V1,
Constant *V2);
/// \brief Attempt to constant fold a cast with the specified operand. If it
/// Attempt to constant fold a cast with the specified operand. If it
/// fails, it returns a constant expression of the specified operand.
Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
const DataLayout &DL);
@ -96,25 +96,25 @@ Constant *ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy,
Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
ArrayRef<unsigned> Idxs);
/// \brief Attempt to constant fold an extractvalue instruction with the
/// Attempt to constant fold an extractvalue instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractValueInstruction(Constant *Agg,
ArrayRef<unsigned> Idxs);
/// \brief Attempt to constant fold an insertelement instruction with the
/// Attempt to constant fold an insertelement instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldInsertElementInstruction(Constant *Val,
Constant *Elt,
Constant *Idx);
/// \brief Attempt to constant fold an extractelement instruction with the
/// Attempt to constant fold an extractelement instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldExtractElementInstruction(Constant *Val, Constant *Idx);
/// \brief Attempt to constant fold a shufflevector instruction with the
/// Attempt to constant fold a shufflevector instruction with the
/// specified operands and indices. The constant result is returned if
/// successful; if not, null is returned.
Constant *ConstantFoldShuffleVectorInstruction(Constant *V1, Constant *V2,
@ -147,7 +147,13 @@ Constant *ConstantFoldCall(ImmutableCallSite CS, Function *F,
ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI = nullptr);
/// \brief Check whether the given call has no side-effects.
/// ConstantFoldLoadThroughBitcast - try to cast constant to destination type
/// returning null if unsuccessful. Can cast pointer to pointer or pointer to
/// integer and vice versa if their sizes are equal.
Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
const DataLayout &DL);
/// Check whether the given call has no side-effects.
/// Specifically checks for math routines which sometimes set errno.
bool isMathLibCallNoop(CallSite CS, const TargetLibraryInfo *TLI);
}
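As a quick illustration of the folding entry points above, here is a minimal sketch that folds an integer add through ConstantFoldBinaryOpOperands; the module only supplies the DataLayout, and the function name is illustrative:
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Fold 20 + 22 at compile time; on failure the call would instead return a
// constant expression built from the operands.
static Constant *foldExampleAdd(Module &M) {
  Type *I32 = Type::getInt32Ty(M.getContext());
  Constant *LHS = ConstantInt::get(I32, 20);
  Constant *RHS = ConstantInt::get(I32, 22);
  return ConstantFoldBinaryOpOperands(Instruction::Add, LHS, RHS,
                                      M.getDataLayout());
}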

View File

@ -20,7 +20,7 @@
namespace llvm {
/// \brief Default traits class for extracting a graph from an analysis pass.
/// Default traits class for extracting a graph from an analysis pass.
///
/// This assumes that 'GraphT' is 'AnalysisT *' and so just passes it through.
template <typename AnalysisT, typename GraphT = AnalysisT *>
@ -36,7 +36,7 @@ public:
DOTGraphTraitsViewer(StringRef GraphName, char &ID)
: FunctionPass(ID), Name(GraphName) {}
/// @brief Return true if this function should be processed.
/// Return true if this function should be processed.
///
/// An implementation of this class may override this function to indicate that
/// only certain functions should be viewed.
@ -78,7 +78,7 @@ public:
DOTGraphTraitsPrinter(StringRef GraphName, char &ID)
: FunctionPass(ID), Name(GraphName) {}
/// @brief Return true if this function should be processed.
/// Return true if this function should be processed.
///
/// An implementation of this class may override this function to indicate that
/// only certain functions should be printed.

View File

@ -96,15 +96,15 @@ class DemandedBitsAnalysis : public AnalysisInfoMixin<DemandedBitsAnalysis> {
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = DemandedBits;
/// \brief Run the analysis pass over a function and produce demanded bits
/// Run the analysis pass over a function and produce demanded bits
/// information.
DemandedBits run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for DemandedBits
/// Printer pass for DemandedBits
class DemandedBitsPrinterPass : public PassInfoMixin<DemandedBitsPrinterPass> {
raw_ostream &OS;

View File

@ -557,6 +557,17 @@ template <typename T> class ArrayRef;
const SCEV *X,
const SCEV *Y) const;
/// isKnownLessThan - Compare to see if S is less than Size
/// Another wrapper for isKnownNegative(S - max(Size, 1)) with some extra
/// checking if S is an AddRec and we can prove less-than using the loop
/// bounds.
bool isKnownLessThan(const SCEV *S, const SCEV *Size) const;
/// isKnownNonNegative - Compare to see if S is known not to be negative
/// Uses the fact that S comes from Ptr, which may be an inbounds GEP,
/// proving there is no wrapping going on.
bool isKnownNonNegative(const SCEV *S, const Value *Ptr) const;
/// collectUpperBound - All subscripts are the same type (on my machine,
/// an i64). The loop bound may be a smaller type. collectUpperBound
/// finds the bound, if available, and zero extends it to the Type T.
@ -914,7 +925,7 @@ template <typename T> class ArrayRef;
SmallVectorImpl<Subscript> &Pair);
}; // class DependenceInfo
/// \brief AnalysisPass to compute dependence information in a function
/// AnalysisPass to compute dependence information in a function
class DependenceAnalysis : public AnalysisInfoMixin<DependenceAnalysis> {
public:
typedef DependenceInfo Result;
@ -925,7 +936,7 @@ template <typename T> class ArrayRef;
friend struct AnalysisInfoMixin<DependenceAnalysis>;
}; // class DependenceAnalysis
/// \brief Legacy pass manager pass to access dependence information
/// Legacy pass manager pass to access dependence information
class DependenceAnalysisWrapperPass : public FunctionPass {
public:
static char ID; // Class identification, replacement for typeinfo
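A small client sketch for the dependence information above. DependenceInfo::depends() is not shown in this hunk and is assumed to be declared in the same header, so treat its exact signature as an assumption:
#include "llvm/Analysis/DependenceAnalysis.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Returns true if Src and Dst may depend on each other; a null result from
// depends() (assumed API) means the two memory instructions are independent.
static bool mayDepend(DependenceInfo &DI, Instruction *Src, Instruction *Dst) {
  return DI.depends(Src, Dst, /*PossiblyLoopIndependent=*/true) != nullptr;
}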

View File

@ -13,6 +13,8 @@
// better decisions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
#define LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/IR/Function.h"
@ -35,14 +37,25 @@ public:
// Print all divergent branches in the function.
void print(raw_ostream &OS, const Module *) const override;
// Returns true if V is divergent.
// Returns true if V is divergent at its definition.
//
// Even if this function returns false, V may still be divergent when used
// in a different basic block.
bool isDivergent(const Value *V) const { return DivergentValues.count(V); }
// Returns true if V is uniform/non-divergent.
//
// Even if this function returns true, V may still be divergent when used
// in a different basic block.
bool isUniform(const Value *V) const { return !isDivergent(V); }
// Keep the analysis results up to date by removing an erased value.
void removeValue(const Value *V) { DivergentValues.erase(V); }
private:
// Stores all divergent values.
DenseSet<const Value *> DivergentValues;
};
} // End llvm namespace
#endif //LLVM_ANALYSIS_DIVERGENCE_ANALYSIS_H
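A minimal sketch of keeping the result consistent while transforming the IR, using only the interface shown above (DA is assumed to be the already-computed DivergenceAnalysis result):
#include "llvm/Analysis/DivergenceAnalysis.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Erase an instruction and drop any stale divergence entry for it, as the
// removeValue() comment above suggests.
static void eraseAndForget(DivergenceAnalysis &DA, Instruction *I) {
  DA.removeValue(I);
  I->eraseFromParent();
}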

View File

@ -19,6 +19,7 @@
#define LLVM_ANALYSIS_DOMINANCEFRONTIER_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Pass.h"
#include "llvm/Support/GenericDomTree.h"
@ -179,7 +180,7 @@ extern template class DominanceFrontierBase<BasicBlock, false>;
extern template class DominanceFrontierBase<BasicBlock, true>;
extern template class ForwardDominanceFrontierBase<BasicBlock>;
/// \brief Analysis pass which computes a \c DominanceFrontier.
/// Analysis pass which computes a \c DominanceFrontier.
class DominanceFrontierAnalysis
: public AnalysisInfoMixin<DominanceFrontierAnalysis> {
friend AnalysisInfoMixin<DominanceFrontierAnalysis>;
@ -187,14 +188,14 @@ class DominanceFrontierAnalysis
static AnalysisKey Key;
public:
/// \brief Provide the result type for this analysis pass.
/// Provide the result type for this analysis pass.
using Result = DominanceFrontier;
/// \brief Run the analysis pass over a function and produce a dominator tree.
/// Run the analysis pass over a function and produce a dominator tree.
DominanceFrontier run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c DominanceFrontier.
/// Printer pass for the \c DominanceFrontier.
class DominanceFrontierPrinterPass
: public PassInfoMixin<DominanceFrontierPrinterPass> {
raw_ostream &OS;
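Under the new pass manager the analysis is consumed in the usual way; a minimal sketch (the helper name is illustrative, and the print() call is assumed to be inherited from the DominanceFrontier base class):
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Request the DominanceFrontier result (see Result above) and print it.
static void printDominanceFrontier(Function &F, FunctionAnalysisManager &FAM,
                                   raw_ostream &OS) {
  DominanceFrontier &DF = FAM.getResult<DominanceFrontierAnalysis>(F);
  DF.print(OS);
}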

View File

@ -21,6 +21,7 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/DominanceFrontier.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/raw_ostream.h"

View File

@ -32,10 +32,11 @@ enum class EHPersonality {
MSVC_Win64SEH,
MSVC_CXX,
CoreCLR,
Rust
Rust,
Wasm_CXX
};
/// \brief See if the given exception handling personality function is one
/// See if the given exception handling personality function is one
/// that we understand. If so, return a description of it; otherwise return
/// Unknown.
EHPersonality classifyEHPersonality(const Value *Pers);
@ -44,7 +45,7 @@ StringRef getEHPersonalityName(EHPersonality Pers);
EHPersonality getDefaultEHPersonality(const Triple &T);
/// \brief Returns true if this personality function catches asynchronous
/// Returns true if this personality function catches asynchronous
/// exceptions.
inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
// The two SEH personality functions can catch asynchronous exceptions. We assume
@ -59,7 +60,7 @@ inline bool isAsynchronousEHPersonality(EHPersonality Pers) {
llvm_unreachable("invalid enum");
}
/// \brief Returns true if this is a personality function that invokes
/// Returns true if this is a personality function that invokes
/// handler funclets (which must return to it).
inline bool isFuncletEHPersonality(EHPersonality Pers) {
switch (Pers) {
@ -74,7 +75,23 @@ inline bool isFuncletEHPersonality(EHPersonality Pers) {
llvm_unreachable("invalid enum");
}
/// \brief Return true if this personality may be safely removed if there
/// Returns true if this personality uses scope-style EH IR instructions:
/// catchswitch, catchpad/ret, and cleanuppad/ret.
inline bool isScopedEHPersonality(EHPersonality Pers) {
switch (Pers) {
case EHPersonality::MSVC_CXX:
case EHPersonality::MSVC_X86SEH:
case EHPersonality::MSVC_Win64SEH:
case EHPersonality::CoreCLR:
case EHPersonality::Wasm_CXX:
return true;
default:
return false;
}
llvm_unreachable("invalid enum");
}
/// Return true if this personality may be safely removed if there
/// are no invoke instructions remaining in the current function.
inline bool isNoOpWithoutInvoke(EHPersonality Pers) {
switch (Pers) {
@ -91,7 +108,7 @@ bool canSimplifyInvokeNoUnwind(const Function *F);
typedef TinyPtrVector<BasicBlock *> ColorVector;
/// \brief If an EH funclet personality is in use (see isFuncletEHPersonality),
/// If an EH funclet personality is in use (see isFuncletEHPersonality),
/// this will recompute which blocks are in which funclet. It is possible that
/// some blocks are in multiple funclets. Consider this analysis to be
/// expensive.
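A short sketch combining classifyEHPersonality with the new isScopedEHPersonality predicate; F is any function, and the Function accessors used are standard IR API:
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// True if F has a personality function and that personality uses the
// scope-style EH instructions (catchswitch, catchpad/ret, cleanuppad/ret).
static bool usesScopedEH(const Function &F) {
  if (!F.hasPersonalityFn())
    return false;
  return isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn()));
}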

View File

@ -48,7 +48,7 @@ private:
public:
ICallPromotionAnalysis();
/// \brief Returns reference to array of InstrProfValueData for the given
/// Returns reference to array of InstrProfValueData for the given
/// instruction \p I.
///
/// The \p NumVals, \p TotalCount and \p NumCandidates

View File

@ -52,7 +52,7 @@ const int NoreturnPenalty = 10000;
const unsigned TotalAllocaSizeRecursiveCaller = 1024;
}
/// \brief Represents the cost of inlining a function.
/// Represents the cost of inlining a function.
///
/// This supports special values for functions which should "always" or
/// "never" be inlined. Otherwise, the cost represents a unitless amount;
@ -68,10 +68,10 @@ class InlineCost {
NeverInlineCost = INT_MAX
};
/// \brief The estimated cost of inlining this callsite.
/// The estimated cost of inlining this callsite.
const int Cost;
/// \brief The adjusted threshold against which this cost was computed.
/// The adjusted threshold against which this cost was computed.
const int Threshold;
// Trivial constructor, interesting logic in the factory functions below.
@ -90,7 +90,7 @@ public:
return InlineCost(NeverInlineCost, 0);
}
/// \brief Test whether the inline cost is low enough for inlining.
/// Test whether the inline cost is low enough for inlining.
explicit operator bool() const {
return Cost < Threshold;
}
@ -99,20 +99,20 @@ public:
bool isNever() const { return Cost == NeverInlineCost; }
bool isVariable() const { return !isAlways() && !isNever(); }
/// \brief Get the inline cost estimate.
/// Get the inline cost estimate.
/// It is an error to call this on an "always" or "never" InlineCost.
int getCost() const {
assert(isVariable() && "Invalid access of InlineCost");
return Cost;
}
/// \brief Get the threshold against which the cost was computed
/// Get the threshold against which the cost was computed
int getThreshold() const {
assert(isVariable() && "Invalid access of InlineCost");
return Threshold;
}
/// \brief Get the cost delta from the threshold for inlining.
/// Get the cost delta from the threshold for inlining.
/// Only valid if the cost is of the variable kind. Returns a negative
/// value if the cost is too high to inline.
int getCostDelta() const { return Threshold - getCost(); }
@ -170,7 +170,7 @@ InlineParams getInlineParams(int Threshold);
/// line options. If -inline-threshold option is not explicitly passed,
/// the default threshold is computed from \p OptLevel and \p SizeOptLevel.
/// An \p OptLevel value above 3 is considered an aggressive optimization mode.
/// \p SizeOptLevel of 1 corresponds to the the -Os flag and 2 corresponds to
/// \p SizeOptLevel of 1 corresponds to the -Os flag and 2 corresponds to
/// the -Oz flag.
InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
@ -178,7 +178,7 @@ InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
/// and the call/return instruction.
int getCallsiteCost(CallSite CS, const DataLayout &DL);
/// \brief Get an InlineCost object representing the cost of inlining this
/// Get an InlineCost object representing the cost of inlining this
/// callsite.
///
/// Note that a default threshold is passed into this function. This threshold
@ -195,7 +195,7 @@ InlineCost getInlineCost(
Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE = nullptr);
/// \brief Get an InlineCost with the callee explicitly specified.
/// Get an InlineCost with the callee explicitly specified.
/// This allows you to calculate the cost of inlining a function via a
/// pointer. This behaves exactly as the version with no explicit callee
/// parameter in all other respects.
@ -207,7 +207,7 @@ getInlineCost(CallSite CS, Function *Callee, const InlineParams &Params,
Optional<function_ref<BlockFrequencyInfo &(Function &)>> GetBFI,
ProfileSummaryInfo *PSI, OptimizationRemarkEmitter *ORE);
/// \brief Minimal filter to detect invalid constructs for inlining.
/// Minimal filter to detect invalid constructs for inlining.
bool isInlineViable(Function &Callee);
}
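A minimal sketch of consuming an InlineCost value returned by one of the getInlineCost overloads above, using only the accessors shown in this header:
#include "llvm/Analysis/InlineCost.h"

using namespace llvm;

// The "always"/"never" kinds short-circuit; for the variable kind the explicit
// bool conversion above compares Cost < Threshold.
static bool shouldInline(const InlineCost &IC) {
  if (IC.isAlways())
    return true;
  if (IC.isNever())
    return false;
  return static_cast<bool>(IC);
}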

View File

@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
//
/// \brief Compute iterated dominance frontiers using a linear time algorithm.
/// Compute iterated dominance frontiers using a linear time algorithm.
///
/// The algorithm used here is based on:
///
@ -32,7 +32,7 @@
namespace llvm {
/// \brief Determine the iterated dominance frontier, given a set of defining
/// Determine the iterated dominance frontier, given a set of defining
/// blocks, and optionally, a set of live-in blocks.
///
/// In turn, the results can be used to place phi nodes.
@ -48,7 +48,7 @@ class IDFCalculator {
IDFCalculator(DominatorTreeBase<BasicBlock, IsPostDom> &DT)
: DT(DT), useLiveIn(false) {}
/// \brief Give the IDF calculator the set of blocks in which the value is
/// Give the IDF calculator the set of blocks in which the value is
/// defined. This is equivalent to the set of starting blocks it should be
/// calculating the IDF for (though later gets pruned based on liveness).
///
@ -57,7 +57,7 @@ class IDFCalculator {
DefBlocks = &Blocks;
}
/// \brief Give the IDF calculator the set of blocks in which the value is
/// Give the IDF calculator the set of blocks in which the value is
/// live on entry to the block. This is used to prune the IDF calculation to
/// not include blocks where any phi insertion would be dead.
///
@ -68,14 +68,14 @@ class IDFCalculator {
useLiveIn = true;
}
/// \brief Reset the live-in block set to be empty, and tell the IDF
/// Reset the live-in block set to be empty, and tell the IDF
/// calculator to not use liveness anymore.
void resetLiveInBlocks() {
LiveInBlocks = nullptr;
useLiveIn = false;
}
/// \brief Calculate iterated dominance frontiers
/// Calculate iterated dominance frontiers
///
/// This uses the linear-time phi algorithm based on DJ-graphs mentioned in
/// the file-level comment. It performs DF->IDF pruning using the live-in

View File

@ -75,7 +75,7 @@ private:
const LoopInfoT *LI;
};
/// \brief This is an alternative analysis pass to
/// This is an alternative analysis pass to
/// BlockFrequencyInfoWrapperPass. The difference is that with this pass the
/// block frequencies are not computed when the analysis pass is executed but
/// rather when the BFI result is explicitly requested by the analysis client.
@ -109,10 +109,10 @@ public:
LazyBlockFrequencyInfoPass();
/// \brief Compute and return the block frequencies.
/// Compute and return the block frequencies.
BlockFrequencyInfo &getBFI() { return LBFI.getCalculated(); }
/// \brief Compute and return the block frequencies.
/// Compute and return the block frequencies.
const BlockFrequencyInfo &getBFI() const { return LBFI.getCalculated(); }
void getAnalysisUsage(AnalysisUsage &AU) const override;
@ -126,7 +126,7 @@ public:
void print(raw_ostream &OS, const Module *M) const override;
};
/// \brief Helper for client passes to initialize dependent passes for LBFI.
/// Helper for client passes to initialize dependent passes for LBFI.
void initializeLazyBFIPassPass(PassRegistry &Registry);
}
#endif
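A sketch of a hypothetical legacy-PM client; the point, as the comment above says, is that the frequencies are only computed when getBFI() is actually called. The pass name UsesLazyBFI and its body are illustrative only, and pass registration is omitted:
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/Pass.h"

using namespace llvm;

namespace {
struct UsesLazyBFI : public FunctionPass {
  static char ID;
  UsesLazyBFI() : FunctionPass(ID) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LazyBlockFrequencyInfoPass>();
    AU.setPreservesAll();
  }

  bool runOnFunction(Function &F) override {
    // The block frequencies are computed here, on first request, rather than
    // when LazyBlockFrequencyInfoPass itself was run.
    BlockFrequencyInfo &BFI =
        getAnalysis<LazyBlockFrequencyInfoPass>().getBFI();
    (void)BFI.getEntryFreq();
    return false;
  }
};
char UsesLazyBFI::ID = 0;
} // end anonymous namespace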

View File

@ -26,7 +26,7 @@ class Function;
class LoopInfo;
class TargetLibraryInfo;
/// \brief This is an alternative analysis pass to
/// This is an alternative analysis pass to
/// BranchProbabilityInfoWrapperPass. The difference is that with this pass the
/// branch probabilities are not computed when the analysis pass is executed but
/// rather when the BPI result is explicitly requested by the analysis client.
@ -89,10 +89,10 @@ public:
LazyBranchProbabilityInfoPass();
/// \brief Compute and return the branch probabilities.
/// Compute and return the branch probabilities.
BranchProbabilityInfo &getBPI() { return LBPI->getCalculated(); }
/// \brief Compute and return the branch probabilities.
/// Compute and return the branch probabilities.
const BranchProbabilityInfo &getBPI() const { return LBPI->getCalculated(); }
void getAnalysisUsage(AnalysisUsage &AU) const override;
@ -106,10 +106,10 @@ public:
void print(raw_ostream &OS, const Module *M) const override;
};
/// \brief Helper for client passes to initialize dependent passes for LBPI.
/// Helper for client passes to initialize dependent passes for LBPI.
void initializeLazyBPIPassPass(PassRegistry &Registry);
/// \brief Simple trait class that provides a mapping between BPI passes and the
/// Simple trait class that provides a mapping between BPI passes and the
/// corresponding BPInfo.
template <typename PassT> struct BPIPassTrait {
static PassT &getBPI(PassT *P) { return *P; }

View File

@ -113,6 +113,13 @@ public:
/// in LVI, so we need to pass it here as an argument.
void printLVI(Function &F, DominatorTree &DTree, raw_ostream &OS);
/// Disables use of the DominatorTree within LVI.
void disableDT();
/// Enables use of the DominatorTree within LVI. Does nothing if the class
/// instance was initialized without a DT pointer.
void enableDT();
// For old PM pass. Delete once LazyValueInfoWrapperPass is gone.
void releaseMemory();
@ -121,7 +128,7 @@ public:
FunctionAnalysisManager::Invalidator &Inv);
};
/// \brief Analysis to compute lazy value information.
/// Analysis to compute lazy value information.
class LazyValueAnalysis : public AnalysisInfoMixin<LazyValueAnalysis> {
public:
typedef LazyValueInfo Result;
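A sketch of the intended use of the new disableDT()/enableDT() pair above: turn DominatorTree use off while the CFG is in flux, then back on. The wrapping helper and the callable it takes are hypothetical:
#include "llvm/Analysis/LazyValueInfo.h"

using namespace llvm;

// Wrap a CFG-mutating edit so LVI does not query a stale DominatorTree.
template <typename EditFn>
static void withLVIDTDisabled(LazyValueInfo &LVI, EditFn Edit) {
  LVI.disableDT();
  Edit();          // perform the CFG changes
  LVI.enableDT();  // no-op if LVI was constructed without a DT
}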

View File

@ -26,12 +26,12 @@ class FunctionPass;
class Module;
class Function;
/// @brief Create a lint pass.
/// Create a lint pass.
///
/// Check a module or function.
FunctionPass *createLintPass();
/// @brief Check a module.
/// Check a module.
///
/// This should only be used for debugging, because it plays games with
/// PassManagers and stuff.

View File

@ -38,25 +38,25 @@ class SCEVUnionPredicate;
class LoopAccessInfo;
class OptimizationRemarkEmitter;
/// \brief Collection of parameters shared beetween the Loop Vectorizer and the
/// Collection of parameters shared between the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
/// \brief Maximum SIMD width.
/// Maximum SIMD width.
static const unsigned MaxVectorWidth;
/// \brief VF as overridden by the user.
/// VF as overridden by the user.
static unsigned VectorizationFactor;
/// \brief Interleave factor as overridden by the user.
/// Interleave factor as overridden by the user.
static unsigned VectorizationInterleave;
/// \brief True if force-vector-interleave was specified by the user.
/// True if force-vector-interleave was specified by the user.
static bool isInterleaveForced();
/// \\brief When performing memory disambiguation checks at runtime do not
/// When performing memory disambiguation checks at runtime do not
/// make more than this number of comparisons.
static unsigned RuntimeMemoryCheckThreshold;
};
/// \brief Checks memory dependences among accesses to the same underlying
/// Checks memory dependences among accesses to the same underlying
/// object to determine whether vectorization is legal or not (and at
/// which vectorization factor).
///
@ -94,12 +94,12 @@ class MemoryDepChecker {
public:
typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
typedef SmallVector<MemAccessInfo, 8> MemAccessInfoList;
/// \brief Set of potential dependent memory accesses.
/// Set of potential dependent memory accesses.
typedef EquivalenceClasses<MemAccessInfo> DepCandidates;
/// \brief Dependece between memory access instructions.
/// Dependence between memory access instructions.
struct Dependence {
/// \brief The type of the dependence.
/// The type of the dependence.
enum DepType {
// No dependence.
NoDep,
@ -127,36 +127,36 @@ public:
BackwardVectorizableButPreventsForwarding
};
/// \brief String version of the types.
/// String version of the types.
static const char *DepName[];
/// \brief Index of the source of the dependence in the InstMap vector.
/// Index of the source of the dependence in the InstMap vector.
unsigned Source;
/// \brief Index of the destination of the dependence in the InstMap vector.
/// Index of the destination of the dependence in the InstMap vector.
unsigned Destination;
/// \brief The type of the dependence.
/// The type of the dependence.
DepType Type;
Dependence(unsigned Source, unsigned Destination, DepType Type)
: Source(Source), Destination(Destination), Type(Type) {}
/// \brief Return the source instruction of the dependence.
/// Return the source instruction of the dependence.
Instruction *getSource(const LoopAccessInfo &LAI) const;
/// \brief Return the destination instruction of the dependence.
/// Return the destination instruction of the dependence.
Instruction *getDestination(const LoopAccessInfo &LAI) const;
/// \brief Dependence types that don't prevent vectorization.
/// Dependence types that don't prevent vectorization.
static bool isSafeForVectorization(DepType Type);
/// \brief Lexically forward dependence.
/// Lexically forward dependence.
bool isForward() const;
/// \brief Lexically backward dependence.
/// Lexically backward dependence.
bool isBackward() const;
/// \brief May be a lexically backward dependence type (includes Unknown).
/// May be a lexically backward dependence type (includes Unknown).
bool isPossiblyBackward() const;
/// \brief Print the dependence. \p Instr is used to map the instruction
/// Print the dependence. \p Instr is used to map the instruction
/// indices to instructions.
void print(raw_ostream &OS, unsigned Depth,
const SmallVectorImpl<Instruction *> &Instrs) const;
@ -167,7 +167,7 @@ public:
ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
RecordDependences(true) {}
/// \brief Register the location (instructions are given increasing numbers)
/// Register the location (instructions are given increasing numbers)
/// of a write access.
void addAccess(StoreInst *SI) {
Value *Ptr = SI->getPointerOperand();
@ -176,7 +176,7 @@ public:
++AccessIdx;
}
/// \brief Register the location (instructions are given increasing numbers)
/// Register the location (instructions are given increasing numbers)
/// of a write access.
void addAccess(LoadInst *LI) {
Value *Ptr = LI->getPointerOperand();
@ -185,29 +185,29 @@ public:
++AccessIdx;
}
/// \brief Check whether the dependencies between the accesses are safe.
/// Check whether the dependencies between the accesses are safe.
///
/// Only checks sets with elements in \p CheckDeps.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
const ValueToValueMap &Strides);
/// \brief No memory dependence was encountered that would inhibit
/// No memory dependence was encountered that would inhibit
/// vectorization.
bool isSafeForVectorization() const { return SafeForVectorization; }
/// \brief The maximum number of bytes of a vector register we can vectorize
/// The maximum number of bytes of a vector register we can vectorize
/// the accesses safely with.
uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }
/// \brief Return the number of elements that are safe to operate on
/// Return the number of elements that are safe to operate on
/// simultaneously, multiplied by the size of the element in bits.
uint64_t getMaxSafeRegisterWidth() const { return MaxSafeRegisterWidth; }
/// \brief In same cases when the dependency check fails we can still
/// In some cases when the dependency check fails we can still
/// vectorize the loop with a dynamic array access check.
bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }
/// \brief Returns the memory dependences. If null is returned we exceeded
/// Returns the memory dependences. If null is returned we exceeded
/// the MaxDependences threshold and this information is not
/// available.
const SmallVectorImpl<Dependence> *getDependences() const {
@ -216,13 +216,13 @@ public:
void clearDependences() { Dependences.clear(); }
/// \brief The vector of memory access instructions. The indices are used as
/// The vector of memory access instructions. The indices are used as
/// instruction identifiers in the Dependence class.
const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
return InstMap;
}
/// \brief Generate a mapping between the memory instructions and their
/// Generate a mapping between the memory instructions and their
/// indices according to program order.
DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
DenseMap<Instruction *, unsigned> OrderMap;
@ -233,7 +233,7 @@ public:
return OrderMap;
}
/// \brief Find the set of instructions that read or write via \p Ptr.
/// Find the set of instructions that read or write via \p Ptr.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const;
@ -247,42 +247,42 @@ private:
PredicatedScalarEvolution &PSE;
const Loop *InnermostLoop;
/// \brief Maps access locations (ptr, read/write) to program order.
/// Maps access locations (ptr, read/write) to program order.
DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;
/// \brief Memory access instructions in program order.
/// Memory access instructions in program order.
SmallVector<Instruction *, 16> InstMap;
/// \brief The program order index to be used for the next instruction.
/// The program order index to be used for the next instruction.
unsigned AccessIdx;
// We can access this many bytes in parallel safely.
uint64_t MaxSafeDepDistBytes;
/// \brief Number of elements (from consecutive iterations) that are safe to
/// Number of elements (from consecutive iterations) that are safe to
/// operate on simultaneously, multiplied by the size of the element in bits.
/// The size of the element is taken from the memory access that is most
/// restrictive.
uint64_t MaxSafeRegisterWidth;
/// \brief If we see a non-constant dependence distance we can still try to
/// If we see a non-constant dependence distance we can still try to
/// vectorize this loop with runtime checks.
bool ShouldRetryWithRuntimeCheck;
/// \brief No memory dependence was encountered that would inhibit
/// No memory dependence was encountered that would inhibit
/// vectorization.
bool SafeForVectorization;
//// \brief True if Dependences reflects the dependences in the
//// True if Dependences reflects the dependences in the
//// loop. If false we exceeded MaxDependences and
//// Dependences is invalid.
bool RecordDependences;
/// \brief Memory dependences collected during the analysis. Only valid if
/// Memory dependences collected during the analysis. Only valid if
/// RecordDependences is true.
SmallVector<Dependence, 8> Dependences;
/// \brief Check whether there is a plausible dependence between the two
/// Check whether there is a plausible dependence between the two
/// accesses.
///
/// Access \p A must happen before \p B in program order. The two indices
@ -298,7 +298,7 @@ private:
const MemAccessInfo &B, unsigned BIdx,
const ValueToValueMap &Strides);
/// \brief Check whether the data dependence could prevent store-load
/// Check whether the data dependence could prevent store-load
/// forwarding.
///
/// \return false if we shouldn't vectorize at all or avoid larger
@ -306,7 +306,7 @@ private:
bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
};
/// \brief Holds information about the memory runtime legality checks to verify
/// Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
class RuntimePointerChecking {
public:
@ -355,13 +355,13 @@ public:
unsigned ASId, const ValueToValueMap &Strides,
PredicatedScalarEvolution &PSE);
/// \brief No run-time memory checking is necessary.
/// No run-time memory checking is necessary.
bool empty() const { return Pointers.empty(); }
/// A grouping of pointers. A single memcheck is required between
/// two groups.
struct CheckingPtrGroup {
/// \brief Create a new pointer checking group containing a single
/// Create a new pointer checking group containing a single
/// pointer, with index \p Index in RtCheck.
CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
: RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
@ -369,7 +369,7 @@ public:
Members.push_back(Index);
}
/// \brief Tries to add the pointer recorded in RtCheck at index
/// Tries to add the pointer recorded in RtCheck at index
/// \p Index to this pointer checking group. We can only add a pointer
/// to a checking group if we will still be able to get
/// the upper and lower bounds of the check. Returns true in case
@ -390,7 +390,7 @@ public:
SmallVector<unsigned, 2> Members;
};
/// \brief A memcheck which made up of a pair of grouped pointers.
/// A memcheck which is made up of a pair of grouped pointers.
///
/// These *have* to be const for now, since checks are generated from
/// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
@ -399,24 +399,24 @@ public:
typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
PointerCheck;
/// \brief Generate the checks and store it. This also performs the grouping
/// Generate the checks and store it. This also performs the grouping
/// of pointers to reduce the number of memchecks necessary.
void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
bool UseDependencies);
/// \brief Returns the checks that generateChecks created.
/// Returns the checks that generateChecks created.
const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }
/// \brief Decide if we need to add a check between two groups of pointers,
/// Decide if we need to add a check between two groups of pointers,
/// according to needsChecking.
bool needsChecking(const CheckingPtrGroup &M,
const CheckingPtrGroup &N) const;
/// \brief Returns the number of run-time checks required according to
/// Returns the number of run-time checks required according to
/// needsChecking.
unsigned getNumberOfChecks() const { return Checks.size(); }
/// \brief Print the list run-time memory checks necessary.
/// Print the list run-time memory checks necessary.
void print(raw_ostream &OS, unsigned Depth = 0) const;
/// Print \p Checks.
@ -432,7 +432,7 @@ public:
/// Holds a partitioning of pointers into "check groups".
SmallVector<CheckingPtrGroup, 2> CheckingGroups;
/// \brief Check if pointers are in the same partition
/// Check if pointers are in the same partition
///
/// \p PtrToPartition contains the partition number for pointers (-1 if the
/// pointer belongs to multiple partitions).
@ -440,17 +440,17 @@ public:
arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
unsigned PtrIdx1, unsigned PtrIdx2);
/// \brief Decide whether we need to issue a run-time check for pointer at
/// Decide whether we need to issue a run-time check for pointer at
/// index \p I and \p J to prove their independence.
bool needsChecking(unsigned I, unsigned J) const;
/// \brief Return PointerInfo for pointer at index \p PtrIdx.
/// Return PointerInfo for pointer at index \p PtrIdx.
const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
return Pointers[PtrIdx];
}
private:
/// \brief Groups pointers such that a single memcheck is required
/// Groups pointers such that a single memcheck is required
/// between two different groups. This will clear the CheckingGroups vector
/// and re-compute it. We will only group dependencies if \p UseDependencies
/// is true, otherwise we will create a separate group for each pointer.
@ -464,12 +464,12 @@ private:
/// Holds a pointer to the ScalarEvolution analysis.
ScalarEvolution *SE;
/// \brief Set of run-time checks required to establish independence of
/// Set of run-time checks required to establish independence of
/// otherwise may-aliasing pointers in the loop.
SmallVector<PointerCheck, 4> Checks;
};
/// \brief Drive the analysis of memory accesses in the loop
/// Drive the analysis of memory accesses in the loop
///
/// This class is responsible for analyzing the memory accesses of a loop. It
/// collects the accesses and then its main helper the AccessAnalysis class
@ -503,7 +503,7 @@ public:
return PtrRtChecking.get();
}
/// \brief Number of memchecks required to prove independence of otherwise
/// Number of memchecks required to prove independence of otherwise
/// may-alias pointers.
unsigned getNumRuntimePointerChecks() const {
return PtrRtChecking->getNumberOfChecks();
@ -521,7 +521,7 @@ public:
unsigned getNumStores() const { return NumStores; }
unsigned getNumLoads() const { return NumLoads;}
/// \brief Add code that checks at runtime if the accessed arrays overlap.
/// Add code that checks at runtime if the accessed arrays overlap.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in possibly a sequence of instructions and the
@ -529,7 +529,7 @@ public:
std::pair<Instruction *, Instruction *>
addRuntimeChecks(Instruction *Loc) const;
/// \brief Generete the instructions for the checks in \p PointerChecks.
/// Generate the instructions for the checks in \p PointerChecks.
///
/// Returns a pair of instructions where the first element is the first
/// instruction generated in possibly a sequence of instructions and the
@ -539,32 +539,32 @@ public:
const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
&PointerChecks) const;
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }
/// \brief the Memory Dependence Checker which can determine the
/// the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
const MemoryDepChecker &getDepChecker() const { return *DepChecker; }
/// \brief Return the list of instructions that use \p Ptr to read or write
/// Return the list of instructions that use \p Ptr to read or write
/// memory.
SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
bool isWrite) const {
return DepChecker->getInstructionsForAccess(Ptr, isWrite);
}
/// \brief If an access has a symbolic strides, this maps the pointer value to
/// If an access has a symbolic stride, this maps the pointer value to
/// the stride symbol.
const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }
/// \brief Pointer has a symbolic stride.
/// Pointer has a symbolic stride.
bool hasStride(Value *V) const { return StrideSet.count(V); }
/// \brief Print the information about the memory accesses in the loop.
/// Print the information about the memory accesses in the loop.
void print(raw_ostream &OS, unsigned Depth = 0) const;
/// \brief Checks existence of store to invariant address inside loop.
/// Checks for the existence of a store to an invariant address inside the loop.
/// If the loop has any store to an invariant address, then it returns true,
/// else returns false.
bool hasStoreToLoopInvariantAddress() const {
@ -579,15 +579,15 @@ public:
const PredicatedScalarEvolution &getPSE() const { return *PSE; }
private:
/// \brief Analyze the loop.
/// Analyze the loop.
void analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
const TargetLibraryInfo *TLI, DominatorTree *DT);
/// \brief Check if the structure of the loop allows it to be analyzed by this
/// Check if the structure of the loop allows it to be analyzed by this
/// pass.
bool canAnalyzeLoop();
/// \brief Save the analysis remark.
/// Save the analysis remark.
///
/// LAA does not directly emit the remarks. Instead it stores them so that the
/// client can retrieve and present them as its own analysis
@ -595,7 +595,7 @@ private:
OptimizationRemarkAnalysis &recordAnalysis(StringRef RemarkName,
Instruction *Instr = nullptr);
/// \brief Collect memory access with loop invariant strides.
/// Collect memory access with loop invariant strides.
///
/// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
/// invariant.
@ -607,7 +607,7 @@ private:
/// at runtime. Using std::unique_ptr to make using move ctor simpler.
std::unique_ptr<RuntimePointerChecking> PtrRtChecking;
/// \brief the Memory Dependence Checker which can determine the
/// the Memory Dependence Checker which can determine the
/// loop-independent and loop-carried dependences between memory accesses.
std::unique_ptr<MemoryDepChecker> DepChecker;
@ -618,28 +618,28 @@ private:
uint64_t MaxSafeDepDistBytes;
/// \brief Cache the result of analyzeLoop.
/// Cache the result of analyzeLoop.
bool CanVecMem;
/// \brief Indicator for storing to uniform addresses.
/// Indicator for storing to uniform addresses.
/// If a loop has a write to a loop-invariant address then it should be true.
bool StoreToLoopInvariantAddress;
/// \brief The diagnostics report generated for the analysis. E.g. why we
/// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
std::unique_ptr<OptimizationRemarkAnalysis> Report;
/// \brief If an access has a symbolic strides, this maps the pointer value to
/// If an access has a symbolic stride, this maps the pointer value to
/// the stride symbol.
ValueToValueMap SymbolicStrides;
/// \brief Set of symbolic strides values.
/// Set of symbolic strides values.
SmallPtrSet<Value *, 8> StrideSet;
};
Value *stripIntegerCast(Value *V);
/// \brief Return the SCEV corresponding to a pointer with the symbolic stride
/// Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
@ -653,7 +653,7 @@ const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
const ValueToValueMap &PtrToStride,
Value *Ptr, Value *OrigPtr = nullptr);
/// \brief If the pointer has a constant stride return it in units of its
/// If the pointer has a constant stride return it in units of its
/// element size. Otherwise return zero.
///
/// Ensure that it does not wrap in the address space, assuming the predicate
@ -667,12 +667,26 @@ int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
const ValueToValueMap &StridesMap = ValueToValueMap(),
bool Assume = false, bool ShouldCheckWrap = true);
/// \brief Returns true if the memory operations \p A and \p B are consecutive.
/// Attempt to sort the pointers in \p VL and return the sorted indices
/// in \p SortedIndices, if reordering is required.
///
/// Returns 'true' if sorting is legal, otherwise returns 'false'.
///
/// For example, for a given \p VL of memory accesses in program order, a[i+4],
/// a[i+0], a[i+1] and a[i+7], this function does not reorder \p VL itself;
/// the sorted order is a[i+0], a[i+1], a[i+4], a[i+7], and for each position
/// in that order the index of the access in the original \p VL is stored in
/// \p SortedIndices, i.e. \p SortedIndices becomes <1,2,0,3>.
bool sortPtrAccesses(ArrayRef<Value *> VL, const DataLayout &DL,
ScalarEvolution &SE,
SmallVectorImpl<unsigned> &SortedIndices);
/// Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
ScalarEvolution &SE, bool CheckType = true);
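A minimal sketch of the standalone helper just declared; A and B are the memory instructions themselves (here two loads), and DL/SE are assumed to come from the enclosing function's analyses:
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// True if LB loads the element immediately after the one loaded by LA.
static bool areAdjacentLoads(LoadInst *LA, LoadInst *LB, const DataLayout &DL,
                             ScalarEvolution &SE) {
  return isConsecutiveAccess(LA, LB, DL, SE, /*CheckType=*/true);
}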
/// \brief This analysis provides dependence information for the memory accesses
/// This analysis provides dependence information for the memory accesses
/// of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
@ -691,7 +705,7 @@ public:
void getAnalysisUsage(AnalysisUsage &AU) const override;
/// \brief Query the result of the loop access information for the loop \p L.
/// Query the result of the loop access information for the loop \p L.
///
/// If there is no cached result available run the analysis.
const LoopAccessInfo &getInfo(Loop *L);
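A sketch of a legacy-PM client of getInfo(); the wrapper-pass class name (LoopAccessLegacyAnalysis) and the canVectorizeMemory() accessor are not shown in this hunk and are assumptions:
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"

using namespace llvm;

// Memory-safety gate for vectorizing L: the analysis runs on demand inside
// getInfo(), and the number of runtime pointer checks is bounded by the caller.
static bool memorySafeToVectorize(LoopAccessLegacyAnalysis &LAA, Loop *L,
                                  unsigned MaxRuntimeChecks) {
  const LoopAccessInfo &LAI = LAA.getInfo(L);
  if (!LAI.canVectorizeMemory()) // assumed accessor for the cached CanVecMem
    return false;
  return LAI.getNumRuntimePointerChecks() <= MaxRuntimeChecks;
}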
@ -701,11 +715,11 @@ public:
LoopAccessInfoMap.clear();
}
/// \brief Print the result of the analysis when invoked with -analyze.
/// Print the result of the analysis when invoked with -analyze.
void print(raw_ostream &OS, const Module *M = nullptr) const override;
private:
/// \brief The cache.
/// The cache.
DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;
// The used analysis passes.
@ -716,7 +730,7 @@ private:
LoopInfo *LI;
};
/// \brief This analysis provides dependence information for the memory
/// This analysis provides dependence information for the memory
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by

View File

@ -69,7 +69,7 @@ extern cl::opt<bool> EnableMSSALoopDependency;
extern template class AllAnalysesOn<Loop>;
extern template class AnalysisManager<Loop, LoopStandardAnalysisResults &>;
/// \brief The loop analysis manager.
/// The loop analysis manager.
///
/// See the documentation for the AnalysisManager template for detail
/// documentation. This typedef serves as a convenient way to refer to this

View File

@ -178,6 +178,12 @@ public:
return DenseBlockSet;
}
/// Return a direct, immutable handle to the blocks set.
const SmallPtrSetImpl<const BlockT *> &getBlocksSet() const {
assert(!isInvalid() && "Loop not in a valid state!");
return DenseBlockSet;
}
/// Return true if this loop is no longer valid. The only valid use of this
/// helper is "assert(L.isInvalid())" or equivalent, since IsInvalid is set to
/// true by the destructor. In other words, if this accessor returns true,
@ -255,6 +261,20 @@ public:
/// Otherwise return null.
BlockT *getExitBlock() const;
/// Return true if no exit block for the loop has a predecessor that is
/// outside the loop.
bool hasDedicatedExits() const;
/// Return all unique successor blocks of this loop.
/// These are the blocks _outside of the current loop_ which are branched to.
/// This assumes that loop exits are in canonical form, i.e. all exits are
/// dedicated exits.
void getUniqueExitBlocks(SmallVectorImpl<BlockT *> &ExitBlocks) const;
/// If getUniqueExitBlocks would return exactly one block, return that block.
/// Otherwise return null.
BlockT *getUniqueExitBlock() const;
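Since these queries now live on LoopBase, any Loop can use them directly; a minimal sketch, assuming the loop is in canonical form so the dedicated-exits precondition of getUniqueExitBlocks holds:
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/BasicBlock.h"

using namespace llvm;

// Return the single dedicated exit block of L, or null if there are several
// (or none, e.g. for an infinite loop).
static BasicBlock *singleDedicatedExit(const Loop &L) {
  if (!L.hasDedicatedExits())
    return nullptr;
  return L.getUniqueExitBlock();
}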
/// Edge type.
typedef std::pair<const BlockT *, const BlockT *> Edge;
@ -438,7 +458,7 @@ extern template class LoopBase<BasicBlock, Loop>;
/// in the CFG are necessarily loops.
class Loop : public LoopBase<BasicBlock, Loop> {
public:
/// \brief A range representing the start and end location of a loop.
/// A range representing the start and end location of a loop.
class LocRange {
DebugLoc Start;
DebugLoc End;
@ -452,7 +472,7 @@ public:
const DebugLoc &getStart() const { return Start; }
const DebugLoc &getEnd() const { return End; }
/// \brief Check for null.
/// Check for null.
///
explicit operator bool() const { return Start && End; }
};
@ -527,7 +547,7 @@ public:
///
/// If this loop contains the same llvm.loop metadata on each branch to the
/// header then the node is returned. If any latch instruction does not
/// contain llvm.loop or or if multiple latches contain different nodes then
/// contain llvm.loop or if multiple latches contain different nodes then
/// 0 is returned.
MDNode *getLoopID() const;
/// Set the llvm.loop loop id metadata for this loop.
@ -547,20 +567,6 @@ public:
/// unrolling pass is run more than once (which it generally is).
void setLoopAlreadyUnrolled();
/// Return true if no exit block for the loop has a predecessor that is
/// outside the loop.
bool hasDedicatedExits() const;
/// Return all unique successor blocks of this loop.
/// These are the blocks _outside of the current loop_ which are branched to.
/// This assumes that loop exits are in canonical form, i.e. all exits are
/// dedicated exits.
void getUniqueExitBlocks(SmallVectorImpl<BasicBlock *> &ExitBlocks) const;
/// If getUniqueExitBlocks would return exactly one block, return that block.
/// Otherwise return null.
BasicBlock *getUniqueExitBlock() const;
void dump() const;
void dumpVerbose() const;
@ -929,7 +935,7 @@ template <> struct GraphTraits<Loop *> {
static ChildIteratorType child_end(NodeRef N) { return N->end(); }
};
/// \brief Analysis pass that exposes the \c LoopInfo for a function.
/// Analysis pass that exposes the \c LoopInfo for a function.
class LoopAnalysis : public AnalysisInfoMixin<LoopAnalysis> {
friend AnalysisInfoMixin<LoopAnalysis>;
static AnalysisKey Key;
@ -940,7 +946,7 @@ public:
LoopInfo run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Printer pass for the \c LoopAnalysis results.
/// Printer pass for the \c LoopAnalysis results.
class LoopPrinterPass : public PassInfoMixin<LoopPrinterPass> {
raw_ostream &OS;
@ -949,12 +955,12 @@ public:
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief Verifier pass for the \c LoopAnalysis results.
/// Verifier pass for the \c LoopAnalysis results.
struct LoopVerifierPass : public PassInfoMixin<LoopVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// \brief The legacy pass manager's analysis pass to compute loop information.
/// The legacy pass manager's analysis pass to compute loop information.
class LoopInfoWrapperPass : public FunctionPass {
LoopInfo LI;
@ -968,7 +974,7 @@ public:
LoopInfo &getLoopInfo() { return LI; }
const LoopInfo &getLoopInfo() const { return LI; }
/// \brief Calculate the natural loop information for a given function.
/// Calculate the natural loop information for a given function.
bool runOnFunction(Function &F) override;
void verifyAnalysis() const override;

View File

@ -82,6 +82,74 @@ BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
return nullptr;
}
template <class BlockT, class LoopT>
bool LoopBase<BlockT, LoopT>::hasDedicatedExits() const {
// Each predecessor of each exit block of a normal loop is contained
// within the loop.
SmallVector<BlockT *, 4> ExitBlocks;
getExitBlocks(ExitBlocks);
for (BlockT *EB : ExitBlocks)
for (BlockT *Predecessor : children<Inverse<BlockT *>>(EB))
if (!contains(Predecessor))
return false;
// All the requirements are met.
return true;
}
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getUniqueExitBlocks(
SmallVectorImpl<BlockT *> &ExitBlocks) const {
typedef GraphTraits<BlockT *> BlockTraits;
typedef GraphTraits<Inverse<BlockT *>> InvBlockTraits;
assert(hasDedicatedExits() &&
"getUniqueExitBlocks assumes the loop has canonical form exits!");
SmallVector<BlockT *, 32> SwitchExitBlocks;
for (BlockT *Block : this->blocks()) {
SwitchExitBlocks.clear();
for (BlockT *Successor : children<BlockT *>(Block)) {
// If block is inside the loop then it is not an exit block.
if (contains(Successor))
continue;
BlockT *FirstPred = *InvBlockTraits::child_begin(Successor);
// Insert the exit block into the output ExitBlocks vector only if the current
// basic block is this exit block's first predecessor. This ensures that the
// same exit block is not inserted twice into the ExitBlocks vector.
if (Block != FirstPred)
continue;
// If a terminator has more than two successors, for example a SwitchInst,
// then it is possible that there are multiple edges from the current block to
// one exit block.
if (std::distance(BlockTraits::child_begin(Block),
BlockTraits::child_end(Block)) <= 2) {
ExitBlocks.push_back(Successor);
continue;
}
// In case of multiple edges from the current block to the exit block, collect
// only one edge in ExitBlocks. Use SwitchExitBlocks to keep track of
// duplicate edges.
if (!is_contained(SwitchExitBlocks, Successor)) {
SwitchExitBlocks.push_back(Successor);
ExitBlocks.push_back(Successor);
}
}
}
}
template <class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getUniqueExitBlock() const {
SmallVector<BlockT *, 8> UniqueExitBlocks;
getUniqueExitBlocks(UniqueExitBlocks);
if (UniqueExitBlocks.size() == 1)
return UniqueExitBlocks[0];
return nullptr;
}
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
template <class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::getExitEdges(
@ -572,8 +640,8 @@ void LoopInfoBase<BlockT, LoopT>::print(raw_ostream &OS) const {
template <typename T>
bool compareVectors(std::vector<T> &BB1, std::vector<T> &BB2) {
std::sort(BB1.begin(), BB1.end());
std::sort(BB2.begin(), BB2.end());
llvm::sort(BB1.begin(), BB1.end());
llvm::sort(BB2.begin(), BB2.end());
return BB1 == BB2;
}
@ -617,6 +685,15 @@ static void compareLoops(const LoopT *L, const LoopT *OtherL,
std::vector<BlockT *> OtherBBs = OtherL->getBlocks();
assert(compareVectors(BBs, OtherBBs) &&
"Mismatched basic blocks in the loops!");
const SmallPtrSetImpl<const BlockT *> &BlocksSet = L->getBlocksSet();
const SmallPtrSetImpl<const BlockT *> &OtherBlocksSet = OtherL->getBlocksSet();
assert(BlocksSet.size() == OtherBlocksSet.size() &&
std::all_of(BlocksSet.begin(), BlocksSet.end(),
[&OtherBlocksSet](const BlockT *BB) {
return OtherBlocksSet.count(BB);
}) &&
"Mismatched basic blocks in BlocksSets!");
}
#endif
@ -636,6 +713,9 @@ void LoopInfoBase<BlockT, LoopT>::verify(
LoopT *L = Entry.second;
assert(Loops.count(L) && "orphaned loop");
assert(L->contains(BB) && "orphaned block");
for (LoopT *ChildLoop : *L)
assert(!ChildLoop->contains(BB) &&
"BBMap should point to the innermost loop containing BB");
}
// Recompute LoopInfo to verify loops structure.

View File

@ -168,6 +168,25 @@ public:
}
};
/// Wrapper class to LoopBlocksDFS that provides a standard begin()/end()
/// interface for the DFS reverse post-order traversal of blocks in a loop body.
class LoopBlocksRPO {
private:
LoopBlocksDFS DFS;
public:
LoopBlocksRPO(Loop *Container) : DFS(Container) {}
/// Traverse the loop blocks and store the DFS result.
void perform(LoopInfo *LI) {
DFS.perform(LI);
}
/// Reverse iterate over the cached postorder blocks.
LoopBlocksDFS::RPOIterator begin() const { return DFS.beginRPO(); }
LoopBlocksDFS::RPOIterator end() const { return DFS.endRPO(); }
};
/// Specialize po_iterator_storage to record postorder numbers.
template<> class po_iterator_storage<LoopBlocksTraversal, true> {
LoopBlocksTraversal &LBT;
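A minimal sketch of the new LoopBlocksRPO wrapper defined above; the header name llvm/Analysis/LoopIterator.h is assumed, and LI is the LoopInfo of the enclosing function:
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"

using namespace llvm;

// Visit the blocks of L in reverse post-order of the loop body's DFS.
static void visitLoopInRPO(Loop *L, LoopInfo *LI) {
  LoopBlocksRPO RPOT(L);
  RPOT.perform(LI); // run the DFS and cache the ordering
  for (BasicBlock *BB : RPOT) {
    (void)BB; // process BB here
  }
}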

View File

@ -57,7 +57,7 @@ public:
using Base::visit;
private:
/// \brief A cache of pointer bases and constant-folded offsets corresponding
/// A cache of pointer bases and constant-folded offsets corresponding
/// to GEP (or derived from GEP) instructions.
///
/// In order to find the base pointer one needs to perform non-trivial
@ -65,11 +65,11 @@ private:
/// results saved.
DenseMap<Value *, SimplifiedAddress> SimplifiedAddresses;
/// \brief SCEV expression corresponding to number of currently simulated
/// SCEV expression corresponding to number of currently simulated
/// iteration.
const SCEV *IterationNumber;
/// \brief A Value->Constant map for keeping values that we managed to
/// A Value->Constant map for keeping values that we managed to
/// constant-fold on the given iteration.
///
/// While we walk the loop instructions, we build up and maintain a mapping

View File

@ -53,33 +53,33 @@ class Type;
class UndefValue;
class Value;
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like).
bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a function that returns a
/// Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc).
bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc).
bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates memory similar to malloc or calloc.
bool isMallocOrCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that
/// Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
@ -170,14 +170,14 @@ struct ObjectSizeOpts {
bool NullIsUnknownSize = false;
};
/// \brief Compute the size of the object pointed by Ptr. Returns true and the
/// Compute the size of the object pointed by Ptr. Returns true and the
/// object size in Size if successful, and false otherwise. In this context, by
/// object we mean the region of memory starting at Ptr to the end of the
/// underlying object pointed to by Ptr.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL,
const TargetLibraryInfo *TLI, ObjectSizeOpts Opts = {});
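A minimal sketch of the query above; the TargetLibraryInfo pointer is assumed to come from the usual wrapper pass or analysis, and the byte-limit check is illustrative:
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Value.h"

using namespace llvm;

// True if Ptr provably points at an object of at most Limit bytes.
static bool fitsInBytes(const Value *Ptr, uint64_t Limit, const DataLayout &DL,
                        const TargetLibraryInfo *TLI) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = true; // be conservative about null pointers
  if (!getObjectSize(Ptr, Size, DL, TLI, Opts))
    return false; // size not known at compile time
  return Size <= Limit;
}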
/// Try to turn a call to @llvm.objectsize into an integer value of the given
/// Try to turn a call to \@llvm.objectsize into an integer value of the given
/// Type. Returns null on failure.
/// If MustSucceed is true, this function will not return null, and may return
/// conservative values governed by the second argument of the call to
@ -189,7 +189,7 @@ ConstantInt *lowerObjectSizeCall(IntrinsicInst *ObjectSize,
using SizeOffsetType = std::pair<APInt, APInt>;
/// \brief Evaluate the size and offset of an object pointed to by a Value*
/// Evaluate the size and offset of an object pointed to by a Value*
/// statically. Fails if size or offset are not known at compile time.
class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
@ -248,7 +248,7 @@ private:
using SizeOffsetEvalType = std::pair<Value *, Value *>;
/// \brief Evaluate the size and offset of an object pointed to by a Value*.
/// Evaluate the size and offset of an object pointed to by a Value*.
/// May create code to compute the result at run-time.
class ObjectSizeOffsetEvaluator
: public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {

Some files were not shown because too many files have changed in this diff.