Vendor import of llvm trunk r305575:

https://llvm.org/svn/llvm-project/llvm/trunk@305575
Dimitry Andric 2017-06-16 21:03:24 +00:00
parent 7ab83427af
commit 7c7aba6e5f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/vendor/llvm/dist/; revision=320013
svn path=/vendor/llvm/llvm-trunk-r305575/; revision=320014; tag=vendor/llvm/llvm-trunk-r305575
595 changed files with 17871 additions and 11385 deletions

View File

@ -35,38 +35,24 @@ function(tablegen project ofn)
# a tablegen change, as cmake does not propagate file-level dependencies
# of custom targets. See the following ticket for more information:
# https://cmake.org/Bug/view.php?id=15858
# We could always have just one dependency on both the target and
# the file, but these 2 cases would produce cleaner cmake files.
if (${${project}_TABLEGEN_TARGET} STREQUAL ${${project}_TABLEGEN_EXE})
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
# Generate tablegen output in a temporary file.
COMMAND ${${project}_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
${LLVM_TABLEGEN_FLAGS}
${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
-o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
# The file in LLVM_TARGET_DEFINITIONS may be not in the current
# directory and local_tds may not contain it, so we must
# explicitly list it here:
DEPENDS ${${project}_TABLEGEN_TARGET} ${local_tds} ${global_tds}
${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
COMMENT "Building ${ofn}..."
)
else()
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
# Generate tablegen output in a temporary file.
COMMAND ${${project}_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
${LLVM_TABLEGEN_FLAGS}
${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
-o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
# The file in LLVM_TARGET_DEFINITIONS may be not in the current
# directory and local_tds may not contain it, so we must
# explicitly list it here:
DEPENDS ${${project}_TABLEGEN_TARGET} ${${project}_TABLEGEN_EXE}
${local_tds} ${global_tds}
${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
COMMENT "Building ${ofn}..."
)
endif()
# The dependency on both, the target and the file, produces the same
# dependency twice in the result file when
# ("${${project}_TABLEGEN_TARGET}" STREQUAL "${${project}_TABLEGEN_EXE}")
# but lets us have smaller and cleaner code here.
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
# Generate tablegen output in a temporary file.
COMMAND ${${project}_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
${LLVM_TABLEGEN_FLAGS}
${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
-o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
# The file in LLVM_TARGET_DEFINITIONS may be not in the current
# directory and local_tds may not contain it, so we must
# explicitly list it here:
DEPENDS ${${project}_TABLEGEN_TARGET} ${${project}_TABLEGEN_EXE}
${local_tds} ${global_tds}
${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
COMMENT "Building ${ofn}..."
)
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
# Only update the real output file if there are any differences.
# This prevents recompilation of all the files depending on it if there

View File

@ -64,6 +64,20 @@ Branch weights are assigned to every destination.
[ , i32 <LABEL_BRANCH_WEIGHT> ... ]
}
``CallInst``
^^^^^^^^^^^^^^^^^^
Calls may have branch weight metadata, containing the execution count of
the call. It is currently used in SamplePGO mode only, to augment the
block and entry counts which may not be accurate with sampling.
.. code-block:: none
!0 = metadata !{
metadata !"branch_weights",
i32 <CALL_BRANCH_WEIGHT>
}
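For illustration only (a minimal sketch, not taken from this patch), such a node is
attached directly to the call site:

.. code-block:: llvm

    declare void @foo()

    define void @bar() {
      ; the single weight records roughly how many times the call was sampled
      call void @foo(), !prof !0
      ret void
    }

    !0 = !{!"branch_weights", i32 6000}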
Other
^^^^^

View File

@ -4033,26 +4033,26 @@ DICompileUnit
"""""""""""""
``DICompileUnit`` nodes represent a compile unit. The ``enums:``,
``retainedTypes:``, ``subprograms:``, ``globals:``, ``imports:`` and ``macros:``
fields are tuples containing the debug info to be emitted along with the compile
unit, regardless of code optimizations (some nodes are only emitted if there are
references to them from instructions). The ``debugInfoForProfiling:`` field is a
boolean indicating whether or not line-table discriminators are updated to
provide more-accurate debug info for profiling results.
``retainedTypes:``, ``globals:``, ``imports:`` and ``macros:`` fields are tuples
containing the debug info to be emitted along with the compile unit, regardless
of code optimizations (some nodes are only emitted if there are references to
them from instructions). The ``debugInfoForProfiling:`` field is a boolean
indicating whether or not line-table discriminators are updated to provide
more-accurate debug info for profiling results.
.. code-block:: text
!0 = !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang",
isOptimized: true, flags: "-O2", runtimeVersion: 2,
splitDebugFilename: "abc.debug", emissionKind: FullDebug,
enums: !2, retainedTypes: !3, subprograms: !4,
globals: !5, imports: !6, macros: !7, dwoId: 0x0abcd)
enums: !2, retainedTypes: !3, globals: !4, imports: !5,
macros: !6, dwoId: 0x0abcd)
Compile unit descriptors provide the root scope for objects declared in a
specific compilation unit. File descriptors are defined using this scope.
These descriptors are collected by a named metadata ``!llvm.dbg.cu``. They
keep track of subprograms, global variables, type information, and imported
entities (declarations and namespaces).
specific compilation unit. File descriptors are defined using this scope. These
descriptors are collected by a named metadata node ``!llvm.dbg.cu``. They keep
track of global variables, type information, and imported entities (declarations
and namespaces).
.. _DIFile:
@ -4326,8 +4326,8 @@ and ``scope:``.
containingType: !4,
virtuality: DW_VIRTUALITY_pure_virtual,
virtualIndex: 10, flags: DIFlagPrototyped,
isOptimized: true, templateParams: !5,
declaration: !6, variables: !7)
isOptimized: true, unit: !5, templateParams: !6,
declaration: !7, variables: !8, thrownTypes: !9)
.. _DILexicalBlock:
@ -4404,7 +4404,12 @@ referenced LLVM variable relates to the source language variable.
The current supported vocabulary is limited:
- ``DW_OP_deref`` dereferences the top of the expression stack.
- ``DW_OP_plus, 93`` adds ``93`` to the working expression.
- ``DW_OP_plus`` pops the last two entries from the expression stack, adds
them together and appends the result to the expression stack.
- ``DW_OP_minus`` pops the last two entries from the expression stack, subtracts
the last entry from the second last entry and appends the result to the
expression stack.
- ``DW_OP_plus_uconst, 93`` adds ``93`` to the working expression.
- ``DW_OP_LLVM_fragment, 16, 8`` specifies the offset and size (``16`` and ``8``
here, respectively) of the variable fragment from the working expression. Note
that contrary to DW_OP_bit_piece, the offset is describing the location
@ -4426,9 +4431,10 @@ combined with a concrete location.
.. code-block:: llvm
!0 = !DIExpression(DW_OP_deref)
!1 = !DIExpression(DW_OP_plus, 3)
!1 = !DIExpression(DW_OP_plus_uconst, 3)
!1 = !DIExpression(DW_OP_constu, 3, DW_OP_plus)
!2 = !DIExpression(DW_OP_bit_piece, 3, 7)
!3 = !DIExpression(DW_OP_deref, DW_OP_plus, 3, DW_OP_LLVM_fragment, 3, 7)
!3 = !DIExpression(DW_OP_deref, DW_OP_constu, 3, DW_OP_plus, DW_OP_LLVM_fragment, 3, 7)
!4 = !DIExpression(DW_OP_constu, 2, DW_OP_swap, DW_OP_xderef)
!5 = !DIExpression(DW_OP_constu, 42, DW_OP_stack_value)
@ -5186,6 +5192,72 @@ Example:
!0 = !{i32* @a}
'``prof``' Metadata
^^^^^^^^^^^^^^^^^^^
The ``prof`` metadata is used to record profile data in the IR.
The first operand of the metadata node indicates the profile metadata
type. There are currently 3 types:
:ref:`branch_weights<prof_node_branch_weights>`,
:ref:`function_entry_count<prof_node_function_entry_count>`, and
:ref:`VP<prof_node_VP>`.
.. _prof_node_branch_weights:
branch_weights
""""""""""""""
Branch weight metadata attached to a branch, select, switch or call instruction
represents the likeliness of the associated branch being taken.
For more information, see :doc:`BranchWeightMetadata`.
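As a minimal illustrative sketch (not part of this change), a conditional branch
annotated with such weights might look like:

.. code-block:: llvm

    define i32 @abs(i32 %x) {
    entry:
      %neg = icmp slt i32 %x, 0
      ; the first weight belongs to the %then edge, the second to the %else edge
      br i1 %neg, label %then, label %else, !prof !0

    then:
      %m = sub i32 0, %x
      ret i32 %m

    else:
      ret i32 %x
    }

    !0 = !{!"branch_weights", i32 1, i32 99}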
.. _prof_node_function_entry_count:
function_entry_count
""""""""""""""""""""
Function entry count metadata can be attached to function definitions
to record the number of times the function is called. Used with BFI
information, it is also used to derive the basic block profile count.
For more information, see :doc:`BranchWeightMetadata`.
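A hedged sketch of what such an annotation looks like (the count below is invented
for illustration):

.. code-block:: llvm

    define void @f() !prof !0 {
      ret void
    }

    !0 = !{!"function_entry_count", i64 2500}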
.. _prof_node_VP:
VP
""
VP (value profile) metadata can be attached to instructions that have
value profile information. Currently this is indirect calls (where it
records the hottest callees) and calls to memory intrinsics such as memcpy,
memmove, and memset (where it records the hottest byte lengths).
Each VP metadata node contains a "VP" string, then a uint32_t value for the value
profiling kind, a uint64_t value for the total number of times the instruction
is executed, followed by uint64_t value and execution count pairs.
The value profiling kind is 0 for indirect call targets and 1 for memory
operations. For indirect call targets, each profile value is a hash
of the callee function name, and for memory operations each value is the
byte length.
Note that the value counts do not need to add up to the total count
listed in the third operand (in practice only the top hottest values
are tracked and reported).
Indirect call example:
.. code-block:: llvm
call void %f(), !prof !1
!1 = !{!"VP", i32 0, i64 1600, i64 7651369219802541373, i64 1030, i64 -4377547752858689819, i64 410}
Note that the VP type is 0 (the second operand), which indicates this is
indirect call value profile data. The third operand indicates that the
indirect call was executed 1600 times. The 4th and 6th operands give the
hashes of the 2 hottest target functions' names (this is the same hash used
to represent function names in the profile database), and the 5th and 7th
operands give the number of times each of those target functions was called.
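By analogy, a memory-intrinsic value profile (kind 1) records byte lengths instead of
callee hashes. A hypothetical example (all numbers invented for illustration):

.. code-block:: llvm

    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i32 1, i1 false), !prof !2

    !2 = !{!"VP", i32 1, i64 12000, i64 32, i64 8000, i64 128, i64 3000}

Here the memcpy was executed 12000 times in total; a length of 32 bytes was seen 8000
times and a length of 128 bytes was seen 3000 times.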
Module Flags Metadata
=====================
@ -5352,40 +5424,6 @@ Some important flag interactions:
- A module with ``Objective-C Garbage Collection`` set to 0 cannot be
merged with a module with ``Objective-C GC Only`` set to 6.
Automatic Linker Flags Module Flags Metadata
--------------------------------------------
Some targets support embedding flags to the linker inside individual object
files. Typically this is used in conjunction with language extensions which
allow source files to explicitly declare the libraries they depend on, and have
these automatically be transmitted to the linker via object files.
These flags are encoded in the IR using metadata in the module flags section,
using the ``Linker Options`` key. The merge behavior for this flag is required
to be ``AppendUnique``, and the value for the key is expected to be a metadata
node which should be a list of other metadata nodes, each of which should be a
list of metadata strings defining linker options.
For example, the following metadata section specifies two separate sets of
linker options, presumably to link against ``libz`` and the ``Cocoa``
framework::
!0 = !{ i32 6, !"Linker Options",
!{
!{ !"-lz" },
!{ !"-framework", !"Cocoa" } } }
!llvm.module.flags = !{ !0 }
The metadata encoding as lists of lists of options, as opposed to a collapsed
list of options, is chosen so that the IR encoding can use multiple option
strings to specify e.g., a single library, while still having that specifier be
preserved as an atomic element that can be recognized by a target specific
assembly writer or object file emitter.
Each individual option is required to be either a valid option for the target's
linker, or an option that is reserved by the target specific assembly writer or
object file emitter. No other aspect of these options is defined by the IR.
C type width Module Flags Metadata
----------------------------------
@ -5422,6 +5460,37 @@ enum is the smallest type which can represent all of its values::
!0 = !{i32 1, !"short_wchar", i32 1}
!1 = !{i32 1, !"short_enum", i32 0}
Automatic Linker Flags Named Metadata
=====================================
Some targets support embedding flags to the linker inside individual object
files. Typically this is used in conjunction with language extensions which
allow source files to explicitly declare the libraries they depend on, and have
these automatically be transmitted to the linker via object files.
These flags are encoded in the IR using named metadata with the name
``!llvm.linker.options``. Each operand is expected to be a metadata node
which should be a list of other metadata nodes, each of which should be a
list of metadata strings defining linker options.
For example, the following metadata section specifies two separate sets of
linker options, presumably to link against ``libz`` and the ``Cocoa``
framework::
!0 = !{ !"-lz" },
!1 = !{ !"-framework", !"Cocoa" } } }
!llvm.linker.options = !{ !0, !1 }
The metadata encoding as lists of lists of options, as opposed to a collapsed
list of options, is chosen so that the IR encoding can use multiple option
strings to specify e.g., a single library, while still having that specifier be
preserved as an atomic element that can be recognized by a target specific
assembly writer or object file emitter.
Each individual option is required to be either a valid option for the target's
linker, or an option that is reserved by the target specific assembly writer or
object file emitter. No other aspect of these options is defined by the IR.
.. _intrinsicglobalvariables:
Intrinsic Global Variables
@ -13999,62 +14068,66 @@ Element Wise Atomic Memory Intrinsics
These intrinsics are similar to the standard library memory intrinsics except
that they perform memory transfer as a sequence of atomic memory accesses.
.. _int_memcpy_element_atomic:
.. _int_memcpy_element_unordered_atomic:
'``llvm.memcpy.element.atomic``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
'``llvm.memcpy.element.unordered.atomic``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.memcpy.element.atomic`` on
This is an overloaded intrinsic. You can use ``llvm.memcpy.element.unordered.atomic`` on
any integer bit width and for different address spaces. Not all targets
support all bit widths however.
::
declare void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* <dest>, i8* <src>,
i64 <num_elements>, i32 <element_size>)
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* <dest>,
i8* <src>,
i32 <len>,
i32 <element_size>)
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* <dest>,
i8* <src>,
i64 <len>,
i32 <element_size>)
Overview:
"""""""""
The '``llvm.memcpy.element.atomic.*``' intrinsic performs copy of a block of
memory from the source location to the destination location as a sequence of
unordered atomic memory accesses where each access is a multiple of
``element_size`` bytes wide and aligned at an element size boundary. For example
each element is accessed atomically in source and destination buffers.
The '``llvm.memcpy.element.unordered.atomic.*``' intrinsic is a specialization of the
'``llvm.memcpy.*``' intrinsic. It differs in that the ``dest`` and ``src`` are treated
as arrays with elements that are exactly ``element_size`` bytes, and the copy between
buffers uses a sequence of :ref:`unordered atomic <ordering>` load/store operations
that are a positive integer multiple of the ``element_size`` in size.
Arguments:
""""""""""
The first argument is a pointer to the destination, the second is a
pointer to the source. The third argument is an integer argument
specifying the number of elements to copy, the fourth argument is size of
the single element in bytes.
The first three arguments are the same as they are in the :ref:`@llvm.memcpy <int_memcpy>`
intrinsic, with the added constraint that ``len`` is required to be a positive integer
multiple of the ``element_size``. If ``len`` is not a positive integer multiple of
``element_size``, then the behaviour of the intrinsic is undefined.
``element_size`` should be a power of two, greater than zero and less than
a target-specific atomic access size limit.
``element_size`` must be a compile-time constant positive power of two no greater than
a target-specific atomic access size limit.
For each of the input pointers ``align`` parameter attribute must be specified.
It must be a power of two and greater than or equal to the ``element_size``.
Caller guarantees that both the source and destination pointers are aligned to
that boundary.
For each of the input pointers, the ``align`` parameter attribute must be specified. It
must be a power of two no less than the ``element_size``. The caller guarantees that
both the source and destination pointers are aligned to that boundary.
Semantics:
""""""""""
The '``llvm.memcpy.element.atomic.*``' intrinsic copies
'``num_elements`` * ``element_size``' bytes of memory from the source location to
the destination location. These locations are not allowed to overlap. Memory copy
is performed as a sequence of unordered atomic memory accesses where each access
is guaranteed to be a multiple of ``element_size`` bytes wide and aligned at an
element size boundary.
The '``llvm.memcpy.element.unordered.atomic.*``' intrinsic copies ``len`` bytes of
memory from the source location to the destination location. These locations are not
allowed to overlap. The memory copy is performed as a sequence of load/store operations
where each access is guaranteed to be a multiple of ``element_size`` bytes wide and
aligned at an ``element_size`` boundary.
The order of the copy is unspecified. The same value may be read from the source
buffer many times, but only one write is issued to the destination buffer per
element. It is well defined to have concurrent reads and writes to both source
and destination provided those reads and writes are at least unordered atomic.
element. It is well defined to have concurrent reads and writes to both source and
destination provided those reads and writes are unordered atomic when specified.
This intrinsic does not provide any additional ordering guarantees over those
provided by a set of unordered loads from the source location and stores to the
@ -14063,8 +14136,8 @@ destination.
Lowering:
"""""""""
In the most general case call to the '``llvm.memcpy.element.atomic.*``' is lowered
to a call to the symbol ``__llvm_memcpy_element_atomic_*``. Where '*' is replaced
with an actual element size.
In the most general case, a call to the '``llvm.memcpy.element.unordered.atomic.*``' is
lowered to a call to the symbol ``__llvm_memcpy_element_unordered_atomic_*``, where '*'
is replaced with the actual element size.
Optimizer is allowed to inline memory copy when it's profitable to do so.
The optimizer is allowed to inline the memory copy when it's profitable to do so.
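To make the shape of the new intrinsic concrete, a small hypothetical use (element size
and length chosen arbitrarily) could look like:

.. code-block:: llvm

    declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8*, i8*, i32, i32)

    define void @copy32(i8* align 4 %dst, i8* align 4 %src) {
      ; 32 bytes are copied as unordered-atomic accesses of (multiples of) 4 bytes
      call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dst,
                                                                    i8* align 4 %src,
                                                                    i32 32, i32 4)
      ret void
    }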

View File

@ -109,6 +109,13 @@ G
Garbage Collection. The practice of using reachability analysis instead of
explicit memory management to reclaim unused memory.
**GVN**
Global Value Numbering. GVN is a pass that partitions values computed by a
function into congruence classes. Values ending up in the same congruence
class are guaranteed to be the same for every execution of the program.
In that respect, congruency is a compile-time approximation of equivalence
of values at runtime.
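For instance (an illustrative fragment, not from this patch), the two ``add``
instructions below compute the same value on every execution, so GVN can place them in
one congruence class and reuse the first result:

.. code-block:: llvm

    define i32 @f(i32 %x, i32 %y) {
      %a = add i32 %x, %y
      %b = add i32 %x, %y   ; congruent to %a; GVN replaces uses of %b with %a
      %s = mul i32 %a, %b
      ret i32 %s
    }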
H
-

View File

@ -54,7 +54,8 @@ reviewer understand your code.
To get a full diff, use one of the following commands (or just use Arcanist
to upload your patch):
* ``git diff -U999999 other-branch``
* ``git show HEAD -U999999 > mypatch.patch``
* ``git format-patch -U999999 @{u}``
* ``svn diff --diff-cmd=diff -x -U999999``
To upload a new patch:

View File

@ -10,10 +10,16 @@
#ifndef LLVM_ADT_ALLOCATORLIST_H
#define LLVM_ADT_ALLOCATORLIST_H
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Support/Allocator.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>
namespace llvm {
@ -39,7 +45,8 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
T V;
};
typedef simple_ilist<Node> list_type;
using list_type = simple_ilist<Node>;
list_type List;
AllocatorT &getAlloc() { return *this; }
@ -51,13 +58,17 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
struct Cloner {
AllocatorList &AL;
Cloner(AllocatorList &AL) : AL(AL) {}
Node *operator()(const Node &N) const { return AL.create(N.V); }
};
struct Disposer {
AllocatorList &AL;
Disposer(AllocatorList &AL) : AL(AL) {}
void operator()(Node *N) const {
N->~Node();
AL.getAlloc().Deallocate(N);
@ -65,13 +76,13 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
};
public:
typedef T value_type;
typedef T *pointer;
typedef T &reference;
typedef const T *const_pointer;
typedef const T &const_reference;
typedef typename list_type::size_type size_type;
typedef typename list_type::difference_type difference_type;
using value_type = T;
using pointer = T *;
using reference = T &;
using const_pointer = const T *;
using const_reference = const T &;
using size_type = typename list_type::size_type;
using difference_type = typename list_type::difference_type;
private:
template <class ValueT, class IteratorBase>
@ -83,20 +94,18 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
friend class IteratorImpl;
friend AllocatorList;
typedef iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
IteratorBase, std::bidirectional_iterator_tag,
ValueT>
base_type;
using base_type =
iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>, IteratorBase,
std::bidirectional_iterator_tag, ValueT>;
public:
typedef ValueT value_type;
typedef ValueT *pointer;
typedef ValueT &reference;
using value_type = ValueT;
using pointer = ValueT *;
using reference = ValueT &;
IteratorImpl() = default;
IteratorImpl(const IteratorImpl &) = default;
IteratorImpl &operator=(const IteratorImpl &) = default;
~IteratorImpl() = default;
explicit IteratorImpl(const IteratorBase &I) : base_type(I) {}
@ -106,6 +115,8 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
OtherIteratorBase, IteratorBase>::value>::type * = nullptr)
: base_type(X.wrapped()) {}
~IteratorImpl() = default;
reference operator*() const { return base_type::wrapped()->V; }
pointer operator->() const { return &operator*(); }
@ -118,30 +129,34 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
};
public:
typedef IteratorImpl<T, typename list_type::iterator> iterator;
typedef IteratorImpl<T, typename list_type::reverse_iterator>
reverse_iterator;
typedef IteratorImpl<const T, typename list_type::const_iterator>
const_iterator;
typedef IteratorImpl<const T, typename list_type::const_reverse_iterator>
const_reverse_iterator;
using iterator = IteratorImpl<T, typename list_type::iterator>;
using reverse_iterator =
IteratorImpl<T, typename list_type::reverse_iterator>;
using const_iterator =
IteratorImpl<const T, typename list_type::const_iterator>;
using const_reverse_iterator =
IteratorImpl<const T, typename list_type::const_reverse_iterator>;
AllocatorList() = default;
AllocatorList(AllocatorList &&X)
: AllocatorT(std::move(X.getAlloc())), List(std::move(X.List)) {}
AllocatorList(const AllocatorList &X) {
List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
}
AllocatorList &operator=(AllocatorList &&X) {
clear(); // Dispose of current nodes explicitly.
List = std::move(X.List);
getAlloc() = std::move(X.getAlloc());
return *this;
}
AllocatorList &operator=(const AllocatorList &X) {
List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
return *this;
}
~AllocatorList() { clear(); }
void swap(AllocatorList &RHS) {

View File

@ -1,4 +1,4 @@
//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -12,12 +12,21 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <memory>
#include <type_traits>
#include <vector>
namespace llvm {
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
@ -32,28 +41,27 @@ namespace llvm {
template<typename T>
class LLVM_NODISCARD ArrayRef {
public:
typedef const T *iterator;
typedef const T *const_iterator;
typedef size_t size_type;
typedef std::reverse_iterator<iterator> reverse_iterator;
using iterator = const T *;
using const_iterator = const T *;
using size_type = size_t;
using reverse_iterator = std::reverse_iterator<iterator>;
private:
/// The start of the array, in an external buffer.
const T *Data;
const T *Data = nullptr;
/// The number of elements.
size_type Length;
size_type Length = 0;
public:
/// @name Constructors
/// @{
/// Construct an empty ArrayRef.
/*implicit*/ ArrayRef() : Data(nullptr), Length(0) {}
/*implicit*/ ArrayRef() = default;
/// Construct an empty ArrayRef from None.
/*implicit*/ ArrayRef(NoneType) : Data(nullptr), Length(0) {}
/*implicit*/ ArrayRef(NoneType) {}
/// Construct an ArrayRef from a single element.
/*implicit*/ ArrayRef(const T &OneElt)
@ -282,9 +290,8 @@ namespace llvm {
template<typename T>
class LLVM_NODISCARD MutableArrayRef : public ArrayRef<T> {
public:
typedef T *iterator;
typedef std::reverse_iterator<iterator> reverse_iterator;
using iterator = T *;
using reverse_iterator = std::reverse_iterator<iterator>;
/// Construct an empty MutableArrayRef.
/*implicit*/ MutableArrayRef() : ArrayRef<T>() {}
@ -416,19 +423,23 @@ namespace llvm {
/// This is a MutableArrayRef that owns its array.
template <typename T> class OwningArrayRef : public MutableArrayRef<T> {
public:
OwningArrayRef() {}
OwningArrayRef() = default;
OwningArrayRef(size_t Size) : MutableArrayRef<T>(new T[Size], Size) {}
OwningArrayRef(ArrayRef<T> Data)
: MutableArrayRef<T>(new T[Data.size()], Data.size()) {
std::copy(Data.begin(), Data.end(), this->begin());
}
OwningArrayRef(OwningArrayRef &&Other) { *this = Other; }
OwningArrayRef &operator=(OwningArrayRef &&Other) {
delete[] this->data();
this->MutableArrayRef<T>::operator=(Other);
Other.MutableArrayRef<T>::operator=(MutableArrayRef<T>());
return *this;
}
~OwningArrayRef() { delete[] this->data(); }
};
@ -517,13 +528,14 @@ namespace llvm {
// ArrayRefs can be treated like a POD type.
template <typename T> struct isPodLike;
template <typename T> struct isPodLike<ArrayRef<T> > {
template <typename T> struct isPodLike<ArrayRef<T>> {
static const bool value = true;
};
template <typename T> hash_code hash_value(ArrayRef<T> S) {
return hash_combine_range(S.begin(), S.end());
}
} // end namespace llvm
#endif // LLVM_ADT_ARRAYREF_H

View File

@ -25,7 +25,6 @@
#include "llvm/ADT/iterator_range.h"
#include <iterator>
#include <queue>
#include <set>
#include <utility>
namespace llvm {
@ -49,13 +48,13 @@ template <class GraphT,
class bf_iterator
: public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
public bf_iterator_storage<SetType> {
typedef std::iterator<std::forward_iterator_tag, typename GT::NodeRef> super;
using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
typedef typename GT::NodeRef NodeRef;
typedef typename GT::ChildIteratorType ChildItTy;
using NodeRef = typename GT::NodeRef;
using ChildItTy = typename GT::ChildIteratorType;
// First element is the node reference, second is the next child to visit.
typedef std::pair<NodeRef, Optional<ChildItTy>> QueueElement;
using QueueElement = std::pair<NodeRef, Optional<ChildItTy>>;
// Visit queue - used to maintain BFS ordering.
// Optional<> because we need markers for levels.
@ -109,7 +108,7 @@ class bf_iterator
}
public:
typedef typename super::pointer pointer;
using pointer = typename super::pointer;
// Provide static begin and end methods as our public "constructors"
static bf_iterator begin(const GraphT &G) {

View File

@ -1,4 +1,4 @@
//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
//===- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@ -40,12 +40,12 @@ class DAGDeltaAlgorithm {
virtual void anchor();
public:
typedef unsigned change_ty;
typedef std::pair<change_ty, change_ty> edge_ty;
using change_ty = unsigned;
using edge_ty = std::pair<change_ty, change_ty>;
// FIXME: Use a decent data structure.
typedef std::set<change_ty> changeset_ty;
typedef std::vector<changeset_ty> changesetlist_ty;
using changeset_ty = std::set<change_ty>;
using changesetlist_ty = std::vector<changeset_ty>;
public:
virtual ~DAGDeltaAlgorithm() = default;

View File

@ -1,4 +1,4 @@
//===--- DeltaAlgorithm.h - A Set Minimization Algorithm -------*- C++ -*--===//
//===- DeltaAlgorithm.h - A Set Minimization Algorithm ---------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@ -35,10 +35,10 @@ namespace llvm {
/// predicate.
class DeltaAlgorithm {
public:
typedef unsigned change_ty;
using change_ty = unsigned;
// FIXME: Use a decent data structure.
typedef std::set<change_ty> changeset_ty;
typedef std::vector<changeset_ty> changesetlist_ty;
using changeset_ty = std::set<change_ty>;
using changesetlist_ty = std::vector<changeset_ty>;
private:
/// Cache of failed test results. Successful test results are never cached
@ -90,4 +90,4 @@ class DeltaAlgorithm {
} // end namespace llvm
#endif
#endif // LLVM_ADT_DELTAALGORITHM_H

View File

@ -25,8 +25,8 @@
#include <cstddef>
#include <cstring>
#include <iterator>
#include <limits>
#include <new>
#include <type_traits>
#include <utility>
namespace llvm {
@ -57,14 +57,15 @@ class DenseMapBase : public DebugEpochBase {
using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
public:
typedef unsigned size_type;
typedef KeyT key_type;
typedef ValueT mapped_type;
typedef BucketT value_type;
using size_type = unsigned;
using key_type = KeyT;
using mapped_type = ValueT;
using value_type = BucketT;
using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
using const_iterator =
DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
const_iterator;
inline iterator begin() {
// When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
@ -387,15 +388,18 @@ class DenseMapBase : public DebugEpochBase {
static unsigned getHashValue(const KeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
template<typename LookupKeyT>
static unsigned getHashValue(const LookupKeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
static const KeyT getEmptyKey() {
static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
"Must pass the derived type to this template!");
return KeyInfoT::getEmptyKey();
}
static const KeyT getTombstoneKey() {
return KeyInfoT::getTombstoneKey();
}
@ -404,39 +408,51 @@ class DenseMapBase : public DebugEpochBase {
unsigned getNumEntries() const {
return static_cast<const DerivedT *>(this)->getNumEntries();
}
void setNumEntries(unsigned Num) {
static_cast<DerivedT *>(this)->setNumEntries(Num);
}
void incrementNumEntries() {
setNumEntries(getNumEntries() + 1);
}
void decrementNumEntries() {
setNumEntries(getNumEntries() - 1);
}
unsigned getNumTombstones() const {
return static_cast<const DerivedT *>(this)->getNumTombstones();
}
void setNumTombstones(unsigned Num) {
static_cast<DerivedT *>(this)->setNumTombstones(Num);
}
void incrementNumTombstones() {
setNumTombstones(getNumTombstones() + 1);
}
void decrementNumTombstones() {
setNumTombstones(getNumTombstones() - 1);
}
const BucketT *getBuckets() const {
return static_cast<const DerivedT *>(this)->getBuckets();
}
BucketT *getBuckets() {
return static_cast<DerivedT *>(this)->getBuckets();
}
unsigned getNumBuckets() const {
return static_cast<const DerivedT *>(this)->getNumBuckets();
}
BucketT *getBucketsEnd() {
return getBuckets() + getNumBuckets();
}
const BucketT *getBucketsEnd() const {
return getBuckets() + getNumBuckets();
}
@ -587,10 +603,11 @@ template <typename KeyT, typename ValueT,
typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
KeyT, ValueT, KeyInfoT, BucketT> {
friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
// Lift some types from the dependent base class into this class for
// simplicity of referring to them.
typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
BucketT *Buckets;
unsigned NumEntries;
@ -705,6 +722,7 @@ class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
unsigned getNumEntries() const {
return NumEntries;
}
void setNumEntries(unsigned Num) {
NumEntries = Num;
}
@ -712,6 +730,7 @@ class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
unsigned getNumTombstones() const {
return NumTombstones;
}
void setNumTombstones(unsigned Num) {
NumTombstones = Num;
}
@ -743,10 +762,12 @@ class SmallDenseMap
: public DenseMapBase<
SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
ValueT, KeyInfoT, BucketT> {
friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
// Lift some types from the dependent base class into this class for
// simplicity of referring to them.
typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
static_assert(isPowerOf2_64(InlineBuckets),
"InlineBuckets must be a power of 2.");
@ -972,6 +993,7 @@ class SmallDenseMap
unsigned getNumEntries() const {
return NumEntries;
}
void setNumEntries(unsigned Num) {
// NumEntries is hardcoded to be 31 bits wide.
assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
@ -981,6 +1003,7 @@ class SmallDenseMap
unsigned getNumTombstones() const {
return NumTombstones;
}
void setNumTombstones(unsigned Num) {
NumTombstones = Num;
}
@ -992,15 +1015,18 @@ class SmallDenseMap
// 'storage.buffer' static type is 'char *'.
return reinterpret_cast<const BucketT *>(storage.buffer);
}
BucketT *getInlineBuckets() {
return const_cast<BucketT *>(
const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
}
const LargeRep *getLargeRep() const {
assert(!Small);
// Note, same rule about aliasing as with getInlineBuckets.
return reinterpret_cast<const LargeRep *>(storage.buffer);
}
LargeRep *getLargeRep() {
return const_cast<LargeRep *>(
const_cast<const SmallDenseMap *>(this)->getLargeRep());
@ -1009,10 +1035,12 @@ class SmallDenseMap
const BucketT *getBuckets() const {
return Small ? getInlineBuckets() : getLargeRep()->Buckets;
}
BucketT *getBuckets() {
return const_cast<BucketT *>(
const_cast<const SmallDenseMap *>(this)->getBuckets());
}
unsigned getNumBuckets() const {
return Small ? InlineBuckets : getLargeRep()->NumBuckets;
}
@ -1037,23 +1065,25 @@ class SmallDenseMap
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
public:
typedef ptrdiff_t difference_type;
typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
value_type;
typedef value_type *pointer;
typedef value_type &reference;
typedef std::forward_iterator_tag iterator_category;
using difference_type = ptrdiff_t;
using value_type =
typename std::conditional<IsConst, const Bucket, Bucket>::type;
using pointer = value_type *;
using reference = value_type &;
using iterator_category = std::forward_iterator_tag;
private:
pointer Ptr, End;
pointer Ptr = nullptr;
pointer End = nullptr;
public:
DenseMapIterator() : Ptr(nullptr), End(nullptr) {}
DenseMapIterator() = default;
DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
bool NoAdvance = false)

View File

@ -18,7 +18,10 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>
namespace llvm {
@ -38,15 +41,18 @@ struct DenseMapInfo<T*> {
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
static inline T* getTombstoneKey() {
uintptr_t Val = static_cast<uintptr_t>(-2);
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
static unsigned getHashValue(const T *PtrVal) {
return (unsigned((uintptr_t)PtrVal) >> 4) ^
(unsigned((uintptr_t)PtrVal) >> 9);
}
static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
};
@ -55,6 +61,7 @@ template<> struct DenseMapInfo<char> {
static inline char getEmptyKey() { return ~0; }
static inline char getTombstoneKey() { return ~0 - 1; }
static unsigned getHashValue(const char& Val) { return Val * 37U; }
static bool isEqual(const char &LHS, const char &RHS) {
return LHS == RHS;
}
@ -65,6 +72,7 @@ template <> struct DenseMapInfo<unsigned short> {
static inline unsigned short getEmptyKey() { return 0xFFFF; }
static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; }
static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; }
static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) {
return LHS == RHS;
}
@ -75,6 +83,7 @@ template<> struct DenseMapInfo<unsigned> {
static inline unsigned getEmptyKey() { return ~0U; }
static inline unsigned getTombstoneKey() { return ~0U - 1; }
static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
return LHS == RHS;
}
@ -84,9 +93,11 @@ template<> struct DenseMapInfo<unsigned> {
template<> struct DenseMapInfo<unsigned long> {
static inline unsigned long getEmptyKey() { return ~0UL; }
static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
static unsigned getHashValue(const unsigned long& Val) {
return (unsigned)(Val * 37UL);
}
static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
return LHS == RHS;
}
@ -96,9 +107,11 @@ template<> struct DenseMapInfo<unsigned long> {
template<> struct DenseMapInfo<unsigned long long> {
static inline unsigned long long getEmptyKey() { return ~0ULL; }
static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
static unsigned getHashValue(const unsigned long long& Val) {
return (unsigned)(Val * 37ULL);
}
static bool isEqual(const unsigned long long& LHS,
const unsigned long long& RHS) {
return LHS == RHS;
@ -118,6 +131,7 @@ template<> struct DenseMapInfo<int> {
static inline int getEmptyKey() { return 0x7fffffff; }
static inline int getTombstoneKey() { return -0x7fffffff - 1; }
static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
static bool isEqual(const int& LHS, const int& RHS) {
return LHS == RHS;
}
@ -128,10 +142,13 @@ template<> struct DenseMapInfo<long> {
static inline long getEmptyKey() {
return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
}
static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
static unsigned getHashValue(const long& Val) {
return (unsigned)(Val * 37UL);
}
static bool isEqual(const long& LHS, const long& RHS) {
return LHS == RHS;
}
@ -141,9 +158,11 @@ template<> struct DenseMapInfo<long> {
template<> struct DenseMapInfo<long long> {
static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
static unsigned getHashValue(const long long& Val) {
return (unsigned)(Val * 37ULL);
}
static bool isEqual(const long long& LHS,
const long long& RHS) {
return LHS == RHS;
@ -152,19 +171,21 @@ template<> struct DenseMapInfo<long long> {
// Provide DenseMapInfo for all pairs whose members have info.
template<typename T, typename U>
struct DenseMapInfo<std::pair<T, U> > {
typedef std::pair<T, U> Pair;
typedef DenseMapInfo<T> FirstInfo;
typedef DenseMapInfo<U> SecondInfo;
struct DenseMapInfo<std::pair<T, U>> {
using Pair = std::pair<T, U>;
using FirstInfo = DenseMapInfo<T>;
using SecondInfo = DenseMapInfo<U>;
static inline Pair getEmptyKey() {
return std::make_pair(FirstInfo::getEmptyKey(),
SecondInfo::getEmptyKey());
}
static inline Pair getTombstoneKey() {
return std::make_pair(FirstInfo::getTombstoneKey(),
SecondInfo::getTombstoneKey());
}
static unsigned getHashValue(const Pair& PairVal) {
uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
| (uint64_t)SecondInfo::getHashValue(PairVal.second);
@ -178,6 +199,7 @@ struct DenseMapInfo<std::pair<T, U> > {
key ^= (key >> 31);
return (unsigned)key;
}
static bool isEqual(const Pair &LHS, const Pair &RHS) {
return FirstInfo::isEqual(LHS.first, RHS.first) &&
SecondInfo::isEqual(LHS.second, RHS.second);
@ -190,16 +212,19 @@ template <> struct DenseMapInfo<StringRef> {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)),
0);
}
static inline StringRef getTombstoneKey() {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)),
0);
}
static unsigned getHashValue(StringRef Val) {
assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
assert(Val.data() != getTombstoneKey().data() &&
"Cannot hash the tombstone key!");
return (unsigned)(hash_value(Val));
}
static bool isEqual(StringRef LHS, StringRef RHS) {
if (RHS.data() == getEmptyKey().data())
return LHS.data() == getEmptyKey().data();
@ -215,16 +240,19 @@ template <typename T> struct DenseMapInfo<ArrayRef<T>> {
return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)),
size_t(0));
}
static inline ArrayRef<T> getTombstoneKey() {
return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)),
size_t(0));
}
static unsigned getHashValue(ArrayRef<T> Val) {
assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
assert(Val.data() != getTombstoneKey().data() &&
"Cannot hash the tombstone key!");
return (unsigned)(hash_value(Val));
}
static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
if (RHS.data() == getEmptyKey().data())
return LHS.data() == getEmptyKey().data();
@ -236,4 +264,4 @@ template <typename T> struct DenseMapInfo<ArrayRef<T>> {
} // end namespace llvm
#endif
#endif // LLVM_ADT_DENSEMAPINFO_H

View File

@ -15,11 +15,18 @@
#define LLVM_ADT_DENSESET_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <iterator>
#include <utility>
namespace llvm {
namespace detail {
struct DenseSetEmpty {};
// Use the empty base class trick so we can create a DenseMap where the buckets
@ -48,13 +55,14 @@ class DenseSetImpl {
static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
"DenseMap buckets unexpectedly large!");
MapTy TheMap;
template <typename T>
using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
public:
typedef ValueT key_type;
typedef ValueT value_type;
typedef unsigned size_type;
using key_type = ValueT;
using value_type = ValueT;
using size_type = unsigned;
explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
@ -100,11 +108,11 @@ class DenseSetImpl {
friend class ConstIterator;
public:
typedef typename MapTy::iterator::difference_type difference_type;
typedef ValueT value_type;
typedef value_type *pointer;
typedef value_type &reference;
typedef std::forward_iterator_tag iterator_category;
using difference_type = typename MapTy::iterator::difference_type;
using value_type = ValueT;
using pointer = value_type *;
using reference = value_type &;
using iterator_category = std::forward_iterator_tag;
Iterator() = default;
Iterator(const typename MapTy::iterator &i) : I(i) {}
@ -126,16 +134,14 @@ class DenseSetImpl {
friend class Iterator;
public:
typedef typename MapTy::const_iterator::difference_type difference_type;
typedef ValueT value_type;
typedef value_type *pointer;
typedef value_type &reference;
typedef std::forward_iterator_tag iterator_category;
ConstIterator(const Iterator &B) : I(B.I) {}
using difference_type = typename MapTy::const_iterator::difference_type;
using value_type = ValueT;
using pointer = value_type *;
using reference = value_type &;
using iterator_category = std::forward_iterator_tag;
ConstIterator() = default;
ConstIterator(const Iterator &B) : I(B.I) {}
ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
const ValueT &operator*() const { return I->getFirst(); }
@ -147,8 +153,8 @@ class DenseSetImpl {
bool operator!=(const ConstIterator& X) const { return I != X.I; }
};
typedef Iterator iterator;
typedef ConstIterator const_iterator;
using iterator = Iterator;
using const_iterator = ConstIterator;
iterator begin() { return Iterator(TheMap.begin()); }
iterator end() { return Iterator(TheMap.end()); }
@ -208,7 +214,7 @@ class DenseSetImpl {
}
};
} // namespace detail
} // end namespace detail
/// Implements a dense probed hash-table based set.
template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
@ -246,4 +252,4 @@ class SmallDenseSet
} // end namespace llvm
#endif
#endif // LLVM_ADT_DENSESET_H

View File

@ -68,13 +68,14 @@ class df_iterator_storage<SetType, true> {
// cross edges in the spanning tree but is not used in the common case.
template <typename NodeRef, unsigned SmallSize=8>
struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
typedef SmallPtrSet<NodeRef, SmallSize> BaseSet;
typedef typename BaseSet::iterator iterator;
std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N) ; }
using BaseSet = SmallPtrSet<NodeRef, SmallSize>;
using iterator = typename BaseSet::iterator;
std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N); }
template <typename IterT>
void insert(IterT Begin, IterT End) { BaseSet::insert(Begin,End); }
void completed(NodeRef) { }
void completed(NodeRef) {}
};
// Generic Depth First Iterator
@ -85,15 +86,14 @@ template <class GraphT,
class df_iterator
: public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
public df_iterator_storage<SetType, ExtStorage> {
typedef std::iterator<std::forward_iterator_tag, typename GT::NodeRef> super;
typedef typename GT::NodeRef NodeRef;
typedef typename GT::ChildIteratorType ChildItTy;
using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
using NodeRef = typename GT::NodeRef;
using ChildItTy = typename GT::ChildIteratorType;
// First element is node reference, second is the 'next child' to visit.
// The second child is initialized lazily to pick up graph changes during the
// DFS.
typedef std::pair<NodeRef, Optional<ChildItTy>> StackElement;
using StackElement = std::pair<NodeRef, Optional<ChildItTy>>;
// VisitStack - Used to maintain the ordering. Top = current block
std::vector<StackElement> VisitStack;
@ -103,12 +103,15 @@ class df_iterator
this->Visited.insert(Node);
VisitStack.push_back(StackElement(Node, None));
}
inline df_iterator() = default; // End is when stack is empty
inline df_iterator(NodeRef Node, SetType &S)
: df_iterator_storage<SetType, ExtStorage>(S) {
if (this->Visited.insert(Node).second)
VisitStack.push_back(StackElement(Node, None));
}
inline df_iterator(SetType &S)
: df_iterator_storage<SetType, ExtStorage>(S) {
// End is when stack is empty
@ -142,7 +145,7 @@ class df_iterator
}
public:
typedef typename super::pointer pointer;
using pointer = typename super::pointer;
// Provide static begin and end methods as our public "constructors"
static df_iterator begin(const GraphT &G) {

View File

@ -1,4 +1,4 @@
//===-- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes --*- C++ -*-===//
//===- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -69,6 +69,7 @@ class EquivalenceClasses {
/// leader is determined by a bit stolen from one of the pointers.
class ECValue {
friend class EquivalenceClasses;
mutable const ECValue *Leader, *Next;
ElemTy Data;
@ -141,14 +142,14 @@ class EquivalenceClasses {
//
/// iterator* - Provides a way to iterate over all values in the set.
typedef typename std::set<ECValue>::const_iterator iterator;
using iterator = typename std::set<ECValue>::const_iterator;
iterator begin() const { return TheMapping.begin(); }
iterator end() const { return TheMapping.end(); }
bool empty() const { return TheMapping.empty(); }
/// member_* Iterate over the members of an equivalence class.
///
class member_iterator;
member_iterator member_begin(iterator I) const {
// Only leaders provide anything to iterate over.
@ -204,7 +205,6 @@ class EquivalenceClasses {
/// equivalence class it is in. This does the path-compression part that
/// makes union-find "union findy". This returns an end iterator if the value
/// is not in the equivalence class.
///
member_iterator findLeader(iterator I) const {
if (I == TheMapping.end()) return member_end();
return member_iterator(I->getLeader());
@ -241,15 +241,17 @@ class EquivalenceClasses {
class member_iterator : public std::iterator<std::forward_iterator_tag,
const ElemTy, ptrdiff_t> {
typedef std::iterator<std::forward_iterator_tag,
const ElemTy, ptrdiff_t> super;
const ECValue *Node;
friend class EquivalenceClasses;
using super = std::iterator<std::forward_iterator_tag,
const ElemTy, ptrdiff_t>;
const ECValue *Node;
public:
typedef size_t size_type;
typedef typename super::pointer pointer;
typedef typename super::reference reference;
using size_type = size_t;
using pointer = typename super::pointer;
using reference = typename super::reference;
explicit member_iterator() = default;
explicit member_iterator(const ECValue *N) : Node(N) {}

View File

@ -40,7 +40,7 @@ namespace llvm {
/// FoldingSetNode. The node class must also define a Profile method used to
/// establish the unique bits of data for the node. The Profile method is
/// passed a FoldingSetNodeID object which is used to gather the bits. Just
/// call one of the Add* functions defined in the FoldingSetImpl::NodeID class.
/// call one of the Add* functions defined in the FoldingSetBase::NodeID class.
/// NOTE: That the folding set does not own the nodes and it is the
/// responsibility of the user to dispose of the nodes.
///
@ -104,13 +104,13 @@ class FoldingSetNodeID;
class StringRef;
//===----------------------------------------------------------------------===//
/// FoldingSetImpl - Implements the folding set functionality. The main
/// FoldingSetBase - Implements the folding set functionality. The main
/// structure is an array of buckets. Each bucket is indexed by the hash of
/// the nodes it contains. The bucket itself points to the nodes contained
/// in the bucket via a singly linked list. The last node in the list points
/// back to the bucket to facilitate node removal.
///
class FoldingSetImpl {
class FoldingSetBase {
virtual void anchor(); // Out of line virtual method.
protected:
@ -126,10 +126,10 @@ class FoldingSetImpl {
/// is greater than twice the number of buckets.
unsigned NumNodes;
explicit FoldingSetImpl(unsigned Log2InitSize = 6);
FoldingSetImpl(FoldingSetImpl &&Arg);
FoldingSetImpl &operator=(FoldingSetImpl &&RHS);
~FoldingSetImpl();
explicit FoldingSetBase(unsigned Log2InitSize = 6);
FoldingSetBase(FoldingSetBase &&Arg);
FoldingSetBase &operator=(FoldingSetBase &&RHS);
~FoldingSetBase();
public:
//===--------------------------------------------------------------------===//
@ -152,33 +152,6 @@ class FoldingSetImpl {
/// clear - Remove all nodes from the folding set.
void clear();
/// RemoveNode - Remove a node from the folding set, returning true if one
/// was removed or false if the node was not in the folding set.
bool RemoveNode(Node *N);
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and return
/// it instead.
Node *GetOrInsertNode(Node *N);
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void InsertNode(Node *N, void *InsertPos);
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set.
void InsertNode(Node *N) {
Node *Inserted = GetOrInsertNode(N);
(void)Inserted;
assert(Inserted == N && "Node already inserted!");
}
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return NumNodes; }
@ -220,6 +193,28 @@ class FoldingSetImpl {
/// ComputeNodeHash - Instantiations of the FoldingSet template implement
/// this function to compute a hash value for the given node.
virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
// The below methods are protected to encourage subclasses to provide a more
// type-safe API.
/// RemoveNode - Remove a node from the folding set, returning true if one
/// was removed or false if the node was not in the folding set.
bool RemoveNode(Node *N);
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and return
/// it instead.
Node *GetOrInsertNode(Node *N);
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void InsertNode(Node *N, void *InsertPos);
};
//===----------------------------------------------------------------------===//
@ -293,7 +288,7 @@ class FoldingSetNodeIDRef {
FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
/// used to lookup the node in the FoldingSetImpl.
/// used to lookup the node in the FoldingSetBase.
unsigned ComputeHash() const;
bool operator==(FoldingSetNodeIDRef) const;
@ -345,7 +340,7 @@ class FoldingSetNodeID {
inline void clear() { Bits.clear(); }
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
/// to lookup the node in the FoldingSetImpl.
/// to lookup the node in the FoldingSetBase.
unsigned ComputeHash() const;
/// operator== - Used to compare two nodes to each other.
@ -368,7 +363,7 @@ class FoldingSetNodeID {
};
// Convenience type to hide the implementation of the folding set.
typedef FoldingSetImpl::Node FoldingSetNode;
typedef FoldingSetBase::Node FoldingSetNode;
template<class T> class FoldingSetIterator;
template<class T> class FoldingSetBucketIterator;
@ -407,6 +402,71 @@ DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
return TempID.ComputeHash();
}
//===----------------------------------------------------------------------===//
/// FoldingSetImpl - An implementation detail that lets us share code between
/// FoldingSet and ContextualFoldingSet.
template <class T> class FoldingSetImpl : public FoldingSetBase {
protected:
explicit FoldingSetImpl(unsigned Log2InitSize)
: FoldingSetBase(Log2InitSize) {}
FoldingSetImpl(FoldingSetImpl &&Arg) = default;
FoldingSetImpl &operator=(FoldingSetImpl &&RHS) = default;
~FoldingSetImpl() = default;
public:
typedef FoldingSetIterator<T> iterator;
iterator begin() { return iterator(Buckets); }
iterator end() { return iterator(Buckets+NumBuckets); }
typedef FoldingSetIterator<const T> const_iterator;
const_iterator begin() const { return const_iterator(Buckets); }
const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
typedef FoldingSetBucketIterator<T> bucket_iterator;
bucket_iterator bucket_begin(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
}
bucket_iterator bucket_end(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
}
/// RemoveNode - Remove a node from the folding set, returning true if one
/// was removed or false if the node was not in the folding set.
bool RemoveNode(T *N) { return FoldingSetBase::RemoveNode(N); }
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and
/// return it instead.
T *GetOrInsertNode(T *N) {
return static_cast<T *>(FoldingSetBase::GetOrInsertNode(N));
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(ID, InsertPos));
}
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void InsertNode(T *N, void *InsertPos) {
FoldingSetBase::InsertNode(N, InsertPos);
}
/// InsertNode - Insert the specified node into the folding set, knowing that
/// it is not already in the folding set.
void InsertNode(T *N) {
T *Inserted = GetOrInsertNode(N);
(void)Inserted;
assert(Inserted == N && "Node already inserted!");
}
};
//===----------------------------------------------------------------------===//
/// FoldingSet - This template class is used to instantiate a specialized
/// implementation of the folding set to the node class T. T must be a
@ -416,8 +476,10 @@ DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
/// moved-from state is not a valid state for anything other than
/// move-assigning and destroying. This is primarily to enable movable APIs
/// that incorporate these objects.
template <class T> class FoldingSet final : public FoldingSetImpl {
private:
template <class T> class FoldingSet final : public FoldingSetImpl<T> {
using Super = FoldingSetImpl<T>;
using Node = typename Super::Node;
/// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
@ -442,45 +504,10 @@ template <class T> class FoldingSet final : public FoldingSetImpl {
public:
explicit FoldingSet(unsigned Log2InitSize = 6)
: FoldingSetImpl(Log2InitSize) {}
: Super(Log2InitSize) {}
FoldingSet(FoldingSet &&Arg) : FoldingSetImpl(std::move(Arg)) {}
FoldingSet &operator=(FoldingSet &&RHS) {
(void)FoldingSetImpl::operator=(std::move(RHS));
return *this;
}
typedef FoldingSetIterator<T> iterator;
iterator begin() { return iterator(Buckets); }
iterator end() { return iterator(Buckets+NumBuckets); }
typedef FoldingSetIterator<const T> const_iterator;
const_iterator begin() const { return const_iterator(Buckets); }
const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
typedef FoldingSetBucketIterator<T> bucket_iterator;
bucket_iterator bucket_begin(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
}
bucket_iterator bucket_end(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
}
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N' and
/// return it instead.
T *GetOrInsertNode(Node *N) {
return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
/// return it. If not, return the insertion token that will make insertion
/// faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
}
FoldingSet(FoldingSet &&Arg) = default;
FoldingSet &operator=(FoldingSet &&RHS) = default;
};
//===----------------------------------------------------------------------===//
@ -493,74 +520,42 @@ template <class T> class FoldingSet final : public FoldingSetImpl {
/// function with signature
/// void Profile(FoldingSetNodeID &, Ctx);
template <class T, class Ctx>
class ContextualFoldingSet final : public FoldingSetImpl {
class ContextualFoldingSet final : public FoldingSetImpl<T> {
// Unfortunately, this can't derive from FoldingSet<T> because the
// construction vtable for FoldingSet<T> requires
// construction of the vtable for FoldingSet<T> requires
// FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
// requires a single-argument T::Profile().
private:
using Super = FoldingSetImpl<T>;
using Node = typename Super::Node;
Ctx Context;
/// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
void GetNodeProfile(FoldingSetImpl::Node *N,
FoldingSetNodeID &ID) const override {
void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
T *TN = static_cast<T *>(N);
ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
}
bool NodeEquals(FoldingSetImpl::Node *N, const FoldingSetNodeID &ID,
unsigned IDHash, FoldingSetNodeID &TempID) const override {
bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
Context);
}
unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
FoldingSetNodeID &TempID) const override {
unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
}
public:
explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
: FoldingSetImpl(Log2InitSize), Context(Context)
: Super(Log2InitSize), Context(Context)
{}
Ctx getContext() const { return Context; }
typedef FoldingSetIterator<T> iterator;
iterator begin() { return iterator(Buckets); }
iterator end() { return iterator(Buckets+NumBuckets); }
typedef FoldingSetIterator<const T> const_iterator;
const_iterator begin() const { return const_iterator(Buckets); }
const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
typedef FoldingSetBucketIterator<T> bucket_iterator;
bucket_iterator bucket_begin(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
}
bucket_iterator bucket_end(unsigned hash) {
return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
}
/// GetOrInsertNode - If there is an existing simple Node exactly
/// equal to the specified node, return it. Otherwise, insert 'N'
/// and return it instead.
T *GetOrInsertNode(Node *N) {
return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
}
/// FindNodeOrInsertPos - Look up the node specified by ID. If it
/// exists, return it. If not, return the insertion token that will
/// make insertion faster.
T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
}
};
//===----------------------------------------------------------------------===//
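A minimal sketch of the uniquing pattern these classes support, assuming only the API shown above; the node type `MyNode`, the helper `getOrCreate`, and the bare `new` are illustrative, not part of this change.

#include "llvm/ADT/FoldingSet.h"

struct MyNode : llvm::FoldingSetNode {
  unsigned Val;
  explicit MyNode(unsigned V) : Val(V) {}
  // The default FoldingSetTrait forwards to this single-argument Profile().
  void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Val); }
};

MyNode *getOrCreate(llvm::FoldingSet<MyNode> &Set, unsigned V) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(V);
  void *InsertPos = nullptr;
  if (MyNode *N = Set.FindNodeOrInsertPos(ID, InsertPos))
    return N;                     // an equivalent node is already uniqued
  MyNode *N = new MyNode(V);      // real code would typically use an allocator
  Set.InsertNode(N, InsertPos);   // insert at the slot computed by the lookup
  return N;
}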



@ -1,4 +1,4 @@
//===-- llvm/ADT/GraphTraits.h - Graph traits template ----------*- C++ -*-===//
//===- llvm/ADT/GraphTraits.h - Graph traits template -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -41,7 +41,6 @@ struct GraphTraits {
// static ChildIteratorType child_end (NodeRef)
// Return iterators that point to the beginning and ending of the child
// node list for the specified node.
//
// typedef ...iterator nodes_iterator; - dereference to a NodeRef
// static nodes_iterator nodes_begin(GraphType *G)
@ -50,7 +49,6 @@ struct GraphTraits {
// static unsigned size (GraphType *G)
// Return total number of nodes in the graph
//
// If anyone tries to use this class without having an appropriate
// specialization, make an error. If you get this error, it's because you
@ -58,11 +56,9 @@ struct GraphTraits {
// graph, or you need to define it for a new graph type. Either that or
// your argument to XXX_begin(...) is unknown or needs to have the proper .h
// file #include'd.
//
typedef typename GraphType::UnknownGraphTypeError NodeRef;
using NodeRef = typename GraphType::UnknownGraphTypeError;
};
// Inverse - This class is used as a little marker class to tell the graph
// iterator to iterate over the graph in a graph defined "Inverse" ordering.
// Not all graphs define an inverse ordering, and if they do, it depends on
@ -73,7 +69,7 @@ struct GraphTraits {
// for (; I != E; ++I) { ... }
//
// Which is equivalent to:
// df_iterator<Inverse<Method*> > I = idf_begin(M), E = idf_end(M);
// df_iterator<Inverse<Method*>> I = idf_begin(M), E = idf_end(M);
// for (; I != E; ++I) { ... }
//
template <class GraphType>
@ -114,6 +110,7 @@ inverse_children(const typename GraphTraits<GraphType>::NodeRef &G) {
return make_range(GraphTraits<Inverse<GraphType>>::child_begin(G),
GraphTraits<Inverse<GraphType>>::child_end(G));
}
} // End llvm namespace
#endif
} // end namespace llvm
#endif // LLVM_ADT_GRAPHTRAITS_H
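For illustration only (not in the diff), a GraphTraits specialization for a hypothetical adjacency-list graph, following the members this traits class documents:

#include "llvm/ADT/GraphTraits.h"
#include <vector>

struct TinyGraph {                            // hypothetical graph type
  struct Node { std::vector<Node *> Succs; };
  std::vector<Node *> Nodes;
};

namespace llvm {
template <> struct GraphTraits<TinyGraph *> {
  using NodeRef = TinyGraph::Node *;
  using ChildIteratorType = std::vector<TinyGraph::Node *>::iterator;
  static NodeRef getEntryNode(TinyGraph *G) { return G->Nodes.front(); }
  static ChildIteratorType child_begin(NodeRef N) { return N->Succs.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
};
} // end namespace llvm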


@ -63,8 +63,8 @@ class ImmutableListImpl : public FoldingSetNode {
template <typename T>
class ImmutableList {
public:
typedef T value_type;
typedef ImmutableListFactory<T> Factory;
using value_type = T;
using Factory = ImmutableListFactory<T>;
private:
const ImmutableListImpl<T>* X;
@ -141,8 +141,8 @@ class ImmutableList {
template <typename T>
class ImmutableListFactory {
typedef ImmutableListImpl<T> ListTy;
typedef FoldingSet<ListTy> CacheTy;
using ListTy = ImmutableListImpl<T>;
using CacheTy = FoldingSet<ListTy>;
CacheTy Cache;
uintptr_t Allocator;


@ -26,12 +26,12 @@ namespace llvm {
/// only the first element (the key) is used by isEqual and isLess.
template <typename T, typename S>
struct ImutKeyValueInfo {
typedef const std::pair<T,S> value_type;
typedef const value_type& value_type_ref;
typedef const T key_type;
typedef const T& key_type_ref;
typedef const S data_type;
typedef const S& data_type_ref;
using value_type = const std::pair<T,S>;
using value_type_ref = const value_type&;
using key_type = const T;
using key_type_ref = const T&;
using data_type = const S;
using data_type_ref = const S&;
static inline key_type_ref KeyOfValue(value_type_ref V) {
return V.first;
@ -62,13 +62,13 @@ template <typename KeyT, typename ValT,
typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
class ImmutableMap {
public:
typedef typename ValInfo::value_type value_type;
typedef typename ValInfo::value_type_ref value_type_ref;
typedef typename ValInfo::key_type key_type;
typedef typename ValInfo::key_type_ref key_type_ref;
typedef typename ValInfo::data_type data_type;
typedef typename ValInfo::data_type_ref data_type_ref;
typedef ImutAVLTree<ValInfo> TreeTy;
using value_type = typename ValInfo::value_type;
using value_type_ref = typename ValInfo::value_type_ref;
using key_type = typename ValInfo::key_type;
using key_type_ref = typename ValInfo::key_type_ref;
using data_type = typename ValInfo::data_type;
using data_type_ref = typename ValInfo::data_type_ref;
using TreeTy = ImutAVLTree<ValInfo>;
protected:
TreeTy* Root;
@ -86,6 +86,10 @@ class ImmutableMap {
if (Root) { Root->retain(); }
}
~ImmutableMap() {
if (Root) { Root->release(); }
}
ImmutableMap &operator=(const ImmutableMap &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@ -95,10 +99,6 @@ class ImmutableMap {
return *this;
}
~ImmutableMap() {
if (Root) { Root->release(); }
}
class Factory {
typename TreeTy::Factory F;
const bool Canonicalize;
@ -166,12 +166,14 @@ class ImmutableMap {
template <typename Callback>
struct CBWrapper {
Callback C;
void operator()(value_type_ref V) { C(V.first,V.second); }
};
template <typename Callback>
struct CBWrapperRef {
Callback &C;
CBWrapperRef(Callback& c) : C(c) {}
void operator()(value_type_ref V) { C(V.first,V.second); }
@ -254,14 +256,14 @@ template <typename KeyT, typename ValT,
typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
class ImmutableMapRef {
public:
typedef typename ValInfo::value_type value_type;
typedef typename ValInfo::value_type_ref value_type_ref;
typedef typename ValInfo::key_type key_type;
typedef typename ValInfo::key_type_ref key_type_ref;
typedef typename ValInfo::data_type data_type;
typedef typename ValInfo::data_type_ref data_type_ref;
typedef ImutAVLTree<ValInfo> TreeTy;
typedef typename TreeTy::Factory FactoryTy;
using value_type = typename ValInfo::value_type;
using value_type_ref = typename ValInfo::value_type_ref;
using key_type = typename ValInfo::key_type;
using key_type_ref = typename ValInfo::key_type_ref;
using data_type = typename ValInfo::data_type;
using data_type_ref = typename ValInfo::data_type_ref;
using TreeTy = ImutAVLTree<ValInfo>;
using FactoryTy = typename TreeTy::Factory;
protected:
TreeTy *Root;
@ -292,6 +294,11 @@ class ImmutableMapRef {
}
}
~ImmutableMapRef() {
if (Root)
Root->release();
}
ImmutableMapRef &operator=(const ImmutableMapRef &X) {
if (Root != X.Root) {
if (X.Root)
@ -306,11 +313,6 @@ class ImmutableMapRef {
return *this;
}
~ImmutableMapRef() {
if (Root)
Root->release();
}
static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
return ImmutableMapRef(0, F);
}
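A small sketch of how the Factory/value API above is typically used; the calls are assumed from the interface shown here and the names are illustrative, not part of the change.

#include "llvm/ADT/ImmutableMap.h"

void demoImmutableMap() {
  llvm::ImmutableMap<int, int>::Factory F;
  llvm::ImmutableMap<int, int> Empty = F.getEmptyMap();
  // add() returns a new map; Empty itself is never modified.
  llvm::ImmutableMap<int, int> M = F.add(Empty, /*Key=*/1, /*Data=*/10);
  if (const int *D = M.lookup(1))
    (void)*D;                      // 10
}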


@ -41,18 +41,16 @@ template <typename ImutInfo> class ImutAVLTreeGenericIterator;
template <typename ImutInfo >
class ImutAVLTree {
public:
typedef typename ImutInfo::key_type_ref key_type_ref;
typedef typename ImutInfo::value_type value_type;
typedef typename ImutInfo::value_type_ref value_type_ref;
using key_type_ref = typename ImutInfo::key_type_ref;
using value_type = typename ImutInfo::value_type;
using value_type_ref = typename ImutInfo::value_type_ref;
using Factory = ImutAVLFactory<ImutInfo>;
using iterator = ImutAVLTreeInOrderIterator<ImutInfo>;
typedef ImutAVLFactory<ImutInfo> Factory;
friend class ImutAVLFactory<ImutInfo>;
friend class ImutIntervalAVLFactory<ImutInfo>;
friend class ImutAVLTreeGenericIterator<ImutInfo>;
typedef ImutAVLTreeInOrderIterator<ImutInfo> iterator;
//===----------------------------------------------------===//
// Public Interface.
//===----------------------------------------------------===//
@ -225,17 +223,17 @@ class ImutAVLTree {
Factory *factory;
ImutAVLTree *left;
ImutAVLTree *right;
ImutAVLTree *prev;
ImutAVLTree *next;
ImutAVLTree *prev = nullptr;
ImutAVLTree *next = nullptr;
unsigned height : 28;
unsigned IsMutable : 1;
unsigned IsDigestCached : 1;
unsigned IsCanonicalized : 1;
unsigned height : 28;
bool IsMutable : 1;
bool IsDigestCached : 1;
bool IsCanonicalized : 1;
value_type value;
uint32_t digest;
uint32_t refCount;
uint32_t digest = 0;
uint32_t refCount = 0;
//===----------------------------------------------------===//
// Internal methods (node manipulation; used by Factory).
@ -246,9 +244,8 @@ class ImutAVLTree {
/// ImutAVLFactory.
ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
unsigned height)
: factory(f), left(l), right(r), prev(nullptr), next(nullptr),
height(height), IsMutable(true), IsDigestCached(false),
IsCanonicalized(0), value(v), digest(0), refCount(0)
: factory(f), left(l), right(r), height(height), IsMutable(true),
IsDigestCached(false), IsCanonicalized(false), value(v)
{
if (left) left->retain();
if (right) right->retain();
@ -369,11 +366,11 @@ class ImutAVLTree {
template <typename ImutInfo >
class ImutAVLFactory {
friend class ImutAVLTree<ImutInfo>;
typedef ImutAVLTree<ImutInfo> TreeTy;
typedef typename TreeTy::value_type_ref value_type_ref;
typedef typename TreeTy::key_type_ref key_type_ref;
typedef DenseMap<unsigned, TreeTy*> CacheTy;
using TreeTy = ImutAVLTree<ImutInfo>;
using value_type_ref = typename TreeTy::value_type_ref;
using key_type_ref = typename TreeTy::key_type_ref;
using CacheTy = DenseMap<unsigned, TreeTy*>;
CacheTy Cache;
uintptr_t Allocator;
@ -659,7 +656,7 @@ class ImutAVLTreeGenericIterator
enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
Flags=0x3 };
typedef ImutAVLTree<ImutInfo> TreeTy;
using TreeTy = ImutAVLTree<ImutInfo>;
ImutAVLTreeGenericIterator() = default;
ImutAVLTreeGenericIterator(const TreeTy *Root) {
@ -764,11 +761,12 @@ template <typename ImutInfo>
class ImutAVLTreeInOrderIterator
: public std::iterator<std::bidirectional_iterator_tag,
ImutAVLTree<ImutInfo>> {
typedef ImutAVLTreeGenericIterator<ImutInfo> InternalIteratorTy;
using InternalIteratorTy = ImutAVLTreeGenericIterator<ImutInfo>;
InternalIteratorTy InternalItr;
public:
typedef ImutAVLTree<ImutInfo> TreeTy;
using TreeTy = ImutAVLTree<ImutInfo>;
ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
if (Root)
@ -840,8 +838,8 @@ struct ImutAVLValueIterator
/// and generic handling of pointers is done below.
template <typename T>
struct ImutProfileInfo {
typedef const T value_type;
typedef const T& value_type_ref;
using value_type = const T;
using value_type_ref = const T&;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
FoldingSetTrait<T>::Profile(X,ID);
@ -851,8 +849,8 @@ struct ImutProfileInfo {
/// Profile traits for integers.
template <typename T>
struct ImutProfileInteger {
typedef const T value_type;
typedef const T& value_type_ref;
using value_type = const T;
using value_type_ref = const T&;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
ID.AddInteger(X);
@ -878,8 +876,8 @@ PROFILE_INTEGER_INFO(unsigned long long)
/// Profile traits for booleans.
template <>
struct ImutProfileInfo<bool> {
typedef const bool value_type;
typedef const bool& value_type_ref;
using value_type = const bool;
using value_type_ref = const bool&;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
ID.AddBoolean(X);
@ -890,8 +888,8 @@ struct ImutProfileInfo<bool> {
/// references to unique objects.
template <typename T>
struct ImutProfileInfo<T*> {
typedef const T* value_type;
typedef value_type value_type_ref;
using value_type = const T*;
using value_type_ref = value_type;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
ID.AddPointer(X);
@ -910,12 +908,12 @@ struct ImutProfileInfo<T*> {
/// std::equal_to<> and std::less<> to perform comparison of elements.
template <typename T>
struct ImutContainerInfo : public ImutProfileInfo<T> {
typedef typename ImutProfileInfo<T>::value_type value_type;
typedef typename ImutProfileInfo<T>::value_type_ref value_type_ref;
typedef value_type key_type;
typedef value_type_ref key_type_ref;
typedef bool data_type;
typedef bool data_type_ref;
using value_type = typename ImutProfileInfo<T>::value_type;
using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
using key_type = value_type;
using key_type_ref = value_type_ref;
using data_type = bool;
using data_type_ref = bool;
static key_type_ref KeyOfValue(value_type_ref D) { return D; }
static data_type_ref DataOfValue(value_type_ref) { return true; }
@ -936,12 +934,12 @@ struct ImutContainerInfo : public ImutProfileInfo<T> {
/// their addresses.
template <typename T>
struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
typedef typename ImutProfileInfo<T*>::value_type value_type;
typedef typename ImutProfileInfo<T*>::value_type_ref value_type_ref;
typedef value_type key_type;
typedef value_type_ref key_type_ref;
typedef bool data_type;
typedef bool data_type_ref;
using value_type = typename ImutProfileInfo<T*>::value_type;
using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
using key_type = value_type;
using key_type_ref = value_type_ref;
using data_type = bool;
using data_type_ref = bool;
static key_type_ref KeyOfValue(value_type_ref D) { return D; }
static data_type_ref DataOfValue(value_type_ref) { return true; }
@ -960,9 +958,9 @@ struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSet {
public:
typedef typename ValInfo::value_type value_type;
typedef typename ValInfo::value_type_ref value_type_ref;
typedef ImutAVLTree<ValInfo> TreeTy;
using value_type = typename ValInfo::value_type;
using value_type_ref = typename ValInfo::value_type_ref;
using TreeTy = ImutAVLTree<ValInfo>;
private:
TreeTy *Root;
@ -980,6 +978,10 @@ class ImmutableSet {
if (Root) { Root->retain(); }
}
~ImmutableSet() {
if (Root) { Root->release(); }
}
ImmutableSet &operator=(const ImmutableSet &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@ -989,10 +991,6 @@ class ImmutableSet {
return *this;
}
~ImmutableSet() {
if (Root) { Root->release(); }
}
class Factory {
typename TreeTy::Factory F;
const bool Canonicalize;
@ -1084,7 +1082,7 @@ class ImmutableSet {
// Iterators.
//===--------------------------------------------------===//
typedef ImutAVLValueIterator<ImmutableSet> iterator;
using iterator = ImutAVLValueIterator<ImmutableSet>;
iterator begin() const { return iterator(Root); }
iterator end() const { return iterator(); }
@ -1112,10 +1110,10 @@ class ImmutableSet {
template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSetRef {
public:
typedef typename ValInfo::value_type value_type;
typedef typename ValInfo::value_type_ref value_type_ref;
typedef ImutAVLTree<ValInfo> TreeTy;
typedef typename TreeTy::Factory FactoryTy;
using value_type = typename ValInfo::value_type;
using value_type_ref = typename ValInfo::value_type_ref;
using TreeTy = ImutAVLTree<ValInfo>;
using FactoryTy = typename TreeTy::Factory;
private:
TreeTy *Root;
@ -1138,6 +1136,10 @@ class ImmutableSetRef {
if (Root) { Root->retain(); }
}
~ImmutableSetRef() {
if (Root) { Root->release(); }
}
ImmutableSetRef &operator=(const ImmutableSetRef &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@ -1147,9 +1149,6 @@ class ImmutableSetRef {
}
return *this;
}
~ImmutableSetRef() {
if (Root) { Root->release(); }
}
static ImmutableSetRef getEmptySet(FactoryTy *F) {
return ImmutableSetRef(0, F);
@ -1196,7 +1195,7 @@ class ImmutableSetRef {
// Iterators.
//===--------------------------------------------------===//
typedef ImutAVLValueIterator<ImmutableSetRef> iterator;
using iterator = ImutAVLValueIterator<ImmutableSetRef>;
iterator begin() const { return iterator(Root); }
iterator end() const { return iterator(); }
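Similarly, a hedged sketch of the ImmutableSet Factory pattern (names are illustrative; the calls are assumed from the interface above):

#include "llvm/ADT/ImmutableSet.h"

void demoImmutableSet() {
  llvm::ImmutableSet<int>::Factory F;
  llvm::ImmutableSet<int> Empty = F.getEmptySet();
  llvm::ImmutableSet<int> S = F.add(Empty, 3);  // persistent: Empty is unchanged
  bool Has = S.contains(3);                     // true
  (void)Has;
}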


@ -20,28 +20,28 @@
#ifndef LLVM_ADT_INDEXEDMAP_H
#define LLVM_ADT_INDEXEDMAP_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include <cassert>
#include <functional>
namespace llvm {
template <typename T, typename ToIndexT = llvm::identity<unsigned> >
template <typename T, typename ToIndexT = identity<unsigned>>
class IndexedMap {
typedef typename ToIndexT::argument_type IndexT;
using IndexT = typename ToIndexT::argument_type;
// Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
// can grow very large and SmallVector grows more efficiently as long as T
// is trivially copyable.
typedef SmallVector<T, 0> StorageT;
using StorageT = SmallVector<T, 0>;
StorageT storage_;
T nullVal_;
ToIndexT toIndex_;
public:
IndexedMap() : nullVal_(T()) { }
IndexedMap() : nullVal_(T()) {}
explicit IndexedMap(const T& val) : nullVal_(val) { }
explicit IndexedMap(const T& val) : nullVal_(val) {}
typename StorageT::reference operator[](IndexT n) {
assert(toIndex_(n) < storage_.size() && "index out of bounds!");
@ -80,6 +80,6 @@ template <typename T, typename ToIndexT = llvm::identity<unsigned> >
}
};
} // End llvm namespace
} // end namespace llvm
#endif
#endif // LLVM_ADT_INDEXEDMAP_H
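A minimal sketch of IndexedMap in use, assuming the default identity index functor shown above (the function name is illustrative):

#include "llvm/ADT/IndexedMap.h"

void demoIndexedMap() {
  llvm::IndexedMap<int> Map;   // dense map from unsigned indices to int
  Map.grow(100);               // ensure index 100 is addressable
  Map[42] = 7;
  int V = Map[42];             // 7
  (void)V;
}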


@ -106,6 +106,7 @@
#include "llvm/Support/RecyclingAllocator.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
@ -186,7 +187,7 @@ struct IntervalMapHalfOpenInfo {
/// It should be considered private to the implementation.
namespace IntervalMapImpl {
typedef std::pair<unsigned,unsigned> IdxPair;
using IdxPair = std::pair<unsigned,unsigned>;
//===----------------------------------------------------------------------===//
//--- IntervalMapImpl::NodeBase ---//
@ -445,7 +446,7 @@ struct NodeSizer {
LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
};
typedef NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize> LeafBase;
using LeafBase = NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize>;
enum {
// Now that we have the leaf branching factor, compute the actual allocation
@ -461,8 +462,8 @@ struct NodeSizer {
/// This typedef is very likely to be identical for all IntervalMaps with
/// reasonably sized entries, so the same allocator can be shared among
/// different kinds of maps.
typedef RecyclingAllocator<BumpPtrAllocator, char,
AllocBytes, CacheLineBytes> Allocator;
using Allocator =
RecyclingAllocator<BumpPtrAllocator, char, AllocBytes, CacheLineBytes>;
};
//===----------------------------------------------------------------------===//
@ -930,12 +931,12 @@ template <typename KeyT, typename ValT,
unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
typename Traits = IntervalMapInfo<KeyT>>
class IntervalMap {
typedef IntervalMapImpl::NodeSizer<KeyT, ValT> Sizer;
typedef IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits> Leaf;
typedef IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>
Branch;
typedef IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits> RootLeaf;
typedef IntervalMapImpl::IdxPair IdxPair;
using Sizer = IntervalMapImpl::NodeSizer<KeyT, ValT>;
using Leaf = IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits>;
using Branch =
IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>;
using RootLeaf = IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits>;
using IdxPair = IntervalMapImpl::IdxPair;
// The RootLeaf capacity is given as a template parameter. We must compute the
// corresponding RootBranch capacity.
@ -945,8 +946,8 @@ class IntervalMap {
RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
};
typedef IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>
RootBranch;
using RootBranch =
IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>;
// When branched, we store a global start key as well as the branch node.
struct RootBranchData {
@ -955,10 +956,10 @@ class IntervalMap {
};
public:
typedef typename Sizer::Allocator Allocator;
typedef KeyT KeyType;
typedef ValT ValueType;
typedef Traits KeyTraits;
using Allocator = typename Sizer::Allocator;
using KeyType = KeyT;
using ValueType = ValT;
using KeyTraits = Traits;
private:
// The root data is either a RootLeaf or a RootBranchData instance.
@ -1290,7 +1291,7 @@ class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
friend class IntervalMap;
// The map referred to.
IntervalMap *map;
IntervalMap *map = nullptr;
// We store a full path from the root to the current position.
// The path may be partially filled, but never between iterator calls.
@ -1338,7 +1339,7 @@ class IntervalMap<KeyT, ValT, N, Traits>::const_iterator :
public:
/// const_iterator - Create an iterator that isn't pointing anywhere.
const_iterator() : map(nullptr) {}
const_iterator() = default;
/// setMap - Change the map iterated over. This call must be followed by a
/// call to goToBegin(), goToEnd(), or find()
@ -1509,7 +1510,8 @@ const_iterator::treeAdvanceTo(KeyT x) {
template <typename KeyT, typename ValT, unsigned N, typename Traits>
class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
friend class IntervalMap;
typedef IntervalMapImpl::IdxPair IdxPair;
using IdxPair = IntervalMapImpl::IdxPair;
explicit iterator(IntervalMap &map) : const_iterator(map) {}
@ -2003,7 +2005,7 @@ iterator::overflow(unsigned Level) {
// Elements have been rearranged, now update node sizes and stops.
bool SplitRoot = false;
unsigned Pos = 0;
for (;;) {
while (true) {
KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
if (NewNode && Pos == NewNode) {
SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
@ -2045,8 +2047,9 @@ iterator::overflow(unsigned Level) {
///
template <typename MapA, typename MapB>
class IntervalMapOverlaps {
typedef typename MapA::KeyType KeyType;
typedef typename MapA::KeyTraits Traits;
using KeyType = typename MapA::KeyType;
using Traits = typename MapA::KeyTraits;
typename MapA::const_iterator posA;
typename MapB::const_iterator posB;
@ -2071,7 +2074,7 @@ class IntervalMapOverlaps {
// Already overlapping.
return;
for (;;) {
while (true) {
// Make a.end > b.start.
posA.advanceTo(posB.start());
if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))


@ -1,4 +1,4 @@
//== llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer ---*- C++ -*-==//
//==- llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@ -73,9 +73,10 @@ template <class Derived> class RefCountedBase {
public:
RefCountedBase() = default;
RefCountedBase(const RefCountedBase &) : RefCount(0) {}
RefCountedBase(const RefCountedBase &) {}
void Retain() const { ++RefCount; }
void Release() const {
assert(RefCount > 0 && "Reference count is already zero.");
if (--RefCount == 0)
@ -136,7 +137,7 @@ template <typename T> class IntrusiveRefCntPtr {
T *Obj = nullptr;
public:
typedef T element_type;
using element_type = T;
explicit IntrusiveRefCntPtr() = default;
IntrusiveRefCntPtr(T *obj) : Obj(obj) { retain(); }
@ -153,13 +154,13 @@ template <typename T> class IntrusiveRefCntPtr {
retain();
}
~IntrusiveRefCntPtr() { release(); }
IntrusiveRefCntPtr &operator=(IntrusiveRefCntPtr S) {
swap(S);
return *this;
}
~IntrusiveRefCntPtr() { release(); }
T &operator*() const { return *Obj; }
T *operator->() const { return Obj; }
T *get() const { return Obj; }
@ -183,6 +184,7 @@ template <typename T> class IntrusiveRefCntPtr {
if (Obj)
IntrusiveRefCntPtrInfo<T>::retain(Obj);
}
void release() {
if (Obj)
IntrusiveRefCntPtrInfo<T>::release(Obj);
@ -248,14 +250,16 @@ bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
template <typename From> struct simplify_type;
template <class T> struct simplify_type<IntrusiveRefCntPtr<T>> {
typedef T *SimpleType;
using SimpleType = T *;
static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T> &Val) {
return Val.get();
}
};
template <class T> struct simplify_type<const IntrusiveRefCntPtr<T>> {
typedef /*const*/ T *SimpleType;
using SimpleType = /*const*/ T *;
static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T> &Val) {
return Val.get();
}
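A short sketch of the intended usage, under the assumption that `Widget` is any illustrative type deriving from RefCountedBase as above:

#include "llvm/ADT/IntrusiveRefCntPtr.h"

class Widget : public llvm::RefCountedBase<Widget> {};  // illustrative type

void demoIntrusiveRefCntPtr() {
  llvm::IntrusiveRefCntPtr<Widget> A(new Widget());  // refcount is now 1
  llvm::IntrusiveRefCntPtr<Widget> B = A;            // refcount 2, same object
  A = nullptr;                                       // object stays alive through B
}                                                    // B releases; object is destroyed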


@ -19,6 +19,12 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>
#include <vector>
namespace llvm {
@ -27,20 +33,20 @@ namespace llvm {
/// in a deterministic order. The values are kept in a std::vector and the
/// mapping is done with DenseMap from Keys to indexes in that vector.
template<typename KeyT, typename ValueT,
typename MapType = llvm::DenseMap<KeyT, unsigned>,
typename VectorType = std::vector<std::pair<KeyT, ValueT> > >
typename MapType = DenseMap<KeyT, unsigned>,
typename VectorType = std::vector<std::pair<KeyT, ValueT>>>
class MapVector {
typedef typename VectorType::value_type value_type;
typedef typename VectorType::size_type size_type;
using value_type = typename VectorType::value_type;
using size_type = typename VectorType::size_type;
MapType Map;
VectorType Vector;
public:
typedef typename VectorType::iterator iterator;
typedef typename VectorType::const_iterator const_iterator;
typedef typename VectorType::reverse_iterator reverse_iterator;
typedef typename VectorType::const_reverse_iterator const_reverse_iterator;
using iterator = typename VectorType::iterator;
using const_iterator = typename VectorType::const_iterator;
using reverse_iterator = typename VectorType::reverse_iterator;
using const_reverse_iterator = typename VectorType::const_reverse_iterator;
/// Clear the MapVector and return the underlying vector.
VectorType takeVector() {
@ -220,4 +226,4 @@ struct SmallMapVector
} // end namespace llvm
#endif
#endif // LLVM_ADT_MAPVECTOR_H
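A small usage sketch (illustrative names; not part of the diff) showing the deterministic iteration order that motivates MapVector:

#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/StringRef.h"

void demoMapVector() {
  llvm::MapVector<llvm::StringRef, int> MV;
  MV.insert({"b", 2});
  MV.insert({"a", 1});
  MV["c"] = 3;                 // operator[] default-constructs a missing value
  for (auto &KV : MV)          // visits b, a, c: insertion order, not hash order
    (void)KV.second;
}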


@ -1,4 +1,4 @@
//===-- Optional.h - Simple variant for passing optional values ---*- C++ -*-=//
//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -19,6 +19,8 @@
#include "llvm/ADT/None.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <new>
#include <utility>
@ -28,15 +30,18 @@ namespace llvm {
template<typename T>
class Optional {
AlignedCharArrayUnion<T> storage;
bool hasVal;
public:
typedef T value_type;
bool hasVal = false;
public:
using value_type = T;
Optional(NoneType) {}
explicit Optional() {}
Optional(NoneType) : hasVal(false) {}
explicit Optional() : hasVal(false) {}
Optional(const T &y) : hasVal(true) {
new (storage.buffer) T(y);
}
Optional(const Optional &O) : hasVal(O.hasVal) {
if (hasVal)
new (storage.buffer) T(*O);
@ -45,12 +50,18 @@ class Optional {
Optional(T &&y) : hasVal(true) {
new (storage.buffer) T(std::forward<T>(y));
}
Optional(Optional<T> &&O) : hasVal(O) {
if (O) {
new (storage.buffer) T(std::move(*O));
O.reset();
}
}
~Optional() {
reset();
}
Optional &operator=(T &&y) {
if (hasVal)
**this = std::move(y);
@ -60,6 +71,7 @@ class Optional {
}
return *this;
}
Optional &operator=(Optional &&O) {
if (!O)
reset();
@ -112,10 +124,6 @@ class Optional {
}
}
~Optional() {
reset();
}
const T* getPointer() const { assert(hasVal); return reinterpret_cast<const T*>(storage.buffer); }
T* getPointer() { assert(hasVal); return reinterpret_cast<T*>(storage.buffer); }
const T& getValue() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
@ -144,8 +152,7 @@ class Optional {
#endif
};
template <typename T> struct isPodLike;
template <typename T> struct isPodLike<Optional<T> > {
template <typename T> struct isPodLike<Optional<T>> {
// An Optional<T> is pod-like if T is.
static const bool value = isPodLike<T>::value;
};
@ -284,6 +291,6 @@ template <typename T> bool operator>=(const T &X, const Optional<T> &Y) {
return !(X < Y);
}
} // end llvm namespace
} // end namespace llvm
#endif
#endif // LLVM_ADT_OPTIONAL_H
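A minimal sketch of Optional in use (the functions are hypothetical), relying only on the constructors and operators shown above:

#include "llvm/ADT/Optional.h"

llvm::Optional<int> tryParse(bool Ok) {
  if (!Ok)
    return llvm::None;   // disengaged Optional
  return 42;             // engaged Optional holding 42
}

void demoOptional() {
  if (llvm::Optional<int> V = tryParse(true))
    (void)*V;            // dereference only when engaged
}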


@ -76,8 +76,8 @@ template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
std::numeric_limits<T>::is_signed> {
BitVectorTy Bits;
typedef PackedVectorBase<T, BitNum, BitVectorTy,
std::numeric_limits<T>::is_signed> base;
using base = PackedVectorBase<T, BitNum, BitVectorTy,
std::numeric_limits<T>::is_signed>;
public:
class reference {
@ -99,7 +99,7 @@ class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
};
PackedVector() = default;
explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) { }
explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) {}
bool empty() const { return Bits.empty(); }


@ -13,7 +13,10 @@
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
#include <climits>
#include <cstdint>
#include <type_traits>
namespace llvm {
@ -29,7 +32,7 @@ namespace llvm {
/// Also, the default constructed value zero initializes the integer.
template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
class PointerEmbeddedInt {
uintptr_t Value;
uintptr_t Value = 0;
// Note: This '<' is correct; using '<=' would result in some shifts
// overflowing their storage types.
@ -54,15 +57,12 @@ class PointerEmbeddedInt {
explicit PointerEmbeddedInt(uintptr_t Value, RawValueTag) : Value(Value) {}
public:
PointerEmbeddedInt() : Value(0) {}
PointerEmbeddedInt() = default;
PointerEmbeddedInt(IntT I) {
*this = I;
}
PointerEmbeddedInt(IntT I) { *this = I; }
PointerEmbeddedInt &operator=(IntT I) {
assert((std::is_signed<IntT>::value ? llvm::isInt<Bits>(I)
: llvm::isUInt<Bits>(I)) &&
assert((std::is_signed<IntT>::value ? isInt<Bits>(I) : isUInt<Bits>(I)) &&
"Integer has bits outside those preserved!");
Value = static_cast<uintptr_t>(I) << Shift;
return *this;
@ -81,15 +81,17 @@ class PointerEmbeddedInt {
// types.
template <typename IntT, int Bits>
class PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
typedef PointerEmbeddedInt<IntT, Bits> T;
using T = PointerEmbeddedInt<IntT, Bits>;
public:
static inline void *getAsVoidPointer(const T &P) {
return reinterpret_cast<void *>(P.Value);
}
static inline T getFromVoidPointer(void *P) {
return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
}
static inline T getFromVoidPointer(const void *P) {
return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
}
@ -101,17 +103,19 @@ class PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
// itself can be a key.
template <typename IntT, int Bits>
struct DenseMapInfo<PointerEmbeddedInt<IntT, Bits>> {
typedef PointerEmbeddedInt<IntT, Bits> T;
typedef DenseMapInfo<IntT> IntInfo;
using T = PointerEmbeddedInt<IntT, Bits>;
using IntInfo = DenseMapInfo<IntT>;
static inline T getEmptyKey() { return IntInfo::getEmptyKey(); }
static inline T getTombstoneKey() { return IntInfo::getTombstoneKey(); }
static unsigned getHashValue(const T &Arg) {
return IntInfo::getHashValue(Arg);
}
static bool isEqual(const T &LHS, const T &RHS) { return LHS == RHS; }
};
}
#endif
} // end namespace llvm
#endif // LLVM_ADT_POINTEREMBEDDEDINT_H
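A tiny sketch of the round trip through PointerEmbeddedInt (values and widths are illustrative):

#include "llvm/ADT/PointerEmbeddedInt.h"

void demoPointerEmbeddedInt() {
  // Stores a small integer in a pointer-sized value so it can participate in
  // pointer-like containers (see the PointerLikeTypeTraits specialization above).
  llvm::PointerEmbeddedInt<unsigned, 16> EI = 1234u;
  unsigned V = EI;   // converts back to the embedded integer
  (void)V;
}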


@ -158,7 +158,7 @@ template <typename PT1, typename PT2> class PointerUnion {
assert(
get<PT1>() == Val.getPointer() &&
"Can't get the address because PointerLikeTypeTraits changes the ptr");
return (PT1 *)Val.getAddrOfPointer();
return const_cast<PT1 *>(reinterpret_cast<const PT1 *>(Val.getAddrOfPointer()));
}
/// Assignment from nullptr which just clears the union.


@ -109,6 +109,7 @@ class ScopedHashTableScope {
ScopedHashTableVal<K, V> *getLastValInScope() {
return LastValInScope;
}
void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
LastValInScope = Val;
}
@ -151,13 +152,14 @@ class ScopedHashTable {
public:
/// ScopeTy - This is a helpful typedef that allows clients to get easy access
/// to the name of the scope for this hash table.
typedef ScopedHashTableScope<K, V, KInfo, AllocatorTy> ScopeTy;
typedef unsigned size_type;
using ScopeTy = ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
using size_type = unsigned;
private:
friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
typedef ScopedHashTableVal<K, V> ValTy;
using ValTy = ScopedHashTableVal<K, V>;
DenseMap<K, ValTy*, KInfo> TopLevelMap;
ScopeTy *CurScope = nullptr;
@ -165,7 +167,7 @@ class ScopedHashTable {
public:
ScopedHashTable() = default;
ScopedHashTable(AllocatorTy A) : CurScope(0), Allocator(A) {}
ScopedHashTable(AllocatorTy A) : Allocator(A) {}
ScopedHashTable(const ScopedHashTable &) = delete;
ScopedHashTable &operator=(const ScopedHashTable &) = delete;
@ -194,7 +196,7 @@ class ScopedHashTable {
insertIntoScope(CurScope, Key, Val);
}
typedef ScopedHashTableIterator<K, V, KInfo> iterator;
using iterator = ScopedHashTableIterator<K, V, KInfo>;
iterator end() { return iterator(0); }
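A sketch of the scope-based shadowing this table provides, assuming only the ScopeTy/insert/lookup interface above (keys and values are illustrative):

#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/StringRef.h"

void demoScopedHashTable() {
  llvm::ScopedHashTable<llvm::StringRef, int> Table;
  llvm::ScopedHashTable<llvm::StringRef, int>::ScopeTy Outer(Table);
  Table.insert("x", 1);
  {
    llvm::ScopedHashTable<llvm::StringRef, int>::ScopeTy Inner(Table);
    Table.insert("x", 2);        // shadows the outer binding
    (void)Table.lookup("x");     // 2
  }                              // Inner ends: the shadowed binding is restored
  (void)Table.lookup("x");       // 1 again
}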


@ -15,8 +15,15 @@
#define LLVM_ADT_SMALLBITVECTOR_H
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <limits>
#include <utility>
namespace llvm {
@ -29,7 +36,7 @@ class SmallBitVector {
// TODO: In "large" mode, a pointer to a BitVector is used, leading to an
// unnecessary level of indirection. It would be more efficient to use a
// pointer to memory containing size, allocation size, and the array of bits.
uintptr_t X;
uintptr_t X = 1;
enum {
// The number of bits in this class.
@ -54,7 +61,8 @@ class SmallBitVector {
"Unsupported word size");
public:
typedef unsigned size_type;
using size_type = unsigned;
// Encapsulation of a single bit.
class reference {
SmallBitVector &TheVector;
@ -134,21 +142,8 @@ class SmallBitVector {
}
public:
typedef const_set_bits_iterator_impl<SmallBitVector> const_set_bits_iterator;
typedef const_set_bits_iterator set_iterator;
const_set_bits_iterator set_bits_begin() const {
return const_set_bits_iterator(*this);
}
const_set_bits_iterator set_bits_end() const {
return const_set_bits_iterator(*this, -1);
}
iterator_range<const_set_bits_iterator> set_bits() const {
return make_range(set_bits_begin(), set_bits_end());
}
/// Creates an empty bitvector.
SmallBitVector() : X(1) {}
SmallBitVector() = default;
/// Creates a bitvector of specified number of bits. All bits are initialized
/// to the specified value.
@ -176,6 +171,21 @@ class SmallBitVector {
delete getPointer();
}
using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>;
using set_iterator = const_set_bits_iterator;
const_set_bits_iterator set_bits_begin() const {
return const_set_bits_iterator(*this);
}
const_set_bits_iterator set_bits_end() const {
return const_set_bits_iterator(*this, -1);
}
iterator_range<const_set_bits_iterator> set_bits() const {
return make_range(set_bits_begin(), set_bits_end());
}
/// Tests whether there are no bits in this bitvector.
bool empty() const {
return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
@ -677,14 +687,16 @@ operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
return Result;
}
} // End llvm namespace
} // end namespace llvm
namespace std {
/// Implement std::swap in terms of BitVector swap.
inline void
swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
LHS.swap(RHS);
}
/// Implement std::swap in terms of BitVector swap.
inline void
swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
LHS.swap(RHS);
}
#endif
} // end namespace std
#endif // LLVM_ADT_SMALLBITVECTOR_H
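A small sketch exercising the set_bits() range moved above (sizes and indices are illustrative):

#include "llvm/ADT/SmallBitVector.h"

void demoSmallBitVector() {
  llvm::SmallBitVector BV(128);       // too large for the inline word, so heap-backed
  BV.set(5);
  BV.set(64);
  for (unsigned Idx : BV.set_bits())  // visits 5, then 64
    (void)Idx;
  (void)BV.any();                     // true
}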


@ -39,8 +39,9 @@ class SmallSet {
/// we will never use.
SmallVector<T, N> Vector;
std::set<T, C> Set;
typedef typename SmallVector<T, N>::const_iterator VIterator;
typedef typename SmallVector<T, N>::iterator mutable_iterator;
using VIterator = typename SmallVector<T, N>::const_iterator;
using mutable_iterator = typename SmallVector<T, N>::iterator;
// In small mode SmallPtrSet uses linear search for the elements, so it is
// not a good idea to choose this value too high. You may consider using a
@ -48,7 +49,7 @@ class SmallSet {
static_assert(N <= 32, "N should be small");
public:
typedef size_t size_type;
using size_type = size_t;
SmallSet() = default;


@ -14,6 +14,7 @@
#ifndef LLVM_ADT_STRINGEXTRAS_H
#define LLVM_ADT_STRINGEXTRAS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <cstddef>
@ -40,6 +41,11 @@ static inline StringRef toStringRef(bool B) {
return StringRef(B ? "true" : "false");
}
/// Construct a string ref from an array ref of unsigned chars.
static inline StringRef toStringRef(ArrayRef<uint8_t> Input) {
return StringRef(reinterpret_cast<const char *>(Input.begin()), Input.size());
}
/// Interpret the given character \p C as a hexadecimal digit and return its
/// value.
///
@ -68,7 +74,7 @@ static inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
/// Convert buffer \p Input to its hexadecimal representation.
/// The returned string is double the size of \p Input.
static inline std::string toHex(StringRef Input) {
inline std::string toHex(StringRef Input) {
static const char *const LUT = "0123456789ABCDEF";
size_t Length = Input.size();
@ -82,6 +88,10 @@ static inline std::string toHex(StringRef Input) {
return Output;
}
inline std::string toHex(ArrayRef<uint8_t> Input) {
return toHex(toStringRef(Input));
}
static inline uint8_t hexFromNibbles(char MSB, char LSB) {
unsigned U1 = hexDigitValue(MSB);
unsigned U2 = hexDigitValue(LSB);
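A quick sketch of the new ArrayRef overloads added above (byte values and the function name are illustrative):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
#include <cstdint>
#include <string>

void demoToHex() {
  uint8_t Bytes[] = {0xDE, 0xAD};
  std::string Hex = llvm::toHex(llvm::ArrayRef<uint8_t>(Bytes));           // "DEAD"
  llvm::StringRef View = llvm::toStringRef(llvm::ArrayRef<uint8_t>(Bytes)); // view over the bytes
  (void)Hex; (void)View;
}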


@ -239,7 +239,9 @@ class Triple {
/// Default constructor is the same as an empty string and leaves all
/// triple fields unknown.
Triple() : Data(), Arch(), Vendor(), OS(), Environment(), ObjectFormat() {}
Triple()
: Data(), Arch(), SubArch(), Vendor(), OS(), Environment(),
ObjectFormat() {}
explicit Triple(const Twine &Str);
Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);


@ -1,4 +1,4 @@
//===- llvm/ADT/ilist_base.h - Intrusive List Base ---------------*- C++ -*-==//
//===- llvm/ADT/ilist_base.h - Intrusive List Base --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -12,15 +12,13 @@
#include "llvm/ADT/ilist_node_base.h"
#include <cassert>
#include <cstddef>
#include <type_traits>
namespace llvm {
/// Implementations of list algorithms using ilist_node_base.
template <bool EnableSentinelTracking> class ilist_base {
public:
typedef ilist_node_base<EnableSentinelTracking> node_base_type;
using node_base_type = ilist_node_base<EnableSentinelTracking>;
static void insertBeforeImpl(node_base_type &Next, node_base_type &N) {
node_base_type &Prev = *Next.getPrev();


@ -1,4 +1,4 @@
//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator -------*- C++ -*-==//
//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -23,28 +23,30 @@ namespace ilist_detail {
/// Find const-correct node types.
template <class OptionsT, bool IsConst> struct IteratorTraits;
template <class OptionsT> struct IteratorTraits<OptionsT, false> {
typedef typename OptionsT::value_type value_type;
typedef typename OptionsT::pointer pointer;
typedef typename OptionsT::reference reference;
typedef ilist_node_impl<OptionsT> *node_pointer;
typedef ilist_node_impl<OptionsT> &node_reference;
using value_type = typename OptionsT::value_type;
using pointer = typename OptionsT::pointer;
using reference = typename OptionsT::reference;
using node_pointer = ilist_node_impl<OptionsT> *;
using node_reference = ilist_node_impl<OptionsT> &;
};
template <class OptionsT> struct IteratorTraits<OptionsT, true> {
typedef const typename OptionsT::value_type value_type;
typedef typename OptionsT::const_pointer pointer;
typedef typename OptionsT::const_reference reference;
typedef const ilist_node_impl<OptionsT> *node_pointer;
typedef const ilist_node_impl<OptionsT> &node_reference;
using value_type = const typename OptionsT::value_type;
using pointer = typename OptionsT::const_pointer;
using reference = typename OptionsT::const_reference;
using node_pointer = const ilist_node_impl<OptionsT> *;
using node_reference = const ilist_node_impl<OptionsT> &;
};
template <bool IsReverse> struct IteratorHelper;
template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
typedef ilist_detail::NodeAccess Access;
using Access = ilist_detail::NodeAccess;
template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
};
template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
typedef ilist_detail::NodeAccess Access;
using Access = ilist_detail::NodeAccess;
template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
};
@ -58,24 +60,23 @@ class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
typedef ilist_detail::IteratorTraits<OptionsT, IsConst> Traits;
typedef ilist_detail::SpecificNodeAccess<OptionsT> Access;
using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
public:
typedef typename Traits::value_type value_type;
typedef typename Traits::pointer pointer;
typedef typename Traits::reference reference;
typedef ptrdiff_t difference_type;
typedef std::bidirectional_iterator_tag iterator_category;
typedef typename OptionsT::const_pointer const_pointer;
typedef typename OptionsT::const_reference const_reference;
using value_type = typename Traits::value_type;
using pointer = typename Traits::pointer;
using reference = typename Traits::reference;
using difference_type = ptrdiff_t;
using iterator_category = std::bidirectional_iterator_tag;
using const_pointer = typename OptionsT::const_pointer;
using const_reference = typename OptionsT::const_reference;
private:
typedef typename Traits::node_pointer node_pointer;
typedef typename Traits::node_reference node_reference;
using node_pointer = typename Traits::node_pointer;
using node_reference = typename Traits::node_reference;
node_pointer NodePtr;
node_pointer NodePtr = nullptr;
public:
/// Create from an ilist_node.
@ -83,7 +84,7 @@ class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
ilist_iterator() : NodePtr(nullptr) {}
ilist_iterator() = default;
// This is templated so that we can allow constructing a const iterator from
// a nonconst iterator...
@ -184,8 +185,8 @@ template <typename From> struct simplify_type;
/// FIXME: remove this, since there is no implicit conversion to NodeTy.
template <class OptionsT, bool IsConst>
struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
typedef ilist_iterator<OptionsT, false, IsConst> iterator;
typedef typename iterator::pointer SimpleType;
using iterator = ilist_iterator<OptionsT, false, IsConst>;
using SimpleType = typename iterator::pointer;
static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
};


@ -1,4 +1,4 @@
//==-- llvm/ADT/ilist_node.h - Intrusive Linked List Helper ------*- C++ -*-==//
//===- llvm/ADT/ilist_node.h - Intrusive Linked List Helper -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -21,11 +21,10 @@
namespace llvm {
namespace ilist_detail {
struct NodeAccess;
} // end namespace ilist_detail
template<typename NodeTy>
struct ilist_traits;
struct NodeAccess;
} // end namespace ilist_detail
template <class OptionsT, bool IsReverse, bool IsConst> class ilist_iterator;
template <class OptionsT> class ilist_sentinel;
@ -39,9 +38,9 @@ template <class OptionsT> class ilist_sentinel;
/// provide type safety: you can't insert nodes of \a ilist_node_impl into the
/// wrong \a simple_ilist or \a iplist.
template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
typedef typename OptionsT::value_type value_type;
typedef typename OptionsT::node_base_type node_base_type;
typedef typename OptionsT::list_base_type list_base_type;
using value_type = typename OptionsT::value_type;
using node_base_type = typename OptionsT::node_base_type;
using list_base_type = typename OptionsT::list_base_type;
friend typename OptionsT::list_base_type;
friend struct ilist_detail::NodeAccess;
@ -52,17 +51,18 @@ template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
friend class ilist_iterator<OptionsT, true, true>;
protected:
ilist_node_impl() = default;
using self_iterator = ilist_iterator<OptionsT, false, false>;
using const_self_iterator = ilist_iterator<OptionsT, false, true>;
using reverse_self_iterator = ilist_iterator<OptionsT, true, false>;
using const_reverse_self_iterator = ilist_iterator<OptionsT, true, true>;
typedef ilist_iterator<OptionsT, false, false> self_iterator;
typedef ilist_iterator<OptionsT, false, true> const_self_iterator;
typedef ilist_iterator<OptionsT, true, false> reverse_self_iterator;
typedef ilist_iterator<OptionsT, true, true> const_reverse_self_iterator;
ilist_node_impl() = default;
private:
ilist_node_impl *getPrev() {
return static_cast<ilist_node_impl *>(node_base_type::getPrev());
}
ilist_node_impl *getNext() {
return static_cast<ilist_node_impl *>(node_base_type::getNext());
}
@ -70,6 +70,7 @@ template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
const ilist_node_impl *getPrev() const {
return static_cast<ilist_node_impl *>(node_base_type::getPrev());
}
const ilist_node_impl *getNext() const {
return static_cast<ilist_node_impl *>(node_base_type::getNext());
}
@ -80,9 +81,11 @@ template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
public:
self_iterator getIterator() { return self_iterator(*this); }
const_self_iterator getIterator() const { return const_self_iterator(*this); }
reverse_self_iterator getReverseIterator() {
return reverse_self_iterator(*this);
}
const_reverse_self_iterator getReverseIterator() const {
return const_reverse_self_iterator(*this);
}
@ -151,6 +154,7 @@ class ilist_node
};
namespace ilist_detail {
/// An access class for ilist_node private API.
///
/// This gives access to the private parts of ilist nodes. Nodes for an ilist
@ -163,15 +167,18 @@ struct NodeAccess {
static ilist_node_impl<OptionsT> *getNodePtr(typename OptionsT::pointer N) {
return N;
}
template <class OptionsT>
static const ilist_node_impl<OptionsT> *
getNodePtr(typename OptionsT::const_pointer N) {
return N;
}
template <class OptionsT>
static typename OptionsT::pointer getValuePtr(ilist_node_impl<OptionsT> *N) {
return static_cast<typename OptionsT::pointer>(N);
}
template <class OptionsT>
static typename OptionsT::const_pointer
getValuePtr(const ilist_node_impl<OptionsT> *N) {
@ -182,15 +189,18 @@ struct NodeAccess {
static ilist_node_impl<OptionsT> *getPrev(ilist_node_impl<OptionsT> &N) {
return N.getPrev();
}
template <class OptionsT>
static ilist_node_impl<OptionsT> *getNext(ilist_node_impl<OptionsT> &N) {
return N.getNext();
}
template <class OptionsT>
static const ilist_node_impl<OptionsT> *
getPrev(const ilist_node_impl<OptionsT> &N) {
return N.getPrev();
}
template <class OptionsT>
static const ilist_node_impl<OptionsT> *
getNext(const ilist_node_impl<OptionsT> &N) {
@ -200,23 +210,27 @@ struct NodeAccess {
template <class OptionsT> struct SpecificNodeAccess : NodeAccess {
protected:
typedef typename OptionsT::pointer pointer;
typedef typename OptionsT::const_pointer const_pointer;
typedef ilist_node_impl<OptionsT> node_type;
using pointer = typename OptionsT::pointer;
using const_pointer = typename OptionsT::const_pointer;
using node_type = ilist_node_impl<OptionsT>;
static node_type *getNodePtr(pointer N) {
return NodeAccess::getNodePtr<OptionsT>(N);
}
static const node_type *getNodePtr(const_pointer N) {
return NodeAccess::getNodePtr<OptionsT>(N);
}
static pointer getValuePtr(node_type *N) {
return NodeAccess::getValuePtr<OptionsT>(N);
}
static const_pointer getValuePtr(const node_type *N) {
return NodeAccess::getValuePtr<OptionsT>(N);
}
};
} // end namespace ilist_detail
template <class OptionsT>
@ -265,6 +279,7 @@ class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
return List.getPrevNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the previous node, or \c nullptr for the list head.
const NodeTy *getPrevNode() const {
return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
@ -278,6 +293,7 @@ class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
return List.getNextNode(*static_cast<NodeTy *>(this));
}
/// \brief Get the next node, or \c nullptr for the list tail.
const NodeTy *getNextNode() const {
return const_cast<ilist_node_with_parent *>(this)->getNextNode();
@ -285,6 +301,6 @@ class ilist_node_with_parent : public ilist_node<NodeTy, Options...> {
/// @}
};
} // End llvm namespace
} // end namespace llvm
#endif
#endif // LLVM_ADT_ILIST_NODE_H


@ -11,9 +11,11 @@
#define LLVM_ADT_ITERATOR_H
#include "llvm/ADT/iterator_range.h"
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <type_traits>
#include <utility>
namespace llvm {
@ -206,7 +208,7 @@ template <
class iterator_adaptor_base
: public iterator_facade_base<DerivedT, IteratorCategoryT, T,
DifferenceTypeT, PointerT, ReferenceT> {
typedef typename iterator_adaptor_base::iterator_facade_base BaseT;
using BaseT = typename iterator_adaptor_base::iterator_facade_base;
protected:
WrappedIteratorT I;
@ -221,7 +223,7 @@ class iterator_adaptor_base
const WrappedIteratorT &wrapped() const { return I; }
public:
typedef DifferenceTypeT difference_type;
using difference_type = DifferenceTypeT;
DerivedT &operator+=(difference_type n) {
static_assert(
@ -279,7 +281,7 @@ class iterator_adaptor_base
/// which is implemented with some iterator over T*s:
///
/// \code
/// typedef pointee_iterator<SmallVectorImpl<T *>::iterator> iterator;
/// using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>;
/// \endcode
template <typename WrappedIteratorT,
typename T = typename std::remove_reference<


@ -13,9 +13,14 @@
#include "llvm/ADT/ilist_base.h"
#include "llvm/ADT/ilist_iterator.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/ilist_node_options.h"
#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <functional>
#include <iterator>
#include <utility>
namespace llvm {
@ -77,23 +82,23 @@ class simple_ilist
typename ilist_detail::compute_node_options<T, Options...>::type> {
static_assert(ilist_detail::check_options<Options...>::value,
"Unrecognized node option!");
typedef
typename ilist_detail::compute_node_options<T, Options...>::type OptionsT;
typedef typename OptionsT::list_base_type list_base_type;
using OptionsT =
typename ilist_detail::compute_node_options<T, Options...>::type;
using list_base_type = typename OptionsT::list_base_type;
ilist_sentinel<OptionsT> Sentinel;
public:
typedef typename OptionsT::value_type value_type;
typedef typename OptionsT::pointer pointer;
typedef typename OptionsT::reference reference;
typedef typename OptionsT::const_pointer const_pointer;
typedef typename OptionsT::const_reference const_reference;
typedef ilist_iterator<OptionsT, false, false> iterator;
typedef ilist_iterator<OptionsT, false, true> const_iterator;
typedef ilist_iterator<OptionsT, true, false> reverse_iterator;
typedef ilist_iterator<OptionsT, true, true> const_reverse_iterator;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
using value_type = typename OptionsT::value_type;
using pointer = typename OptionsT::pointer;
using reference = typename OptionsT::reference;
using const_pointer = typename OptionsT::const_pointer;
using const_reference = typename OptionsT::const_reference;
using iterator = ilist_iterator<OptionsT, false, false>;
using const_iterator = ilist_iterator<OptionsT, false, true>;
using reverse_iterator = ilist_iterator<OptionsT, true, false>;
using const_reverse_iterator = ilist_iterator<OptionsT, true, true>;
using size_type = size_t;
using difference_type = ptrdiff_t;
simple_ilist() = default;
~simple_ilist() = default;


@ -147,7 +147,6 @@ class MemoryAccess
MemoryAccess(const MemoryAccess &) = delete;
MemoryAccess &operator=(const MemoryAccess &) = delete;
void *operator new(size_t, unsigned) = delete;
void *operator new(size_t) = delete;
BasicBlock *getBlock() const { return Block; }
@ -232,7 +231,6 @@ inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
void *operator new(size_t, unsigned) = delete;
void *operator new(size_t) = delete;
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
@ -298,7 +296,6 @@ class MemoryUse final : public MemoryUseOrDef {
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
void *operator new(size_t, unsigned) = delete;
static inline bool classof(const Value *MA) {
return MA->getValueID() == MemoryUseVal;
@ -355,7 +352,6 @@ class MemoryDef final : public MemoryUseOrDef {
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
void *operator new(size_t, unsigned) = delete;
static inline bool classof(const Value *MA) {
return MA->getValueID() == MemoryDefVal;
@ -438,8 +434,6 @@ class MemoryPhi final : public MemoryAccess {
allocHungoffUses(ReservedSpace);
}
void *operator new(size_t, unsigned) = delete;
// Block iterator interface. This provides access to the list of incoming
// basic blocks, which parallels the list of incoming values.
typedef BasicBlock **block_iterator;


@ -1214,26 +1214,31 @@ class ScalarEvolution {
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0);
const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
return getAddExpr(Ops, Flags);
return getAddExpr(Ops, Flags, Depth);
}
const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
return getAddExpr(Ops, Flags);
return getAddExpr(Ops, Flags, Depth);
}
const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0);
const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
return getMulExpr(Ops, Flags);
return getMulExpr(Ops, Flags, Depth);
}
const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0) {
SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
return getMulExpr(Ops, Flags);
return getMulExpr(Ops, Flags, Depth);
}
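The new trailing Depth parameter lets these two- and three-operand conveniences forward a recursion depth into the full getAddExpr/getMulExpr. A minimal sketch of a caller threading its own depth through (SE, LHS, RHS, Step and Depth are assumed locals, not taken from this diff):

    // Hedged sketch: forward the current depth so nested rewrites hit the
    // same recursion cut-off as the expression builders themselves.
    const SCEV *Sum  = SE.getAddExpr(LHS, RHS, SCEV::FlagAnyWrap, Depth + 1);
    const SCEV *Prod = SE.getMulExpr(Sum, Step, SCEV::FlagAnyWrap, Depth + 1);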
const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
@ -1287,7 +1292,8 @@ class ScalarEvolution {
/// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. If the type must be extended, it is zero extended.
@ -1693,10 +1699,14 @@ class ScalarEvolution {
bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
bool NoWrap);
/// Get add expr already created or create a new one
/// Get add expr already created or create a new one.
const SCEV *getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags);
/// Get mul expr already created or create a new one.
const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags);
private:
FoldingSet<SCEV> UniqueSCEVs;
FoldingSet<SCEVPredicate> UniquePreds;


@ -235,6 +235,11 @@ class TargetTransformInfo {
/// starting with the sources of divergence.
bool isSourceOfDivergence(const Value *V) const;
// \brief Returns true for the target specific
// set of operations which produce uniform result
// even taking non-unform arguments
bool isAlwaysUniform(const Value *V) const;
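A target opts values into this set by overriding the hook in its TargetTransformInfo implementation. A minimal sketch, assuming an AMDGPU-style cross-lane broadcast intrinsic; the class name is hypothetical and the check is illustrative, not taken from this diff:

    bool MyGPUTTIImpl::isAlwaysUniform(const Value *V) {
      // A readfirstlane-style broadcast yields the same value in every lane
      // even when its operand is divergent.
      if (const auto *II = dyn_cast<IntrinsicInst>(V))
        return II->getIntrinsicID() == Intrinsic::amdgcn_readfirstlane;
      return false;
    }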
/// Returns the address space ID for a target's 'flat' address space. Note
/// this is not necessarily the same as addrspace(0), which LLVM sometimes
/// refers to as the generic address space. The flat address space is a
@ -821,6 +826,7 @@ class TargetTransformInfo::Concept {
virtual int getUserCost(const User *U) = 0;
virtual bool hasBranchDivergence() = 0;
virtual bool isSourceOfDivergence(const Value *V) = 0;
virtual bool isAlwaysUniform(const Value *V) = 0;
virtual unsigned getFlatAddressSpace() = 0;
virtual bool isLoweredToCall(const Function *F) = 0;
virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
@ -873,7 +879,7 @@ class TargetTransformInfo::Concept {
virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) = 0;
virtual unsigned getNumberOfRegisters(bool Vector) = 0;
virtual unsigned getRegisterBitWidth(bool Vector) = 0;
virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
virtual unsigned getMinVectorRegisterBitWidth() = 0;
virtual bool shouldConsiderAddressTypePromotion(
const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
@ -998,6 +1004,10 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
return Impl.isSourceOfDivergence(V);
}
bool isAlwaysUniform(const Value *V) override {
return Impl.isAlwaysUniform(V);
}
unsigned getFlatAddressSpace() override {
return Impl.getFlatAddressSpace();
}
@ -1119,7 +1129,7 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
unsigned getNumberOfRegisters(bool Vector) override {
return Impl.getNumberOfRegisters(Vector);
}
unsigned getRegisterBitWidth(bool Vector) override {
unsigned getRegisterBitWidth(bool Vector) const override {
return Impl.getRegisterBitWidth(Vector);
}
unsigned getMinVectorRegisterBitWidth() override {


@ -177,6 +177,8 @@ class TargetTransformInfoImplBase {
bool isSourceOfDivergence(const Value *V) { return false; }
bool isAlwaysUniform(const Value *V) { return false; }
unsigned getFlatAddressSpace () {
return -1;
}
@ -320,7 +322,7 @@ class TargetTransformInfoImplBase {
unsigned getNumberOfRegisters(bool Vector) { return 8; }
unsigned getRegisterBitWidth(bool Vector) { return 32; }
unsigned getRegisterBitWidth(bool Vector) const { return 32; }
unsigned getMinVectorRegisterBitWidth() { return 128; }


@ -20,6 +20,13 @@
namespace llvm {
/// The type of CFI jumptable needed for a function.
enum CfiFunctionLinkage {
CFL_Definition = 0,
CFL_Declaration = 1,
CFL_WeakDeclaration = 2
};
/// A call site that could be devirtualized.
struct DevirtCallSite {
/// The offset from the address point to the virtual function.


@ -249,8 +249,8 @@ template <typename T> class ArrayRef;
};
/// Returns true if the value \p V is a pointer into a ContantDataArray.
/// If successfull \p Index will point to a ConstantDataArray info object
/// with an apropriate offset.
/// If successful \p Index will point to a ConstantDataArray info object
/// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
unsigned ElementSize, uint64_t Offset = 0);


@ -1,4 +1,4 @@
//===-- llvm/BinaryFormat/ELF.h - ELF constants and structures --*- C++ -*-===//
//===- llvm/BinaryFormat/ELF.h - ELF constants and structures ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -20,27 +20,25 @@
#ifndef LLVM_BINARYFORMAT_ELF_H
#define LLVM_BINARYFORMAT_ELF_H
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DataTypes.h"
#include <cstdint>
#include <cstring>
namespace llvm {
namespace ELF {
typedef uint32_t Elf32_Addr; // Program address
typedef uint32_t Elf32_Off; // File offset
typedef uint16_t Elf32_Half;
typedef uint32_t Elf32_Word;
typedef int32_t Elf32_Sword;
using Elf32_Addr = uint32_t; // Program address
using Elf32_Off = uint32_t; // File offset
using Elf32_Half = uint16_t;
using Elf32_Word = uint32_t;
using Elf32_Sword = int32_t;
typedef uint64_t Elf64_Addr;
typedef uint64_t Elf64_Off;
typedef uint16_t Elf64_Half;
typedef uint32_t Elf64_Word;
typedef int32_t Elf64_Sword;
typedef uint64_t Elf64_Xword;
typedef int64_t Elf64_Sxword;
using Elf64_Addr = uint64_t;
using Elf64_Off = uint64_t;
using Elf64_Half = uint16_t;
using Elf64_Word = uint32_t;
using Elf64_Sword = int32_t;
using Elf64_Xword = uint64_t;
using Elf64_Sxword = int64_t;
// Object file magic string.
static const char ElfMagic[] = {0x7f, 'E', 'L', 'F', '\0'};
@ -75,9 +73,11 @@ struct Elf32_Ehdr {
Elf32_Half e_shentsize; // Size of an entry in the section header table
Elf32_Half e_shnum; // Number of entries in the section header table
Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
bool checkMagic() const {
return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
@ -99,9 +99,11 @@ struct Elf64_Ehdr {
Elf64_Half e_shentsize;
Elf64_Half e_shnum;
Elf64_Half e_shstrndx;
bool checkMagic() const {
return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
@ -683,6 +685,7 @@ enum : unsigned {
SHT_GROUP = 17, // Section group.
SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
SHT_LLVM_ODRTAB = 0x6fff4c00, // LLVM ODR table.
SHT_GNU_ATTRIBUTES = 0x6ffffff5, // Object attributes.
SHT_GNU_HASH = 0x6ffffff6, // GNU-style hash table.
SHT_GNU_verdef = 0x6ffffffd, // GNU version definitions.
@ -1356,7 +1359,6 @@ enum {
};
} // end namespace ELF
} // end namespace llvm
#endif
#endif // LLVM_BINARYFORMAT_ELF_H


@ -42,6 +42,12 @@ namespace llvm {
struct BitcodeFileContents;
/// Basic information extracted from a bitcode module to be used for LTO.
struct BitcodeLTOInfo {
bool IsThinLTO;
bool HasSummary;
};
/// Represents a module in a bitcode file.
class BitcodeModule {
// This covers the identification (if present) and module blocks.
@ -90,15 +96,17 @@ namespace llvm {
/// Read the entire bitcode module and return it.
Expected<std::unique_ptr<Module>> parseModule(LLVMContext &Context);
/// Check if the given bitcode buffer contains a summary block.
Expected<bool> hasSummary();
/// Returns information about the module to be used for LTO: whether to
/// compile with ThinLTO, and whether it has a summary.
Expected<BitcodeLTOInfo> getLTOInfo();
/// Parse the specified bitcode buffer, returning the module summary index.
Expected<std::unique_ptr<ModuleSummaryIndex>> getSummary();
/// Parse the specified bitcode buffer and merge its module summary index
/// into CombinedIndex.
Error readSummary(ModuleSummaryIndex &CombinedIndex, unsigned ModuleId);
Error readSummary(ModuleSummaryIndex &CombinedIndex, StringRef ModulePath,
uint64_t ModuleId);
};
struct BitcodeFileContents {
@ -147,8 +155,8 @@ namespace llvm {
Expected<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer,
LLVMContext &Context);
/// Check if the given bitcode buffer contains a summary block.
Expected<bool> hasGlobalValueSummary(MemoryBufferRef Buffer);
/// Returns LTO information for the specified bitcode file.
Expected<BitcodeLTOInfo> getBitcodeLTOInfo(MemoryBufferRef Buffer);
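A hedged sketch of the intended call pattern that replaces the old hasGlobalValueSummary probe (Buffer is an assumed MemoryBufferRef, inside a function that returns llvm::Error):

    Expected<BitcodeLTOInfo> Info = getBitcodeLTOInfo(Buffer);
    if (!Info)
      return Info.takeError();             // malformed bitcode, etc.
    if (Info->IsThinLTO && Info->HasSummary) {
      // ThinLTO path: read just the summary instead of parsing the module.
    }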
/// Parse the specified bitcode buffer, returning the module summary index.
Expected<std::unique_ptr<ModuleSummaryIndex>>
@ -157,7 +165,7 @@ namespace llvm {
/// Parse the specified bitcode buffer and merge the index into CombinedIndex.
Error readModuleSummaryIndex(MemoryBufferRef Buffer,
ModuleSummaryIndex &CombinedIndex,
unsigned ModuleId);
uint64_t ModuleId);
/// Parse the module summary index out of an IR file and return the module
/// summary index object if found, or an empty summary if not. If Path refers


@ -67,6 +67,10 @@ namespace llvm {
void writeModule(const Module *M, bool ShouldPreserveUseListOrder = false,
const ModuleSummaryIndex *Index = nullptr,
bool GenerateHash = false, ModuleHash *ModHash = nullptr);
void writeIndex(
const ModuleSummaryIndex *Index,
const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex);
};
/// \brief Write the specified module to the specified raw output stream.


@ -240,6 +240,14 @@ enum GlobalValueSummarySymtabCodes {
// summaries, but it can also appear in per-module summaries for PGO data.
// [valueid, guid]
FS_VALUE_GUID = 16,
// The list of local functions with CFI jump tables. Function names are
// strings in strtab.
// [n * name]
FS_CFI_FUNCTION_DEFS = 17,
// The list of external functions with CFI jump tables. Function names are
// strings in strtab.
// [n * name]
FS_CFI_FUNCTION_DECLS = 18,
};
enum MetadataCodes {


@ -93,6 +93,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
bool isSourceOfDivergence(const Value *V) { return false; }
bool isAlwaysUniform(const Value *V) { return false; }
unsigned getFlatAddressSpace() {
// Return an invalid address space.
return -1;
@ -346,7 +348,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }
unsigned getRegisterBitWidth(bool Vector) { return 32; }
unsigned getRegisterBitWidth(bool Vector) const { return 32; }
/// Estimate the overhead of scalarizing an instruction. Insert and Extract
/// are set if the result needs to be inserted and/or extracted from vectors.


@ -82,6 +82,11 @@ class FunctionLoweringInfo {
DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
SwiftErrorVRegUpwardsUse;
/// A map from instructions that define/use a swifterror value to the virtual
/// register that represents that def/use.
llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, unsigned>
SwiftErrorVRegDefUses;
/// The swifterror argument of the current function.
const Value *SwiftErrorArg;
@ -101,6 +106,13 @@ class FunctionLoweringInfo {
void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *,
unsigned);
/// Get or create the swifterror value virtual register for a def of a
/// swifterror by an instruction.
std::pair<unsigned, bool> getOrCreateSwiftErrorVRegDefAt(const Instruction *);
std::pair<unsigned, bool>
getOrCreateSwiftErrorVRegUseAt(const Instruction *, const MachineBasicBlock *,
const Value *);
/// ValueMap - Since we emit code for the function a basic block at a time,
/// we must remember which virtual registers hold the values for
/// cross-basic-block values.


@ -21,9 +21,11 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
namespace llvm {
// Forward declarations.
@ -99,6 +101,12 @@ class LegalizerHelper {
const LegalizerInfo &LI;
};
/// Helper function that replaces \p MI with a libcall.
LegalizerHelper::LegalizeResult
replaceWithLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
RTLIB::Libcall Libcall, const CallLowering::ArgInfo &Result,
ArrayRef<CallLowering::ArgInfo> Args);
} // End namespace llvm.
#endif


@ -40,8 +40,8 @@ class MachineIRBuilder {
MachineFunction *MF;
/// Information used to access the description of the opcodes.
const TargetInstrInfo *TII;
/// Information used to verify types are consistent.
const MachineRegisterInfo *MRI;
/// Information used to verify types are consistent and to create virtual registers.
MachineRegisterInfo *MRI;
/// Debug location to be set to any instruction we create.
DebugLoc DL;
@ -229,6 +229,26 @@ class MachineIRBuilder {
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
unsigned Op1);
/// Materialize and insert \p Res<def> = G_GEP \p Op0, (G_CONSTANT \p Value)
///
/// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
/// storing the resulting pointer in \p Res. If \p Value is zero then no
/// G_GEP or G_CONSTANT will be created and \pre Op0 will be assigned to
/// \p Res.
///
/// \pre setBasicBlock or setMI must have been called.
/// \pre \p Op0 must be a generic virtual register with pointer type.
/// \pre \p ValueTy must be a scalar type.
/// \pre \p Res must be 0. This is to detect confusion between
/// materializeGEP() and buildGEP().
/// \post \p Res will either be a new generic virtual register of the same
/// type as \p Op0 or \p Op0 itself.
///
/// \return a MachineInstrBuilder for the newly created instruction.
Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
const LLT &ValueTy,
uint64_t Value);
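A minimal usage sketch based only on the contract documented above (MIRBuilder, PtrReg and ByteOffset are assumed; the 64-bit scalar offset type is illustrative):

    unsigned Res = 0;                               // \pre Res must be 0
    if (auto MIB = MIRBuilder.materializeGEP(Res, PtrReg, LLT::scalar(64),
                                             /*Value=*/ByteOffset)) {
      // A G_GEP (fed by a G_CONSTANT) was emitted; Res is a fresh vreg.
    }
    // Either way Res now names the addressed pointer: a new register, or
    // PtrReg itself when ByteOffset was zero.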
/// Build and insert \p Res<def> = G_PTR_MASK \p Op0, \p NumBits
///
/// G_PTR_MASK clears the low bits of a pointer operand without destroying its


@ -333,12 +333,12 @@ namespace RTLIB {
MEMSET,
MEMMOVE,
// ELEMENT-WISE ATOMIC MEMORY
MEMCPY_ELEMENT_ATOMIC_1,
MEMCPY_ELEMENT_ATOMIC_2,
MEMCPY_ELEMENT_ATOMIC_4,
MEMCPY_ELEMENT_ATOMIC_8,
MEMCPY_ELEMENT_ATOMIC_16,
// ELEMENT-WISE UNORDERED-ATOMIC MEMORY of different element sizes
MEMCPY_ELEMENT_UNORDERED_ATOMIC_1,
MEMCPY_ELEMENT_UNORDERED_ATOMIC_2,
MEMCPY_ELEMENT_UNORDERED_ATOMIC_4,
MEMCPY_ELEMENT_UNORDERED_ATOMIC_8,
MEMCPY_ELEMENT_UNORDERED_ATOMIC_16,
// EXCEPTION HANDLING
UNWIND_RESUME,
@ -511,9 +511,10 @@ namespace RTLIB {
/// UNKNOWN_LIBCALL if there is none.
Libcall getSYNC(unsigned Opc, MVT VT);
/// getMEMCPY_ELEMENT_ATOMIC - Return MEMCPY_ELEMENT_ATOMIC_* value for the
/// given element size or UNKNOW_LIBCALL if there is none.
Libcall getMEMCPY_ELEMENT_ATOMIC(uint64_t ElementSize);
/// getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return
/// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
/// UNKNOW_LIBCALL if there is none.
Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
}
}


@ -1217,6 +1217,12 @@ class SelectionDAG {
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
unsigned Num);
/// If an existing load has uses of its chain, create a token factor node with
/// that chain and the new memory node's chain and update users of the old
/// chain to the token factor. This ensures that the new memory node will have
/// the same relative memory dependency position as the old load.
void makeEquivalentMemoryOrdering(LoadSDNode *Old, SDValue New);
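A hedged sketch of the call site this helper is meant for, when lowering swaps one memory node for another (Ld is an assumed LoadSDNode*, NewNode an assumed SDValue):

    // Rewire value uses of the old load, then keep its chain users ordered
    // after the replacement via the token factor this helper builds.
    DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 0), NewNode);
    DAG.makeEquivalentMemoryOrdering(Ld, NewNode);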
/// Topological-sort the AllNodes list and a
/// assign a unique node id for each node in the DAG based on their
/// topological order. Returns the number of nodes.


@ -42,9 +42,8 @@ class TargetLoweringObjectFileELF : public TargetLoweringObjectFile {
~TargetLoweringObjectFileELF() override = default;
/// Emit Obj-C garbage collection and linker options.
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
const TargetMachine &TM) const override;
void emitModuleMetadata(MCStreamer &Streamer, Module &M,
const TargetMachine &TM) const override;
void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &TM,
const MCSymbol *Sym) const override;
@ -99,9 +98,8 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile {
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
/// Emit the module flags that specify the garbage collection information.
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
const TargetMachine &TM) const override;
void emitModuleMetadata(MCStreamer &Streamer, Module &M,
const TargetMachine &TM) const override;
MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
@ -155,9 +153,8 @@ class TargetLoweringObjectFileCOFF : public TargetLoweringObjectFile {
const TargetMachine &TM) const override;
/// Emit Obj-C garbage collection and linker options.
void emitModuleFlags(MCStreamer &Streamer,
ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
const TargetMachine &TM) const override;
void emitModuleMetadata(MCStreamer &Streamer, Module &M,
const TargetMachine &TM) const override;
MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;


@ -418,6 +418,8 @@ CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ProcSymFlags)
/// Corresponds to COMPILESYM2::Flags bitfield.
enum class CompileSym2Flags : uint32_t {
None = 0,
SourceLanguageMask = 0xFF,
EC = 1 << 8,
NoDbgInfo = 1 << 9,
LTCG = 1 << 10,
@ -432,6 +434,8 @@ CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym2Flags)
/// Corresponds to COMPILESYM3::Flags bitfield.
enum class CompileSym3Flags : uint32_t {
None = 0,
SourceLanguageMask = 0xFF,
EC = 1 << 8,
NoDbgInfo = 1 << 9,
LTCG = 1 << 10,
@ -448,6 +452,7 @@ enum class CompileSym3Flags : uint32_t {
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym3Flags)
enum class ExportFlags : uint16_t {
None = 0,
IsConstant = 1 << 0,
IsData = 1 << 1,
IsPrivate = 1 << 2,


@ -49,6 +49,7 @@ class DebugFrameDataSubsection final : public DebugSubsection {
Error commit(BinaryStreamWriter &Writer) const override;
void addFrameData(const FrameData &Frame);
void setFrames(ArrayRef<FrameData> Frames);
private:
std::vector<FrameData> Frames;


@ -19,7 +19,7 @@
namespace llvm {
namespace codeview {
class DebugInlineeLinesSubsectionsRef;
class DebugInlineeLinesSubsectionRef;
class DebugChecksumsSubsection;
enum class InlineeLinesSignature : uint32_t {


@ -49,13 +49,13 @@ class DebugSubsectionRecord {
class DebugSubsectionRecordBuilder {
public:
DebugSubsectionRecordBuilder(std::unique_ptr<DebugSubsection> Subsection,
DebugSubsectionRecordBuilder(std::shared_ptr<DebugSubsection> Subsection,
CodeViewContainer Container);
uint32_t calculateSerializedLength();
Error commit(BinaryStreamWriter &Writer) const;
private:
std::unique_ptr<DebugSubsection> Subsection;
std::shared_ptr<DebugSubsection> Subsection;
CodeViewContainer Container;
};
@ -64,6 +64,9 @@ class DebugSubsectionRecordBuilder {
template <> struct VarStreamArrayExtractor<codeview::DebugSubsectionRecord> {
Error operator()(BinaryStreamRef Stream, uint32_t &Length,
codeview::DebugSubsectionRecord &Info) {
// FIXME: We need to pass the container type through to this function. In
// practice this isn't super important since the subsection header describes
// its length and we can just skip it. It's more important when writing.
if (auto EC = codeview::DebugSubsectionRecord::initialize(
Stream, Info, codeview::CodeViewContainer::Pdb))
return EC;


@ -12,6 +12,7 @@
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
#include "llvm/Support/Error.h"
#include <cstdint>
@ -30,56 +31,7 @@ class DebugStringTableSubsectionRef;
class DebugSymbolRVASubsectionRef;
class DebugSymbolsSubsectionRef;
class DebugUnknownSubsectionRef;
struct DebugSubsectionState {
public:
// If no subsections are known about initially, we find as much as we can.
DebugSubsectionState();
// If only a string table subsection is given, we find a checksums subsection.
explicit DebugSubsectionState(const DebugStringTableSubsectionRef &Strings);
// If both subsections are given, we don't need to find anything.
DebugSubsectionState(const DebugStringTableSubsectionRef &Strings,
const DebugChecksumsSubsectionRef &Checksums);
template <typename T> void initialize(T &&FragmentRange) {
for (const DebugSubsectionRecord &R : FragmentRange) {
if (Strings && Checksums)
return;
if (R.kind() == DebugSubsectionKind::FileChecksums) {
initializeChecksums(R);
continue;
}
if (R.kind() == DebugSubsectionKind::StringTable && !Strings) {
// While in practice we should never encounter a string table even
// though the string table is already initialized, in theory it's
// possible. PDBs are supposed to have one global string table and
// then this subsection should not appear. Whereas object files are
// supposed to have this subsection appear exactly once. However,
// for testing purposes it's nice to be able to test this subsection
// independently of one format or the other, so for some tests we
// manually construct a PDB that contains this subsection in addition
// to a global string table.
initializeStrings(R);
continue;
}
}
}
const DebugStringTableSubsectionRef &strings() const { return *Strings; }
const DebugChecksumsSubsectionRef &checksums() const { return *Checksums; }
private:
void initializeStrings(const DebugSubsectionRecord &SR);
void initializeChecksums(const DebugSubsectionRecord &FCR);
std::unique_ptr<DebugStringTableSubsectionRef> OwnedStrings;
std::unique_ptr<DebugChecksumsSubsectionRef> OwnedChecksums;
const DebugStringTableSubsectionRef *Strings = nullptr;
const DebugChecksumsSubsectionRef *Checksums = nullptr;
};
class StringsAndChecksumsRef;
class DebugSubsectionVisitor {
public:
@ -89,38 +41,38 @@ class DebugSubsectionVisitor {
return Error::success();
}
virtual Error visitLines(DebugLinesSubsectionRef &Lines,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error visitFileChecksums(DebugChecksumsSubsectionRef &Checksums,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error visitInlineeLines(DebugInlineeLinesSubsectionRef &Inlinees,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error
visitCrossModuleExports(DebugCrossModuleExportsSubsectionRef &CSE,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error
visitCrossModuleImports(DebugCrossModuleImportsSubsectionRef &CSE,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error visitStringTable(DebugStringTableSubsectionRef &ST,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error visitSymbols(DebugSymbolsSubsectionRef &CSE,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error visitFrameData(DebugFrameDataSubsectionRef &FD,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
virtual Error visitCOFFSymbolRVAs(DebugSymbolRVASubsectionRef &RVAs,
const DebugSubsectionState &State) = 0;
const StringsAndChecksumsRef &State) = 0;
};
Error visitDebugSubsection(const DebugSubsectionRecord &R,
DebugSubsectionVisitor &V,
const DebugSubsectionState &State);
const StringsAndChecksumsRef &State);
namespace detail {
template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
DebugSubsectionState &State) {
StringsAndChecksumsRef &State) {
State.initialize(std::forward<T>(FragmentRange));
for (const DebugSubsectionRecord &L : FragmentRange) {
@ -133,7 +85,7 @@ Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V) {
DebugSubsectionState State;
StringsAndChecksumsRef State;
return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
State);
}
@ -141,7 +93,7 @@ Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V) {
template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
const DebugStringTableSubsectionRef &Strings) {
DebugSubsectionState State(Strings);
StringsAndChecksumsRef State(Strings);
return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
State);
}
@ -150,7 +102,7 @@ template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
const DebugStringTableSubsectionRef &Strings,
const DebugChecksumsSubsectionRef &Checksums) {
DebugSubsectionState State(Strings, Checksums);
StringsAndChecksumsRef State(Strings, Checksums);
return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
State);
}


@ -12,7 +12,10 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatProviders.h"
#include "llvm/Support/FormatVariadic.h"
namespace llvm {
namespace codeview {
@ -35,6 +38,20 @@ inline detail::GuidAdapter fmt_guid(ArrayRef<uint8_t> Item) {
return detail::GuidAdapter(Item);
}
}
template <> struct format_provider<codeview::TypeIndex> {
public:
static void format(const codeview::TypeIndex &V, llvm::raw_ostream &Stream,
StringRef Style) {
if (V.isNoneType())
Stream << "<no type>";
else {
Stream << formatv("{0:X+4}", V.getIndex());
if (V.isSimple())
Stream << " (" << codeview::TypeIndex::simpleTypeName(V) << ")";
}
}
};
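With this specialization, a TypeIndex can be passed straight to formatv; a small sketch (the concrete index is illustrative):

    codeview::TypeIndex TI = codeview::TypeIndex::Int32();
    // Renders as a zero-padded hex index, with the simple-type name appended
    // for simple types and "<no type>" for the none sentinel.
    std::string S = formatv("{0}", TI).str();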
}
#endif


@ -0,0 +1,106 @@
//===- StringsAndChecksums.h ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_DEBUGINFO_CODEVIEW_STRINGS_AND_CHECKSUMS_H
#define LLVM_DEBUGINFO_CODEVIEW_STRINGS_AND_CHECKSUMS_H
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include <memory>
namespace llvm {
namespace codeview {
class DebugSubsectionRecord;
class DebugChecksumsSubsectionRef;
class DebugStringTableSubsectionRef;
class DebugChecksumsSubsection;
class DebugStringTableSubsection;
class StringsAndChecksumsRef {
public:
// If no subsections are known about initially, we find as much as we can.
StringsAndChecksumsRef();
// If only a string table subsection is given, we find a checksums subsection.
explicit StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings);
// If both subsections are given, we don't need to find anything.
StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings,
const DebugChecksumsSubsectionRef &Checksums);
void setChecksums(const DebugChecksumsSubsectionRef &CS);
template <typename T> void initialize(T &&FragmentRange) {
for (const DebugSubsectionRecord &R : FragmentRange) {
if (Strings && Checksums)
return;
if (R.kind() == DebugSubsectionKind::FileChecksums) {
initializeChecksums(R);
continue;
}
if (R.kind() == DebugSubsectionKind::StringTable && !Strings) {
// While in practice we should never encounter a string table even
// though the string table is already initialized, in theory it's
// possible. PDBs are supposed to have one global string table and
// then this subsection should not appear. Whereas object files are
// supposed to have this subsection appear exactly once. However,
// for testing purposes it's nice to be able to test this subsection
// independently of one format or the other, so for some tests we
// manually construct a PDB that contains this subsection in addition
// to a global string table.
initializeStrings(R);
continue;
}
}
}
const DebugStringTableSubsectionRef &strings() const { return *Strings; }
const DebugChecksumsSubsectionRef &checksums() const { return *Checksums; }
bool hasStrings() const { return Strings != nullptr; }
bool hasChecksums() const { return Checksums != nullptr; }
private:
void initializeStrings(const DebugSubsectionRecord &SR);
void initializeChecksums(const DebugSubsectionRecord &FCR);
std::unique_ptr<DebugStringTableSubsectionRef> OwnedStrings;
std::unique_ptr<DebugChecksumsSubsectionRef> OwnedChecksums;
const DebugStringTableSubsectionRef *Strings = nullptr;
const DebugChecksumsSubsectionRef *Checksums = nullptr;
};
class StringsAndChecksums {
public:
using StringsPtr = std::shared_ptr<DebugStringTableSubsection>;
using ChecksumsPtr = std::shared_ptr<DebugChecksumsSubsection>;
// If no subsections are known about initially, we find as much as we can.
StringsAndChecksums() {}
void setStrings(const StringsPtr &SP) { Strings = SP; }
void setChecksums(const ChecksumsPtr &CP) { Checksums = CP; }
const StringsPtr &strings() const { return Strings; }
const ChecksumsPtr &checksums() const { return Checksums; }
bool hasStrings() const { return Strings != nullptr; }
bool hasChecksums() const { return Checksums != nullptr; }
private:
StringsPtr Strings;
ChecksumsPtr Checksums;
};
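The Ref flavour holds non-owning pointers it can discover lazily while reading; the plain flavour shares ownership with builders, matching DebugSubsectionRecordBuilder's move to shared_ptr above. A hedged reader-side sketch using the two-argument visitor entry point shown earlier (Subsections and MyVisitor are assumed):

    // Nothing known up front: the visitor framework finds the string table
    // and file checksums while iterating and hands them to each callback.
    if (llvm::Error E = codeview::visitDebugSubsections(Subsections, MyVisitor))
      return E;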
} // namespace codeview
} // namespace llvm
#endif


@ -363,7 +363,7 @@ class PublicSym32 : public SymbolRecord {
: SymbolRecord(SymbolRecordKind::PublicSym32),
RecordOffset(RecordOffset) {}
uint32_t Index;
TypeIndex Index;
uint32_t Offset;
uint16_t Segment;
StringRef Name;
@ -379,7 +379,7 @@ class RegisterSym : public SymbolRecord {
: SymbolRecord(SymbolRecordKind::RegisterSym),
RecordOffset(RecordOffset) {}
uint32_t Index;
TypeIndex Index;
RegisterId Register;
StringRef Name;
@ -679,7 +679,7 @@ class FileStaticSym : public SymbolRecord {
: SymbolRecord(SymbolRecordKind::FileStaticSym),
RecordOffset(RecordOffset) {}
uint32_t Index;
TypeIndex Index;
uint32_t ModFilenameOffset;
LocalSymFlags Flags;
StringRef Name;
@ -814,7 +814,7 @@ class FrameCookieSym : public SymbolRecord {
uint32_t CodeOffset;
uint16_t Register;
uint8_t CookieKind;
FrameCookieKind CookieKind;
uint8_t Flags;
uint32_t RecordOffset;
@ -871,7 +871,7 @@ class RegRelativeSym : public SymbolRecord {
uint32_t Offset;
TypeIndex Type;
uint16_t Register;
RegisterId Register;
StringRef Name;
uint32_t RecordOffset;


@ -248,6 +248,8 @@ class TypeIndex {
return A.toArrayIndex() - B.toArrayIndex();
}
static StringRef simpleTypeName(TypeIndex TI);
private:
support::ulittle32_t Index;
};


@ -50,6 +50,10 @@ class DWARFAcceleratorTable {
: AccelSection(AccelSection), StringSection(StringSection), Relocs(Relocs) {}
bool extract();
uint32_t getNumBuckets();
uint32_t getNumHashes();
uint32_t getSizeHdr();
uint32_t getHeaderDataLength();
void dump(raw_ostream &OS) const;
};


@ -20,6 +20,7 @@ struct DWARFAttribute;
class DWARFContext;
class DWARFDie;
class DWARFUnit;
class DWARFAcceleratorTable;
/// A class that verifies DWARF debug information given a DWARF Context.
class DWARFVerifier {
@ -29,8 +30,9 @@ class DWARFVerifier {
/// can verify each reference points to a valid DIE and not an offset that
/// lies between to valid DIEs.
std::map<uint64_t, std::set<uint32_t>> ReferenceToDIEOffsets;
uint32_t NumDebugInfoErrors;
uint32_t NumDebugLineErrors;
uint32_t NumDebugInfoErrors = 0;
uint32_t NumDebugLineErrors = 0;
uint32_t NumAppleNamesErrors = 0;
/// Verifies the attribute's DWARF attribute and its value.
///
@ -38,8 +40,8 @@ class DWARFVerifier {
/// - DW_AT_ranges values is a valid .debug_ranges offset
/// - DW_AT_stmt_list is a valid .debug_line offset
///
/// @param Die The DWARF DIE that owns the attribute value
/// @param AttrValue The DWARF attribute value to check
/// \param Die The DWARF DIE that owns the attribute value
/// \param AttrValue The DWARF attribute value to check
void verifyDebugInfoAttribute(const DWARFDie &Die, DWARFAttribute &AttrValue);
/// Verifies the attribute's DWARF form.
@ -49,8 +51,8 @@ class DWARFVerifier {
/// - All DW_FORM_ref_addr values have valid .debug_info offsets
/// - All DW_FORM_strp values have valid .debug_str offsets
///
/// @param Die The DWARF DIE that owns the attribute value
/// @param AttrValue The DWARF attribute value to check
/// \param Die The DWARF DIE that owns the attribute value
/// \param AttrValue The DWARF attribute value to check
void verifyDebugInfoForm(const DWARFDie &Die, DWARFAttribute &AttrValue);
/// Verifies the all valid references that were found when iterating through
@ -75,13 +77,13 @@ class DWARFVerifier {
public:
DWARFVerifier(raw_ostream &S, DWARFContext &D)
: OS(S), DCtx(D), NumDebugInfoErrors(0), NumDebugLineErrors(0) {}
: OS(S), DCtx(D) {}
/// Verify the information in the .debug_info section.
///
/// Any errors are reported to the stream that was this object was
/// constructed with.
///
/// @return True if the .debug_info verifies successfully, false otherwise.
/// \returns true if the .debug_info verifies successfully, false otherwise.
bool handleDebugInfo();
/// Verify the information in the .debug_line section.
@ -89,8 +91,16 @@ class DWARFVerifier {
/// Any errors are reported to the stream that was this object was
/// constructed with.
///
/// @return True if the .debug_line verifies successfully, false otherwise.
/// \returns true if the .debug_line verifies successfully, false otherwise.
bool handleDebugLine();
/// Verify the information in the .apple_names accelerator table.
///
/// Any errors are reported to the stream that was this object was
/// constructed with.
///
/// \returns true if the .apple_names verifies successfully, false otherwise.
bool handleAppleNames();
};
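A hedged sketch of driving the verifier the way a dwarfdump-style verify option might (OS and DCtx are assumed):

    DWARFVerifier Verifier(OS, DCtx);
    bool Success = Verifier.handleDebugInfo();         // .debug_info
    Success = Verifier.handleDebugLine() && Success;   // .debug_line
    Success = Verifier.handleAppleNames() && Success;  // new: .apple_names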
} // end namespace llvm


@ -50,12 +50,14 @@ class DbiModuleDescriptorBuilder {
void addSymbol(codeview::CVSymbol Symbol);
void
addDebugSubsection(std::unique_ptr<codeview::DebugSubsection> Subsection);
addDebugSubsection(std::shared_ptr<codeview::DebugSubsection> Subsection);
uint16_t getStreamIndex() const;
StringRef getModuleName() const { return ModuleName; }
StringRef getObjFileName() const { return ObjFileName; }
unsigned getModuleIndex() const { return Layout.Mod; }
ArrayRef<std::string> source_files() const {
return makeArrayRef(SourceFiles);
}


@ -12,6 +12,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
@ -21,6 +22,7 @@
#include <vector>
namespace llvm {
namespace codeview {}
namespace pdb {
class DbiModuleList;


@ -49,7 +49,6 @@ class DbiStreamBuilder {
void setPdbDllRbld(uint16_t R);
void setFlags(uint16_t F);
void setMachineType(PDB_Machine M);
void setSectionContribs(ArrayRef<SectionContrib> SecMap);
void setSectionMap(ArrayRef<SecMapEntry> SecMap);
// Add given bytes as a new stream.
@ -65,10 +64,8 @@ class DbiStreamBuilder {
Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef MsfBuffer);
// A helper function to create Section Contributions from COFF input
// section headers.
static std::vector<SectionContrib>
createSectionContribs(ArrayRef<llvm::object::coff_section> SecHdrs);
void addSectionContrib(DbiModuleDescriptorBuilder *ModuleDbi,
const llvm::object::coff_section *SecHdr);
// A helper function to create a Section Map from a COFF section header.
static std::vector<SecMapEntry>
@ -112,7 +109,7 @@ class DbiStreamBuilder {
WritableBinaryStreamRef NamesBuffer;
MutableBinaryByteStream FileInfoBuffer;
ArrayRef<SectionContrib> SectionContribs;
std::vector<SectionContrib> SectionContribs;
ArrayRef<SecMapEntry> SectionMap;
llvm::SmallVector<DebugStream, (int)DbgHeaderType::Max> DbgStreams;
};


@ -35,6 +35,7 @@ class InfoStream {
uint32_t getStreamSize() const;
bool containsIdStream() const;
PdbRaw_ImplVer getVersion() const;
uint32_t getSignature() const;
uint32_t getAge() const;


@ -31,6 +31,7 @@ class ModuleDebugStreamRef {
public:
ModuleDebugStreamRef(const DbiModuleDescriptor &Module,
std::unique_ptr<msf::MappedBlockStream> Stream);
ModuleDebugStreamRef(ModuleDebugStreamRef &&Other) = default;
~ModuleDebugStreamRef();
Error reload();
@ -40,6 +41,12 @@ class ModuleDebugStreamRef {
iterator_range<codeview::CVSymbolArray::Iterator>
symbols(bool *HadError) const;
const codeview::CVSymbolArray &getSymbolArray() const {
return SymbolsSubstream;
}
ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = default;
llvm::iterator_range<DebugSubsectionIterator> subsections() const;
bool hasDebugSubsections() const;
@ -54,7 +61,7 @@ class ModuleDebugStreamRef {
uint32_t Signature;
std::unique_ptr<msf::MappedBlockStream> Stream;
std::shared_ptr<msf::MappedBlockStream> Stream;
codeview::CVSymbolArray SymbolsSubstream;
BinaryStreamRef C11LinesSubstream;


@ -108,6 +108,8 @@ class PDBFile : public msf::IMSFFile {
bool hasPDBTpiStream() const;
bool hasPDBStringTable();
uint32_t getPointerSize();
private:
Expected<std::unique_ptr<msf::MappedBlockStream>>
safelyCreateIndexedStream(const msf::MSFLayout &Layout,


@ -45,7 +45,7 @@ class PDBStringTable {
FixedStreamArray<support::ulittle32_t> name_ids() const;
codeview::DebugStringTableSubsectionRef getStringTable() const;
const codeview::DebugStringTableSubsectionRef &getStringTable() const;
private:
Error readHeader(BinaryStreamReader &Reader);


@ -41,10 +41,7 @@ class PDBStringTableBuilder {
uint32_t calculateSerializedSize() const;
Error commit(BinaryStreamWriter &Writer) const;
codeview::DebugStringTableSubsection &getStrings() { return Strings; }
const codeview::DebugStringTableSubsection &getStrings() const {
return Strings;
}
void setStrings(const codeview::DebugStringTableSubsection &Strings);
private:
uint32_t calculateHashTableSize() const;


@ -35,6 +35,7 @@ class PublicsStream {
uint32_t getSymHash() const;
uint32_t getAddrMap() const;
uint32_t getNumBuckets() const { return NumBuckets; }
Expected<const codeview::CVSymbolArray &> getSymbolArray() const;
iterator_range<codeview::CVSymbolArray::Iterator>
getSymbols(bool *HadError) const;
FixedStreamArray<support::ulittle32_t> getHashBuckets() const {


@ -98,15 +98,19 @@ enum class DbgHeaderType : uint16_t {
};
enum class OMFSegDescFlags : uint16_t {
None = 0,
Read = 1 << 0, // Segment is readable.
Write = 1 << 1, // Segment is writable.
Execute = 1 << 2, // Segment is executable.
AddressIs32Bit = 1 << 3, // Descriptor describes a 32-bit linear address.
IsSelector = 1 << 8, // Frame represents a selector.
IsAbsoluteAddress = 1 << 9, // Frame represents an absolute address.
IsGroup = 1 << 10 // If set, descriptor represents a group.
IsGroup = 1 << 10, // If set, descriptor represents a group.
LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ IsGroup)
};
LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
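The two added macros (from llvm/ADT/BitmaskEnum.h) give the scoped enum ordinary bitwise operators; a small sketch:

    using llvm::pdb::OMFSegDescFlags;
    OMFSegDescFlags F = OMFSegDescFlags::Read | OMFSegDescFlags::Write;
    if ((F & OMFSegDescFlags::Execute) == OMFSegDescFlags::None) {
      // a readable, writable, non-executable segment
    }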
} // end namespace pdb
} // end namespace llvm


@ -27,6 +27,10 @@ class SymbolStream {
~SymbolStream();
Error reload();
const codeview::CVSymbolArray &getSymbolArray() const {
return SymbolRecords;
}
iterator_range<codeview::CVSymbolArray::Iterator>
getSymbols(bool *HadError) const;


@ -58,6 +58,8 @@ class TpiStreamBuilder {
Error finalizeMsfLayout();
uint32_t getRecordCount() const { return TypeRecords.size(); }
Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef Buffer);
uint32_t calculateSerializedLength();


@ -68,11 +68,8 @@ class ConstantData : public Constant {
void *operator new(size_t s) { return User::operator new(s, 0); }
public:
ConstantData() = delete;
ConstantData(const ConstantData &) = delete;
void *operator new(size_t, unsigned) = delete;
/// Methods to support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Value *V) {
return V->getValueID() >= ConstantDataFirstVal &&
@ -691,8 +688,6 @@ class ConstantDataArray final : public ConstantDataSequential {
public:
ConstantDataArray(const ConstantDataArray &) = delete;
void *operator new(size_t, unsigned) = delete;
/// get() constructors - Return a constant with array type with an element
/// count and element type matching the ArrayRef passed in. Note that this
/// can return a ConstantAggregateZero object.
@ -752,8 +747,6 @@ class ConstantDataVector final : public ConstantDataSequential {
public:
ConstantDataVector(const ConstantDataVector &) = delete;
void *operator new(size_t, unsigned) = delete;
/// get() constructors - Return a constant with vector type with an element
/// count and element type matching the ArrayRef passed in. Note that this
/// can return a ConstantAggregateZero object.
@ -830,8 +823,6 @@ class BlockAddress final : public Constant {
Value *handleOperandChangeImpl(Value *From, Value *To);
public:
void *operator new(size_t, unsigned) = delete;
/// Return a BlockAddress for the specified function and basic block.
static BlockAddress *get(Function *F, BasicBlock *BB);


@ -2117,9 +2117,6 @@ class DIVariable : public DINode {
/// variable, or the location of a single piece of a variable, or (when using
/// DW_OP_stack_value) is the constant variable value.
///
/// FIXME: Instead of DW_OP_plus taking an argument, this should use DW_OP_const
/// and have DW_OP_plus consume the topmost elements on the stack.
///
/// TODO: Co-allocate the expression elements.
/// TODO: Separate from MDNode, or otherwise drop Distinct and Temporary
/// storage types.


@ -78,8 +78,6 @@ class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
return User::operator new(s, 1);
}
void *operator new(size_t, unsigned) = delete;
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);


@ -435,27 +435,25 @@ class IRBuilderBase {
MDNode *ScopeTag = nullptr,
MDNode *NoAliasTag = nullptr);
/// \brief Create and insert an atomic memcpy between the specified
/// pointers.
/// \brief Create and insert an element unordered-atomic memcpy between the
/// specified pointers.
///
/// If the pointers aren't i8*, they will be converted. If a TBAA tag is
/// specified, it will be added to the instruction. Likewise with alias.scope
/// and noalias tags.
CallInst *CreateElementAtomicMemCpy(
Value *Dst, Value *Src, uint64_t NumElements, uint32_t ElementSize,
CallInst *CreateElementUnorderedAtomicMemCpy(
Value *Dst, Value *Src, uint64_t Size, uint32_t ElementSize,
MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) {
return CreateElementAtomicMemCpy(Dst, Src, getInt64(NumElements),
ElementSize, TBAATag, TBAAStructTag,
ScopeTag, NoAliasTag);
return CreateElementUnorderedAtomicMemCpy(
Dst, Src, getInt64(Size), ElementSize, TBAATag, TBAAStructTag, ScopeTag,
NoAliasTag);
}
CallInst *CreateElementAtomicMemCpy(Value *Dst, Value *Src,
Value *NumElements, uint32_t ElementSize,
MDNode *TBAATag = nullptr,
MDNode *TBAAStructTag = nullptr,
MDNode *ScopeTag = nullptr,
MDNode *NoAliasTag = nullptr);
CallInst *CreateElementUnorderedAtomicMemCpy(
Value *Dst, Value *Src, Value *Size, uint32_t ElementSize,
MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
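Note the third argument is now a total size rather than an element count; a hedged sketch of emitting the intrinsic (Builder, Dst and Src are assumed):

    // Copy 64 bytes as unordered-atomic 4-byte elements.
    Builder.CreateElementUnorderedAtomicMemCpy(Dst, Src, /*Size=*/64,
                                               /*ElementSize=*/4);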
/// \brief Create and insert a memmove between the specified
/// pointers.


@ -294,8 +294,6 @@ class UnaryInstruction : public Instruction {
return User::operator new(s, 1);
}
void *operator new(size_t, unsigned) = delete;
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@ -343,8 +341,6 @@ class BinaryOperator : public Instruction {
return User::operator new(s, 2);
}
void *operator new(size_t, unsigned) = delete;
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@ -907,15 +903,11 @@ class CmpInst : public Instruction {
BasicBlock *InsertAtEnd);
public:
CmpInst() = delete;
// allocate space for exactly two operands
void *operator new(size_t s) {
return User::operator new(s, 2);
}
void *operator new(size_t, unsigned) = delete;
/// Construct a compare instruction, given the opcode, the predicate and
/// the two operands. Optionally (if InstBefore is specified) insert the
/// instruction into a BasicBlock right before the specified instruction.


@ -337,8 +337,6 @@ class StoreInst : public Instruction {
return User::operator new(s, 2);
}
void *operator new(size_t, unsigned) = delete;
/// Return true if this is a store to a volatile memory location.
bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
@ -460,8 +458,6 @@ class FenceInst : public Instruction {
return User::operator new(s, 0);
}
void *operator new(size_t, unsigned) = delete;
/// Returns the ordering effect of this fence.
AtomicOrdering getOrdering() const {
return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
@ -538,8 +534,6 @@ class AtomicCmpXchgInst : public Instruction {
return User::operator new(s, 3);
}
void *operator new(size_t, unsigned) = delete;
/// Return true if this is a cmpxchg from a volatile memory
/// location.
///
@ -728,8 +722,6 @@ class AtomicRMWInst : public Instruction {
return User::operator new(s, 2);
}
void *operator new(size_t, unsigned) = delete;
BinOp getOperation() const {
return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
}
@ -2234,8 +2226,6 @@ class ShuffleVectorInst : public Instruction {
return User::operator new(s, 3);
}
void *operator new(size_t, unsigned) = delete;
/// Return true if a shufflevector instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *V1, const Value *V2,
@ -2467,8 +2457,6 @@ class InsertValueInst : public Instruction {
return User::operator new(s, 2);
}
void *operator new(size_t, unsigned) = delete;
static InsertValueInst *Create(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr = "",
@ -2596,11 +2584,6 @@ class PHINode : public Instruction {
allocHungoffUses(ReservedSpace);
}
// allocate space for exactly zero operands
void *operator new(size_t s) {
return User::operator new(s);
}
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@ -2615,8 +2598,6 @@ class PHINode : public Instruction {
}
public:
void *operator new(size_t, unsigned) = delete;
/// Constructors - NumReservedValues is a hint for the number of incoming
/// edges that this phi node will have (use 0 if you really have no idea).
static PHINode *Create(Type *Ty, unsigned NumReservedValues,
@ -2834,8 +2815,6 @@ class LandingPadInst : public Instruction {
LandingPadInst *cloneImpl() const;
public:
void *operator new(size_t, unsigned) = delete;
/// Constructors - NumReservedClauses is a hint for the number of incoming
/// clauses that this landingpad will have (use 0 if you really have no idea).
static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
@ -3134,8 +3113,6 @@ class SwitchInst : public TerminatorInst {
SwitchInst *cloneImpl() const;
public:
void *operator new(size_t, unsigned) = delete;
// -2
static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
@ -3489,8 +3466,6 @@ class IndirectBrInst : public TerminatorInst {
IndirectBrInst *cloneImpl() const;
public:
void *operator new(size_t, unsigned) = delete;
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
Instruction *InsertBefore = nullptr) {
return new IndirectBrInst(Address, NumDests, InsertBefore);
@ -4173,8 +4148,6 @@ class CatchSwitchInst : public TerminatorInst {
CatchSwitchInst *cloneImpl() const;
public:
void *operator new(size_t, unsigned) = delete;
static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumHandlers,
const Twine &NameStr = "",
@ -4609,8 +4582,6 @@ class UnreachableInst : public TerminatorInst {
return User::operator new(s, 0);
}
void *operator new(size_t, unsigned) = delete;
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:


@ -205,25 +205,91 @@ namespace llvm {
};
/// This class represents atomic memcpy intrinsic
/// TODO: Integrate this class into MemIntrinsic hierarchy.
class ElementAtomicMemCpyInst : public IntrinsicInst {
/// TODO: Integrate this class into MemIntrinsic hierarchy; for now this is
/// C&P of all methods from that hierarchy
class ElementUnorderedAtomicMemCpyInst : public IntrinsicInst {
private:
enum { ARG_DEST = 0, ARG_SOURCE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 };
public:
Value *getRawDest() const { return getArgOperand(0); }
Value *getRawSource() const { return getArgOperand(1); }
Value *getRawDest() const {
return const_cast<Value *>(getArgOperand(ARG_DEST));
}
const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
Value *getNumElements() const { return getArgOperand(2); }
void setNumElements(Value *V) { setArgOperand(2, V); }
/// Return the arguments to the instruction.
Value *getRawSource() const {
return const_cast<Value *>(getArgOperand(ARG_SOURCE));
}
const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); }
Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); }
uint64_t getSrcAlignment() const { return getParamAlignment(0); }
uint64_t getDstAlignment() const { return getParamAlignment(1); }
Value *getLength() const {
return const_cast<Value *>(getArgOperand(ARG_LENGTH));
}
const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
uint64_t getElementSizeInBytes() const {
Value *Arg = getArgOperand(3);
return cast<ConstantInt>(Arg)->getZExtValue();
bool isVolatile() const { return false; }
Value *getRawElementSizeInBytes() const {
return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
}
ConstantInt *getElementSizeInBytesCst() const {
return cast<ConstantInt>(getRawElementSizeInBytes());
}
uint32_t getElementSizeInBytes() const {
return getElementSizeInBytesCst()->getZExtValue();
}
/// This is just like getRawDest, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
Value *getDest() const { return getRawDest()->stripPointerCasts(); }
/// This is just like getRawSource, but it strips off any cast
/// instructions that feed it, giving the original input. The returned
/// value is guaranteed to be a pointer.
Value *getSource() const { return getRawSource()->stripPointerCasts(); }
unsigned getDestAddressSpace() const {
return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
}
unsigned getSourceAddressSpace() const {
return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
}
/// Set the specified arguments of the instruction.
void setDest(Value *Ptr) {
assert(getRawDest()->getType() == Ptr->getType() &&
"setDest called with pointer of wrong type!");
setArgOperand(ARG_DEST, Ptr);
}
void setSource(Value *Ptr) {
assert(getRawSource()->getType() == Ptr->getType() &&
"setSource called with pointer of wrong type!");
setArgOperand(ARG_SOURCE, Ptr);
}
void setLength(Value *L) {
assert(getLength()->getType() == L->getType() &&
"setLength called with value of wrong type!");
setArgOperand(ARG_LENGTH, L);
}
void setElementSizeInBytes(Constant *V) {
assert(V->getType() == Type::getInt8Ty(getContext()) &&
"setElementSizeInBytes called with value of wrong type!");
setArgOperand(ARG_ELEMENTSIZE, V);
}
static inline bool classof(const IntrinsicInst *I) {
return I->getIntrinsicID() == Intrinsic::memcpy_element_atomic;
return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
}
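A hedged sketch of how a pass might match the renamed wrapper and use the accessors above (I is an assumed Instruction*):

    if (auto *AMI = dyn_cast<ElementUnorderedAtomicMemCpyInst>(I)) {
      uint32_t ElSz = AMI->getElementSizeInBytes();
      Value *Len = AMI->getLength();   // total size, not an element count
      // e.g. pick a lowering via RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElSz)
    }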
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
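
The new wrapper class above exposes the unordered-atomic memcpy operands through typed accessors. As a minimal, illustrative sketch (not part of this import), a pass could inspect the intrinsic like this; the helper name is hypothetical, while the accessors are the ones declared above.

.. code-block:: c++

    #include "llvm/IR/IntrinsicInst.h"
    #include "llvm/Support/raw_ostream.h"

    // Hypothetical helper: describe an element-wise unordered-atomic memcpy.
    static void describeAtomicMemCpy(const llvm::Instruction &I) {
      using namespace llvm;
      if (const auto *AMI = dyn_cast<ElementUnorderedAtomicMemCpyInst>(&I)) {
        Value *Dst = AMI->getDest();      // raw dest with pointer casts stripped
        Value *Src = AMI->getSource();    // raw source with pointer casts stripped
        Value *Len = AMI->getLength();    // copy length operand (any integer type)
        uint32_t ElemSize = AMI->getElementSizeInBytes(); // constant i32 operand
        errs() << "unordered-atomic memcpy, element size " << ElemSize << ": "
               << *Dst << " <- " << *Src << ", len " << *Len << "\n";
      }
    }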

View File

@@ -862,11 +862,16 @@ def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
//
def int_memcpy_element_atomic : Intrinsic<[],
[llvm_anyptr_ty, llvm_anyptr_ty,
llvm_i64_ty, llvm_i32_ty],
[IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
WriteOnly<0>, ReadOnly<1>]>;
// @llvm.memcpy.element.unordered.atomic.*(dest, src, length, elementsize)
def int_memcpy_element_unordered_atomic
: Intrinsic<[],
[
llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
],
[
IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
ReadOnly<1>
]>;
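
Since the intrinsic is now overloaded on both pointer types and on the length type (llvm_anyint_ty), a caller has to supply those types when requesting the declaration. The following is a hedged sketch only; the function name and the fixed element size of 4 are placeholders, and any parameter attributes the IR verifier may additionally require are not shown.

.. code-block:: c++

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // Sketch: emit a call to @llvm.memcpy.element.unordered.atomic.*.
    static llvm::CallInst *emitElementAtomicMemCpy(llvm::IRBuilder<> &Builder,
                                                   llvm::Module &M,
                                                   llvm::Value *Dest,
                                                   llvm::Value *Src,
                                                   llvm::Value *Len) {
      llvm::Function *Fn = llvm::Intrinsic::getDeclaration(
          &M, llvm::Intrinsic::memcpy_element_unordered_atomic,
          {Dest->getType(), Src->getType(), Len->getType()});
      // Element size is the trailing i32 operand; 4 is an arbitrary example.
      return Builder.CreateCall(Fn, {Dest, Src, Len, Builder.getInt32(4)});
    }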
//===------------------------ Reduction Intrinsics ------------------------===//
//

View File

@@ -32,6 +32,7 @@
#include <cstdint>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
@@ -542,6 +543,9 @@ class ModuleSummaryIndex {
/// considered live.
bool WithGlobalValueDeadStripping = false;
std::set<std::string> CfiFunctionDefs;
std::set<std::string> CfiFunctionDecls;
// YAML I/O support.
friend yaml::MappingTraits<ModuleSummaryIndex>;
@@ -567,6 +571,7 @@ class ModuleSummaryIndex {
bool isGlobalValueLive(const GlobalValueSummary *GVS) const {
return !WithGlobalValueDeadStripping || GVS->isLive();
}
bool isGUIDLive(GlobalValue::GUID GUID) const;
/// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
@@ -592,6 +597,12 @@ class ModuleSummaryIndex {
return I == OidGuidMap.end() ? 0 : I->second;
}
std::set<std::string> &cfiFunctionDefs() { return CfiFunctionDefs; }
const std::set<std::string> &cfiFunctionDefs() const { return CfiFunctionDefs; }
std::set<std::string> &cfiFunctionDecls() { return CfiFunctionDecls; }
const std::set<std::string> &cfiFunctionDecls() const { return CfiFunctionDecls; }
/// Add a global value summary for a value of the given name.
void addGlobalValueSummary(StringRef ValueName,
std::unique_ptr<GlobalValueSummary> Summary) {
@@ -691,14 +702,13 @@ class ModuleSummaryIndex {
return Pair.first;
}
/// Add a new module path with the given \p Hash, mapped to the given \p
/// ModID, and return an iterator to the entry in the index.
ModulePathStringTableTy::iterator
addModulePath(StringRef ModPath, uint64_t ModId,
ModuleHash Hash = ModuleHash{{0}}) {
return ModulePathStringTable.insert(std::make_pair(
ModPath,
std::make_pair(ModId, Hash))).first;
typedef ModulePathStringTableTy::value_type ModuleInfo;
/// Add a new module with the given \p Hash, mapped to the given \p
/// ModID, and return a reference to the module.
ModuleInfo *addModule(StringRef ModPath, uint64_t ModId,
ModuleHash Hash = ModuleHash{{0}}) {
return &*ModulePathStringTable.insert({ModPath, {ModId, Hash}}).first;
}
/// Check if the given Module has any functions available for exporting
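
The new CFI name sets and the reworked addModule() are straightforward to drive; a short usage sketch (not from this import) with placeholder names, assuming ModuleInfo is the StringMap entry type the typedef above refers to.

.. code-block:: c++

    #include "llvm/IR/ModuleSummaryIndex.h"

    static void populateIndex(llvm::ModuleSummaryIndex &Index) {
      // Names of functions with CFI jump-table entries (new std::set members).
      Index.cfiFunctionDefs().insert("_Z3foov");
      Index.cfiFunctionDecls().insert("_Z3barv");

      // addModule() now returns a pointer to the table entry rather than an
      // iterator; "a.o" and the module id are placeholders.
      auto *Info = Index.addModule("a.o", /*ModId=*/0);
      (void)Info;
    }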

View File

@@ -188,6 +188,7 @@ template <> struct MappingTraits<FunctionSummaryYaml> {
LLVM_YAML_IS_STRING_MAP(TypeIdSummary)
LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummaryYaml)
LLVM_YAML_IS_SEQUENCE_VECTOR(std::string)
namespace llvm {
namespace yaml {
@@ -240,6 +241,23 @@ template <> struct MappingTraits<ModuleSummaryIndex> {
io.mapOptional("TypeIdMap", index.TypeIdMap);
io.mapOptional("WithGlobalValueDeadStripping",
index.WithGlobalValueDeadStripping);
if (io.outputting()) {
std::vector<std::string> CfiFunctionDefs(index.CfiFunctionDefs.begin(),
index.CfiFunctionDefs.end());
io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
std::vector<std::string> CfiFunctionDecls(index.CfiFunctionDecls.begin(),
index.CfiFunctionDecls.end());
io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
} else {
std::vector<std::string> CfiFunctionDefs;
io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
index.CfiFunctionDefs = {CfiFunctionDefs.begin(), CfiFunctionDefs.end()};
std::vector<std::string> CfiFunctionDecls;
io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
index.CfiFunctionDecls = {CfiFunctionDecls.begin(),
CfiFunctionDecls.end()};
}
}
};
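
To see where the new CfiFunctionDefs/CfiFunctionDecls keys appear, the usual LLVM YAML I/O driver is enough. This is a hedged sketch; the header name is assumed from context and the function is hypothetical.

.. code-block:: c++

    #include "llvm/IR/ModuleSummaryIndexYAML.h"   // assumed header for these traits
    #include "llvm/Support/YAMLTraits.h"
    #include "llvm/Support/raw_ostream.h"

    // Serialize a summary index, including the CFI name lists, as YAML.
    static void printIndexAsYaml(llvm::ModuleSummaryIndex &Index) {
      llvm::yaml::Output Yout(llvm::outs());
      Yout << Index;   // drives MappingTraits<ModuleSummaryIndex>::mapping above
    }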

View File

@@ -35,7 +35,6 @@ class Operator : public User {
Operator() = delete;
~Operator() = delete;
void *operator new(size_t, unsigned) = delete;
void *operator new(size_t s) = delete;
/// Return the opcode for this Instruction or ConstantExpr.

View File

@@ -1027,7 +1027,7 @@ struct MaxMin_match {
(TrueVal != RHS || FalseVal != LHS))
return false;
typename CmpInst_t::Predicate Pred =
LHS == TrueVal ? Cmp->getPredicate() : Cmp->getSwappedPredicate();
LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
// Does "(x pred y) ? x : y" represent the desired max/min operation?
if (!Pred_t::match(Pred))
return false;
@@ -1138,7 +1138,7 @@ inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate.
///
/// max(L, R) iff L and R are not NaN
/// min(L, R) iff L and R are not NaN
/// m_OrdFMin(L, R) = R iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
@@ -1154,13 +1154,28 @@ inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
/// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate.
///
/// max(L, R) iff L and R are not NaN
/// m_UnordFMin(L, R) = L iff L or R are NaN
/// m_UnordFMax(L, R) = L iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
m_UnordFMax(const LHS &L, const RHS &R) {
return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
}
/// \brief Match an 'unordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
///
/// min(L, R) iff L and R are not NaN
/// m_UnordFMin(L, R) = L iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
m_UnordFMin(const LHS &L, const RHS &R) {
return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
}
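
With m_UnordFMin now declared next to m_UnordFMax, both matchers are used the same way. A small, illustrative sketch with placeholder names, not part of this import.

.. code-block:: c++

    #include "llvm/IR/PatternMatch.h"

    // Recognize an unordered floating-point minimum built from select+fcmp.
    static bool isUnorderedFMin(llvm::Value *V, llvm::Value *&A, llvm::Value *&B) {
      using namespace llvm::PatternMatch;
      return match(V, m_UnordFMin(m_Value(A), m_Value(B)));
    }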
//===----------------------------------------------------------------------===//
// Matchers for overflow check patterns: e.g. (a + b) u< a
//
@@ -1207,21 +1222,6 @@ m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
}
/// \brief Match an 'unordered' floating point minimum function.
/// Floating point has one special value 'NaN'. Therefore, there is no total
/// order. However, if we can ignore the 'NaN' value (for example, because of a
/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
///
/// max(L, R) iff L and R are not NaN
/// m_UnordFMin(L, R) = L iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
m_UnordFMin(const LHS &L, const RHS &R) {
return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
}
template <typename Opnd_t> struct Argument_match {
unsigned OpI;
Opnd_t Val;

View File

@@ -281,6 +281,16 @@ class LTO {
bool HasModule = false;
std::unique_ptr<Module> CombinedModule;
std::unique_ptr<IRMover> Mover;
// This stores the information about a regular LTO module that we have added
// to the link. It will either be linked immediately (for modules without
// summaries) or after summary-based dead stripping (for modules with
// summaries).
struct AddedModule {
std::unique_ptr<Module> M;
std::vector<GlobalValue *> Keep;
};
std::vector<AddedModule> ModsWithSummaries;
} RegularLTO;
struct ThinLTOState {
@@ -303,9 +313,10 @@ class LTO {
/// The unmangled name of the global.
std::string IRName;
/// Keep track if the symbol is visible outside of ThinLTO (i.e. in
/// either a regular object or the regular LTO partition).
bool VisibleOutsideThinLTO = false;
/// Keep track if the symbol is visible outside of a module with a summary
/// (i.e. in either a regular object or a regular LTO module without a
/// summary).
bool VisibleOutsideSummary = false;
bool UnnamedAddr = true;
@@ -339,8 +350,9 @@ class LTO {
// Global mapping from mangled symbol names to resolutions.
StringMap<GlobalResolution> GlobalResolutions;
void addSymbolToGlobalRes(const InputFile::Symbol &Sym, SymbolResolution Res,
unsigned Partition);
void addModuleToGlobalRes(ArrayRef<InputFile::Symbol> Syms,
ArrayRef<SymbolResolution> Res, unsigned Partition,
bool InSummary);
// These functions take a range of symbol resolutions [ResI, ResE) and consume
// the resolutions used by a single input module by incrementing ResI. After
@@ -348,10 +360,13 @@ class LTO {
// the remaining modules in the InputFile.
Error addModule(InputFile &Input, unsigned ModI,
const SymbolResolution *&ResI, const SymbolResolution *ResE);
Error addRegularLTO(BitcodeModule BM,
ArrayRef<InputFile::Symbol> Syms,
const SymbolResolution *&ResI,
const SymbolResolution *ResE);
Expected<RegularLTOState::AddedModule>
addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
const SymbolResolution *&ResI, const SymbolResolution *ResE);
Error linkRegularLTO(RegularLTOState::AddedModule Mod,
bool LivenessFromIndex);
Error addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
const SymbolResolution *&ResI, const SymbolResolution *ResE);

View File

@@ -158,7 +158,7 @@ struct LTOModule {
private:
/// Parse metadata from the module
// FIXME: it only parses "Linker Options" metadata at the moment
// FIXME: it only parses "llvm.linker.options" metadata at the moment
void parseMetadata();
/// Parse the symbols from the module and model-level ASM and add them to

View File

@@ -13,6 +13,7 @@
#include "llvm/MC/MCSymbol.h"
namespace llvm {
class MCSymbolWasm : public MCSymbol {
private:
bool IsFunction = false;
@@ -52,6 +53,7 @@ class MCSymbolWasm : public MCSymbol {
Params = std::move(Pars);
}
};
}
#endif
} // end namespace llvm
#endif // LLVM_MC_MCSYMBOLWASM_H

View File

@@ -12,20 +12,12 @@
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/raw_ostream.h"
#include <vector>
namespace llvm {
class MCAssembler;
class MCContext;
class MCFixup;
class MCFragment;
class MCObjectWriter;
class MCSectionWasm;
class MCSymbol;
class MCSymbolWasm;
class MCValue;
class raw_pwrite_stream;
@@ -38,8 +30,8 @@ class MCWasmObjectTargetWriter {
public:
virtual ~MCWasmObjectTargetWriter();
virtual unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup, bool IsPCRel) const = 0;
virtual unsigned getRelocType(const MCValue &Target,
const MCFixup &Fixup) const = 0;
/// \name Accessors
/// @{
@@ -54,6 +46,7 @@ class MCWasmObjectTargetWriter {
/// \returns The constructed object writer.
MCObjectWriter *createWasmObjectWriter(MCWasmObjectTargetWriter *MOTW,
raw_pwrite_stream &OS);
} // End llvm namespace
#endif
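
With the MCContext and PC-relative parameters dropped, a target-specific writer only overrides the two-argument form. Below is a sketch of such a subclass; the class name is invented and the assumption, based on the in-tree WebAssembly writer, is that the protected base constructor takes an is-64-bit flag.

.. code-block:: c++

    #include "llvm/MC/MCWasmObjectWriter.h"

    namespace {
    class ExampleWasmObjectWriter : public llvm::MCWasmObjectTargetWriter {
    public:
      explicit ExampleWasmObjectWriter(bool Is64Bit)
          : MCWasmObjectTargetWriter(Is64Bit) {} // assumed base constructor

      unsigned getRelocType(const llvm::MCValue &Target,
                            const llvm::MCFixup &Fixup) const override {
        return 0; // a real writer maps fixup kinds to wasm relocation types here
      }
    };
    } // end anonymous namespace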

View File

@@ -22,6 +22,7 @@ namespace llvm {
struct NewArchiveMember {
std::unique_ptr<MemoryBuffer> Buf;
StringRef MemberName;
sys::TimePoint<std::chrono::seconds> ModTime;
unsigned UID = 0, GID = 0, Perms = 0644;

View File

@@ -118,7 +118,7 @@ class WindowsResourceParser {
class TreeNode;
WindowsResourceParser();
Error parse(WindowsResource *WR);
void printTree() const;
void printTree(raw_ostream &OS) const;
const TreeNode &getTree() const { return Root; }
const ArrayRef<std::vector<uint8_t>> getData() const { return Data; }
const ArrayRef<std::vector<UTF16>> getStringTable() const {
@@ -159,14 +159,16 @@ class WindowsResourceParser {
TreeNode(uint16_t MajorVersion, uint16_t MinorVersion,
uint32_t Characteristics);
void addEntry(const ResourceEntryRef &Entry);
TreeNode &addTypeNode(const ResourceEntryRef &Entry);
TreeNode &addNameNode(const ResourceEntryRef &Entry);
void addEntry(const ResourceEntryRef &Entry, bool &IsNewTypeString,
bool &IsNewNameString);
TreeNode &addTypeNode(const ResourceEntryRef &Entry, bool &IsNewTypeString);
TreeNode &addNameNode(const ResourceEntryRef &Entry, bool &IsNewNameString);
TreeNode &addLanguageNode(const ResourceEntryRef &Entry);
TreeNode &addChild(uint32_t ID, bool IsDataNode = false,
uint16_t MajorVersion = 0, uint16_t MinorVersion = 0,
uint32_t Characteristics = 0);
TreeNode &addChild(ArrayRef<UTF16> NameRef);
TreeNode &addChild(ArrayRef<UTF16> NameRef, bool &IsNewString);
bool IsDataNode = false;
uint32_t StringIndex;
uint32_t DataIndex;
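
printTree() now takes the output stream explicitly. A hedged sketch of the parse-then-dump flow; the llvm::object namespace and header location are assumed, and WR stands in for an already-created WindowsResource.

.. code-block:: c++

    #include "llvm/Object/WindowsResource.h"   // assumed location of this class
    #include "llvm/Support/raw_ostream.h"

    static llvm::Error dumpResources(llvm::object::WindowsResourceParser &Parser,
                                     llvm::object::WindowsResource *WR) {
      if (llvm::Error E = Parser.parse(WR))
        return E;
      Parser.printTree(llvm::errs());   // new raw_ostream overload
      return llvm::Error::success();
    }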

View File

@@ -16,6 +16,8 @@
#include "llvm/ADT/Optional.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/ObjectYAML/CodeViewYAMLDebugSections.h"
#include "llvm/ObjectYAML/CodeViewYAMLTypes.h"
#include "llvm/ObjectYAML/YAML.h"
namespace llvm {
@@ -56,6 +58,8 @@ namespace COFFYAML {
COFF::section Header;
unsigned Alignment = 0;
yaml::BinaryRef SectionData;
std::vector<CodeViewYAML::YAMLDebugSubsection> DebugS;
std::vector<CodeViewYAML::LeafRecord> DebugT;
std::vector<Relocation> Relocations;
StringRef Name;
Section();

View File

@@ -28,6 +28,8 @@ class DebugStringTableSubsectionRef;
class DebugChecksumsSubsectionRef;
class DebugStringTableSubsection;
class DebugChecksumsSubsection;
class StringsAndChecksums;
class StringsAndChecksumsRef;
}
namespace CodeViewYAML {
@@ -103,25 +105,24 @@ struct InlineeInfo {
struct YAMLDebugSubsection {
static Expected<YAMLDebugSubsection>
fromCodeViewSubection(const codeview::DebugStringTableSubsectionRef &Strings,
const codeview::DebugChecksumsSubsectionRef &Checksums,
fromCodeViewSubection(const codeview::StringsAndChecksumsRef &SC,
const codeview::DebugSubsectionRecord &SS);
std::shared_ptr<detail::YAMLSubsectionBase> Subsection;
};
Expected<std::vector<std::unique_ptr<codeview::DebugSubsection>>>
struct DebugSubsectionState {};
Expected<std::vector<std::shared_ptr<codeview::DebugSubsection>>>
toCodeViewSubsectionList(BumpPtrAllocator &Allocator,
ArrayRef<YAMLDebugSubsection> Subsections,
codeview::DebugStringTableSubsection &Strings);
Expected<std::vector<std::unique_ptr<codeview::DebugSubsection>>>
toCodeViewSubsectionList(
BumpPtrAllocator &Allocator, ArrayRef<YAMLDebugSubsection> Subsections,
std::unique_ptr<codeview::DebugStringTableSubsection> &TakeStrings,
codeview::DebugStringTableSubsection *StringsRef);
const codeview::StringsAndChecksums &SC);
std::unique_ptr<codeview::DebugStringTableSubsection>
findStringTable(ArrayRef<YAMLDebugSubsection> Sections);
std::vector<YAMLDebugSubsection>
fromDebugS(ArrayRef<uint8_t> Data, const codeview::StringsAndChecksumsRef &SC);
void initializeStringsAndChecksums(ArrayRef<YAMLDebugSubsection> Sections,
codeview::StringsAndChecksums &SC);
} // namespace CodeViewYAML
} // namespace llvm

View File

@@ -41,6 +41,9 @@ struct LeafRecord {
codeview::CVType toCodeViewRecord(codeview::TypeTableBuilder &TS) const;
static Expected<LeafRecord> fromCodeViewRecord(codeview::CVType Type);
};
std::vector<LeafRecord> fromDebugT(ArrayRef<uint8_t> DebugT);
ArrayRef<uint8_t> toDebugT(ArrayRef<LeafRecord>, BumpPtrAllocator &Alloc);
} // namespace CodeViewYAML
} // namespace llvm
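
The two free functions declared above suggest a simple round-trip between raw .debug$T bytes and YAML-able leaf records. Illustrative sketch only; DebugTContents is a placeholder for the section payload.

.. code-block:: c++

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ObjectYAML/CodeViewYAMLTypes.h"
    #include "llvm/Support/Allocator.h"
    #include <vector>

    static llvm::ArrayRef<uint8_t>
    roundTripDebugT(llvm::ArrayRef<uint8_t> DebugTContents,
                    llvm::BumpPtrAllocator &Alloc) {
      std::vector<llvm::CodeViewYAML::LeafRecord> Records =
          llvm::CodeViewYAML::fromDebugT(DebugTContents);
      return llvm::CodeViewYAML::toDebugT(Records, Alloc);
    }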

View File

@@ -1,4 +1,4 @@
//===--- Arg.h - Parsed Argument Classes ------------------------*- C++ -*-===//
//===- Arg.h - Parsed Argument Classes --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,7 +21,11 @@
#include <string>
namespace llvm {
class raw_ostream;
namespace opt {
class ArgList;
/// \brief A concrete instance of a particular driver option.
@@ -29,9 +33,6 @@ class ArgList;
/// The Arg class encodes just enough information to be able to
/// derive the argument values efficiently.
class Arg {
Arg(const Arg &) = delete;
void operator=(const Arg &) = delete;
private:
/// \brief The option this argument is an instance of.
const Option Opt;
@@ -65,6 +66,8 @@ class Arg {
const char *Value0, const Arg *BaseArg = nullptr);
Arg(const Option Opt, StringRef Spelling, unsigned Index,
const char *Value0, const char *Value1, const Arg *BaseArg = nullptr);
Arg(const Arg &) = delete;
Arg &operator=(const Arg &) = delete;
~Arg();
const Option &getOption() const { return Opt; }
@@ -89,6 +92,7 @@ class Arg {
void claim() const { getBaseArg().Claimed = true; }
unsigned getNumValues() const { return Values.size(); }
const char *getValue(unsigned N = 0) const {
return Values[N];
}
@@ -122,6 +126,7 @@ class Arg {
};
} // end namespace opt
} // end namespace llvm
#endif
#endif // LLVM_OPTION_ARG_H
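
As a small illustration of the Arg accessors this cleanup keeps in place (getNumValues, getValue, claim); the helper name is hypothetical.

.. code-block:: c++

    #include "llvm/Option/Arg.h"
    #include "llvm/Support/raw_ostream.h"

    // Print every value carried by a parsed argument, then mark it as used so
    // "unused argument" diagnostics stay quiet.
    static void consumeArg(const llvm::opt::Arg &A) {
      for (unsigned I = 0, E = A.getNumValues(); I != E; ++I)
        llvm::outs() << A.getValue(I) << "\n";
      A.claim();   // claim() is const; it claims the base argument
    }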

Some files were not shown because too many files have changed in this diff.