Vendor import of llvm trunk r304149:
https://llvm.org/svn/llvm-project/llvm/trunk@304149
Parent: b5630dbadf
Commit: ab44ce3d59
@@ -44,6 +44,13 @@ if (NOT PACKAGE_VERSION)
     "${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}.${LLVM_VERSION_PATCH}${LLVM_VERSION_SUFFIX}")
 endif()
 
+if ((CMAKE_GENERATOR MATCHES "Visual Studio") AND (CMAKE_GENERATOR_TOOLSET STREQUAL ""))
+  message(WARNING "Visual Studio generators use the x86 host compiler by "
+                  "default, even for 64-bit targets. This can result in linker "
+                  "instability and out of memory errors. To use the 64-bit "
+                  "host compiler, pass -Thost=x64 on the CMake command line.")
+endif()
+
 project(LLVM
   ${cmake_3_0_PROJ_VERSION}
   ${cmake_3_0_LANGUAGES}
docs/Benchmarking.rst | 87 (new file)

@@ -0,0 +1,87 @@
==================================
Benchmarking tips
==================================


Introduction
============

For benchmarking a patch we want to reduce all possible sources of
noise as much as possible. How to do that is very OS dependent.

Note that low noise is required, but not sufficient. It does not
exclude measurement bias. See
https://www.cis.upenn.edu/~cis501/papers/producing-wrong-data.pdf for
example.

General
================================

* Use a high resolution timer, e.g. perf under linux.

* Run the benchmark multiple times to be able to recognize noise.

* Disable as many processes or services as possible on the target system.

* Disable frequency scaling, turbo boost and address space
  randomization (see OS specific section).

* Static link if the OS supports it. That avoids any variation that
  might be introduced by loading dynamic libraries. This can be done
  by passing ``-DLLVM_BUILD_STATIC=ON`` to cmake.

* Try to avoid storage. On some systems you can use tmpfs. Putting the
  program, inputs and outputs on tmpfs avoids touching a real storage
  system, which can have a pretty big variability.

  To mount it (on linux and freebsd at least)::

    mount -t tmpfs -o size=<XX>g none dir_to_mount

Linux
=====

* Disable address space randomization::

    echo 0 > /proc/sys/kernel/randomize_va_space

* Set scaling_governor to performance::

    for i in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
    do
      echo performance > $i
    done

* Use https://github.com/lpechacek/cpuset to reserve cpus for just the
  program you are benchmarking. If using perf, leave at least 2 cores
  so that perf runs in one and your program in another::

    cset shield -c N1,N2 -k on

  This will move all threads out of N1 and N2. The ``-k on`` means
  that even kernel threads are moved out.

* Disable the SMT pair of the cpus you will use for the benchmark. The
  pair of cpu N can be found in
  ``/sys/devices/system/cpu/cpuN/topology/thread_siblings_list`` and
  disabled with::

    echo 0 > /sys/devices/system/cpu/cpuX/online


* Run the program with::

    cset shield --exec -- perf stat -r 10 <cmd>

  This will run the command after ``--`` in the isolated cpus. The
  particular perf command runs the ``<cmd>`` 10 times and reports
  statistics.

With these in place you can expect perf variations of less than 0.1%.

Linux Intel
-----------

* Disable turbo mode::

    echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
@@ -100,6 +100,10 @@ Here's the short story for getting up and running quickly with LLVM:
 * CMake generates project files for all build types. To select a specific
   build type, use the Configuration manager from the VS IDE or the
   ``/property:Configuration`` command line option when using MSBuild.
+* By default, the Visual Studio project files generated by CMake use the
+  32-bit toolset. If you are developing on a 64-bit version of Windows and
+  want to use the 64-bit toolset, pass the ``-Thost=x64`` flag when
+  generating the Visual Studio solution. This requires CMake 3.8.0 or later.
 
 6. Start Visual Studio
 
docs/LangRef.rst | 471

@@ -4415,12 +4415,6 @@ The current supported vocabulary is limited:
   address space identifier.
 - ``DW_OP_stack_value`` marks a constant value.
 
-DIExpression nodes that contain a ``DW_OP_stack_value`` operator are standalone
-location descriptions that describe constant values. This form is used to
-describe global constants that have been optimized away. All other expressions
-are modifiers to another location: A debug intrinsic ties a location and a
-DIExpression together.
-
 DWARF specifies three kinds of simple location descriptions: Register, memory,
 and implicit location descriptions. Register and memory location descriptions
 describe the *location* of a source variable (in the sense that a debugger might
@@ -12722,7 +12716,7 @@ Syntax:
       declare <type>
       @llvm.experimental.constrained.fadd(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
-                                          metadata <exception behavior>)
+                                          metadata <exception behavior>)
 
 Overview:
 """""""""
@@ -12759,7 +12753,7 @@ Syntax:
       declare <type>
       @llvm.experimental.constrained.fsub(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
-                                          metadata <exception behavior>)
+                                          metadata <exception behavior>)
 
 Overview:
 """""""""
@@ -12796,7 +12790,7 @@ Syntax:
       declare <type>
      @llvm.experimental.constrained.fmul(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
-                                          metadata <exception behavior>)
+                                          metadata <exception behavior>)
 
 Overview:
 """""""""
@@ -12833,7 +12827,7 @@ Syntax:
       declare <type>
       @llvm.experimental.constrained.fdiv(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
-                                          metadata <exception behavior>)
+                                          metadata <exception behavior>)
 
 Overview:
 """""""""
@@ -12870,7 +12864,7 @@ Syntax:
       declare <type>
       @llvm.experimental.constrained.frem(<type> <op1>, <type> <op2>,
                                           metadata <rounding mode>,
-                                          metadata <exception behavior>)
+                                          metadata <exception behavior>)
 
 Overview:
 """""""""
@@ -12899,6 +12893,461 @@ value operands and has the same type as the operands. The remainder has the
same sign as the dividend.


Constrained libm-equivalent Intrinsics
--------------------------------------

In addition to the basic floating point operations for which constrained
intrinsics are described above, there are constrained versions of various
operations which provide equivalent behavior to a corresponding libm function.
These intrinsics allow the precise behavior of these operations with respect to
rounding mode and exception behavior to be controlled.

As with the basic constrained floating point intrinsics, the rounding mode
and exception behavior arguments only control the behavior of the optimizer.
They do not change the runtime floating point environment.


'``llvm.experimental.constrained.sqrt``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.sqrt(<type> <op1>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.sqrt``' intrinsic returns the square root
of the specified value, returning the same value as the libm '``sqrt``'
functions would, but without setting ``errno``.

Arguments:
""""""""""

The first argument and the return type are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the nonnegative square root of the specified value.
If the value is less than negative zero, a floating point exception occurs
and the return value is architecture specific.
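
As an illustrative sketch only (not normative LangRef text): a call for a
``double`` operand might be written as follows, assuming the usual
overloaded-intrinsic name mangling (``.f64``) and the metadata strings defined
for the basic constrained intrinsics above::

      %r = call double @llvm.experimental.constrained.sqrt.f64(double %x,
                                     metadata !"round.dynamic",
                                     metadata !"fpexcept.strict")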


'``llvm.experimental.constrained.pow``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.pow(<type> <op1>, <type> <op2>,
                                         metadata <rounding mode>,
                                         metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.pow``' intrinsic returns the first operand
raised to the (positive or negative) power specified by the second operand.

Arguments:
""""""""""

The first two arguments and the return value are floating point numbers of the
same type. The second argument specifies the power to which the first argument
should be raised.

The third and fourth arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the first value raised to the second power,
returning the same values as the libm ``pow`` functions would, and
handles error conditions in the same way.


'``llvm.experimental.constrained.powi``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.powi(<type> <op1>, i32 <op2>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.powi``' intrinsic returns the first operand
raised to the (positive or negative) power specified by the second operand. The
order of evaluation of multiplications is not defined. When a vector of floating
point type is used, the second argument remains a scalar integer value.


Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type. The second argument is a 32-bit signed integer specifying the power to
which the first argument should be raised.

The third and fourth arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the first value raised to the second power with an
unspecified sequence of rounding operations.
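
For illustration only, a hedged sketch of a call with a constant exponent,
again assuming ``.f64`` name mangling and the metadata strings described for
the basic constrained intrinsics::

      %p = call double @llvm.experimental.constrained.powi.f64(double %x, i32 3,
                                     metadata !"round.dynamic",
                                     metadata !"fpexcept.ignore")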


'``llvm.experimental.constrained.sin``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.sin(<type> <op1>,
                                         metadata <rounding mode>,
                                         metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.sin``' intrinsic returns the sine of the
first operand.

Arguments:
""""""""""

The first argument and the return type are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the sine of the specified operand, returning the
same values as the libm ``sin`` functions would, and handles error
conditions in the same way.


'``llvm.experimental.constrained.cos``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.cos(<type> <op1>,
                                         metadata <rounding mode>,
                                         metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.cos``' intrinsic returns the cosine of the
first operand.

Arguments:
""""""""""

The first argument and the return type are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the cosine of the specified operand, returning the
same values as the libm ``cos`` functions would, and handles error
conditions in the same way.


'``llvm.experimental.constrained.exp``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.exp(<type> <op1>,
                                         metadata <rounding mode>,
                                         metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.exp``' intrinsic computes the base-e
exponential of the specified value.

Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the same values as the libm ``exp`` functions
would, and handles error conditions in the same way.


'``llvm.experimental.constrained.exp2``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.exp2(<type> <op1>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.exp2``' intrinsic computes the base-2
exponential of the specified value.


Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the same values as the libm ``exp2`` functions
would, and handles error conditions in the same way.


'``llvm.experimental.constrained.log``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.log(<type> <op1>,
                                         metadata <rounding mode>,
                                         metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.log``' intrinsic computes the base-e
logarithm of the specified value.

Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.


Semantics:
""""""""""

This function returns the same values as the libm ``log`` functions
would, and handles error conditions in the same way.


'``llvm.experimental.constrained.log10``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.log10(<type> <op1>,
                                           metadata <rounding mode>,
                                           metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.log10``' intrinsic computes the base-10
logarithm of the specified value.

Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the same values as the libm ``log10`` functions
would, and handles error conditions in the same way.


'``llvm.experimental.constrained.log2``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.log2(<type> <op1>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.log2``' intrinsic computes the base-2
logarithm of the specified value.

Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the same values as the libm ``log2`` functions
would, and handles error conditions in the same way.


'``llvm.experimental.constrained.rint``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.rint(<type> <op1>,
                                          metadata <rounding mode>,
                                          metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.rint``' intrinsic returns the first
operand rounded to the nearest integer. It may raise an inexact floating point
exception if the operand is not an integer.

Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the same values as the libm ``rint`` functions
would, and handles error conditions in the same way. The rounding mode is
described, not determined, by the rounding mode argument. The actual rounding
mode is determined by the runtime floating point environment. The rounding
mode argument is only intended as information to the compiler.
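
As a hedged, illustrative sketch (assuming ``.f64`` mangling): a caller that
knows the runtime environment uses the default rounding mode could describe it
explicitly instead of using ``"round.dynamic"``::

      %v = call double @llvm.experimental.constrained.rint.f64(double %x,
                                     metadata !"round.tonearest",
                                     metadata !"fpexcept.maytrap")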


'``llvm.experimental.constrained.nearbyint``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Syntax:
"""""""

::

      declare <type>
      @llvm.experimental.constrained.nearbyint(<type> <op1>,
                                               metadata <rounding mode>,
                                               metadata <exception behavior>)

Overview:
"""""""""

The '``llvm.experimental.constrained.nearbyint``' intrinsic returns the first
operand rounded to the nearest integer. It will not raise an inexact floating
point exception if the operand is not an integer.


Arguments:
""""""""""

The first argument and the return value are floating point numbers of the same
type.

The second and third arguments specify the rounding mode and exception
behavior as described above.

Semantics:
""""""""""

This function returns the same values as the libm ``nearbyint`` functions
would, and handles error conditions in the same way. The rounding mode is
described, not determined, by the rounding mode argument. The actual rounding
mode is determined by the runtime floating point environment. The rounding
mode argument is only intended as information to the compiler.


General Intrinsics
------------------
@@ -99,7 +99,9 @@ Optimization remarks are enabled using:
    indicates if vectorization was specified.
 
 ``-Rpass-analysis=loop-vectorize`` identifies the statements that caused
-vectorization to fail.
+vectorization to fail. If in addition ``-fsave-optimization-record`` is
+provided, multiple causes of vectorization failure may be listed (this behavior
+might change in the future).
 
 Consider the following loop:
 
@@ -90,6 +90,7 @@ representation.
    CodeOfConduct
    CompileCudaWithLLVM
    ReportingGuide
+   Benchmarking
 
 :doc:`GettingStarted`
    Discusses how to get up and running quickly with the LLVM infrastructure.
@ -1,4 +1,4 @@
|
||||
//===----- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope ----*- C++ -*-===//
|
||||
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -17,7 +17,7 @@
|
||||
#include "llvm/ADT/STLExtras.h"
|
||||
#include "llvm/ExecutionEngine/ExecutionEngine.h"
|
||||
#include "llvm/ExecutionEngine/JITSymbol.h"
|
||||
#include "llvm/ExecutionEngine/RuntimeDyld.h"
|
||||
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
|
||||
@ -44,7 +44,7 @@ class KaleidoscopeJIT {
|
||||
IRCompileLayer<decltype(ObjectLayer)> CompileLayer;
|
||||
|
||||
public:
|
||||
typedef decltype(CompileLayer)::ModuleSetHandleT ModuleHandle;
|
||||
using ModuleHandle = decltype(CompileLayer)::ModuleSetHandleT;
|
||||
|
||||
KaleidoscopeJIT()
|
||||
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
|
||||
|
@ -1,4 +1,4 @@
|
||||
//===----- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope ----*- C++ -*-===//
|
||||
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -17,7 +17,7 @@
|
||||
#include "llvm/ADT/STLExtras.h"
|
||||
#include "llvm/ExecutionEngine/ExecutionEngine.h"
|
||||
#include "llvm/ExecutionEngine/JITSymbol.h"
|
||||
#include "llvm/ExecutionEngine/RuntimeDyld.h"
|
||||
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
|
||||
@ -47,13 +47,13 @@ class KaleidoscopeJIT {
|
||||
RTDyldObjectLinkingLayer<> ObjectLayer;
|
||||
IRCompileLayer<decltype(ObjectLayer)> CompileLayer;
|
||||
|
||||
typedef std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>
|
||||
OptimizeFunction;
|
||||
using OptimizeFunction =
|
||||
std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>;
|
||||
|
||||
IRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
|
||||
|
||||
public:
|
||||
typedef decltype(OptimizeLayer)::ModuleSetHandleT ModuleHandle;
|
||||
using ModuleHandle = decltype(OptimizeLayer)::ModuleSetHandleT;
|
||||
|
||||
KaleidoscopeJIT()
|
||||
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
|
||||
|
@ -1,4 +1,4 @@
|
||||
//===----- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope ----*- C++ -*-===//
|
||||
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -17,6 +17,7 @@
|
||||
#include "llvm/ADT/STLExtras.h"
|
||||
#include "llvm/ExecutionEngine/ExecutionEngine.h"
|
||||
#include "llvm/ExecutionEngine/JITSymbol.h"
|
||||
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/RuntimeDyld.h"
|
||||
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
|
||||
@ -49,8 +50,8 @@ class KaleidoscopeJIT {
|
||||
RTDyldObjectLinkingLayer<> ObjectLayer;
|
||||
IRCompileLayer<decltype(ObjectLayer)> CompileLayer;
|
||||
|
||||
typedef std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>
|
||||
OptimizeFunction;
|
||||
using OptimizeFunction =
|
||||
std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>;
|
||||
|
||||
IRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
|
||||
|
||||
@ -58,7 +59,7 @@ class KaleidoscopeJIT {
|
||||
CompileOnDemandLayer<decltype(OptimizeLayer)> CODLayer;
|
||||
|
||||
public:
|
||||
typedef decltype(CODLayer)::ModuleSetHandleT ModuleHandle;
|
||||
using ModuleHandle = decltype(CODLayer)::ModuleSetHandleT;
|
||||
|
||||
KaleidoscopeJIT()
|
||||
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
|
||||
|
@ -1,4 +1,4 @@
|
||||
//===----- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope ----*- C++ -*-===//
|
||||
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -17,10 +17,10 @@
|
||||
#include "llvm/ADT/STLExtras.h"
|
||||
#include "llvm/ExecutionEngine/ExecutionEngine.h"
|
||||
#include "llvm/ExecutionEngine/JITSymbol.h"
|
||||
#include "llvm/ExecutionEngine/RuntimeDyld.h"
|
||||
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
|
||||
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
|
||||
@ -76,8 +76,8 @@ class KaleidoscopeJIT {
|
||||
RTDyldObjectLinkingLayer<> ObjectLayer;
|
||||
IRCompileLayer<decltype(ObjectLayer)> CompileLayer;
|
||||
|
||||
typedef std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>
|
||||
OptimizeFunction;
|
||||
using OptimizeFunction =
|
||||
std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>;
|
||||
|
||||
IRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
|
||||
|
||||
@ -85,7 +85,7 @@ class KaleidoscopeJIT {
|
||||
std::unique_ptr<IndirectStubsManager> IndirectStubsMgr;
|
||||
|
||||
public:
|
||||
typedef decltype(OptimizeLayer)::ModuleSetHandleT ModuleHandle;
|
||||
using ModuleHandle = decltype(OptimizeLayer)::ModuleSetHandleT;
|
||||
|
||||
KaleidoscopeJIT()
|
||||
: TM(EngineBuilder().selectTarget()),
|
||||
@ -106,7 +106,6 @@ class KaleidoscopeJIT {
|
||||
TargetMachine &getTargetMachine() { return *TM; }
|
||||
|
||||
ModuleHandle addModule(std::unique_ptr<Module> M) {
|
||||
|
||||
// Build our symbol resolver:
|
||||
// Lambda 1: Look back into the JIT itself to find symbols that are part of
|
||||
// the same "logical dylib".
|
||||
|
@ -1,4 +1,4 @@
|
||||
//===----- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope ----*- C++ -*-===//
|
||||
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -20,9 +20,8 @@
|
||||
#include "llvm/ADT/Triple.h"
|
||||
#include "llvm/ExecutionEngine/ExecutionEngine.h"
|
||||
#include "llvm/ExecutionEngine/JITSymbol.h"
|
||||
#include "llvm/ExecutionEngine/RuntimeDyld.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
|
||||
#include "llvm/ExecutionEngine/Orc/LambdaResolver.h"
|
||||
@ -73,7 +72,7 @@ namespace llvm {
|
||||
namespace orc {
|
||||
|
||||
// Typedef the remote-client API.
|
||||
typedef remote::OrcRemoteTargetClient<FDRPCChannel> MyRemote;
|
||||
using MyRemote = remote::OrcRemoteTargetClient<FDRPCChannel>;
|
||||
|
||||
class KaleidoscopeJIT {
|
||||
private:
|
||||
@ -82,8 +81,8 @@ class KaleidoscopeJIT {
|
||||
RTDyldObjectLinkingLayer<> ObjectLayer;
|
||||
IRCompileLayer<decltype(ObjectLayer)> CompileLayer;
|
||||
|
||||
typedef std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>
|
||||
OptimizeFunction;
|
||||
using OptimizeFunction =
|
||||
std::function<std::unique_ptr<Module>(std::unique_ptr<Module>)>;
|
||||
|
||||
IRTransformLayer<decltype(CompileLayer), OptimizeFunction> OptimizeLayer;
|
||||
|
||||
@ -92,7 +91,7 @@ class KaleidoscopeJIT {
|
||||
MyRemote &Remote;
|
||||
|
||||
public:
|
||||
typedef decltype(OptimizeLayer)::ModuleSetHandleT ModuleHandle;
|
||||
using ModuleHandle = decltype(OptimizeLayer)::ModuleSetHandleT;
|
||||
|
||||
KaleidoscopeJIT(MyRemote &Remote)
|
||||
: TM(EngineBuilder().selectTarget(Triple(Remote.getTargetTriple()), "",
|
||||
@ -124,7 +123,6 @@ class KaleidoscopeJIT {
|
||||
TargetMachine &getTargetMachine() { return *TM; }
|
||||
|
||||
ModuleHandle addModule(std::unique_ptr<Module> M) {
|
||||
|
||||
// Build our symbol resolver:
|
||||
// Lambda 1: Look back into the JIT itself to find symbols that are part of
|
||||
// the same "logical dylib".
|
||||
|
@ -1,17 +1,19 @@
|
||||
#include "llvm/Support/CommandLine.h"
|
||||
#include "llvm/Support/DynamicLibrary.h"
|
||||
#include "llvm/Support/TargetSelect.h"
|
||||
#include "../RemoteJITUtils.h"
|
||||
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/Orc/OrcRemoteTargetServer.h"
|
||||
#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
|
||||
|
||||
#include "../RemoteJITUtils.h"
|
||||
|
||||
#include "llvm/Support/CommandLine.h"
|
||||
#include "llvm/Support/DynamicLibrary.h"
|
||||
#include "llvm/Support/Error.h"
|
||||
#include "llvm/Support/raw_ostream.h"
|
||||
#include "llvm/Support/TargetSelect.h"
|
||||
#include <cstdint>
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <unistd.h>
|
||||
#include <string>
|
||||
#include <netinet/in.h>
|
||||
#include <sys/socket.h>
|
||||
|
||||
|
||||
using namespace llvm;
|
||||
using namespace llvm::orc;
|
||||
|
||||
@ -22,7 +24,7 @@ cl::opt<uint32_t> Port("port",
|
||||
|
||||
ExitOnError ExitOnErr;
|
||||
|
||||
typedef int (*MainFun)(int, const char*[]);
|
||||
using MainFun = int (*)(int, const char*[]);
|
||||
|
||||
template <typename NativePtrT>
|
||||
NativePtrT MakeNative(uint64_t P) {
|
||||
@ -36,7 +38,6 @@ void printExprResult(double Val) {
|
||||
|
||||
// --- LAZY COMPILE TEST ---
|
||||
int main(int argc, char* argv[]) {
|
||||
|
||||
if (argc == 0)
|
||||
ExitOnErr.setBanner("jit_server: ");
|
||||
else
|
||||
@ -59,14 +60,14 @@ int main(int argc, char* argv[]) {
|
||||
int sockfd = socket(PF_INET, SOCK_STREAM, 0);
|
||||
sockaddr_in servAddr, clientAddr;
|
||||
socklen_t clientAddrLen = sizeof(clientAddr);
|
||||
bzero(&servAddr, sizeof(servAddr));
|
||||
memset(&servAddr, 0, sizeof(servAddr));
|
||||
servAddr.sin_family = PF_INET;
|
||||
servAddr.sin_family = INADDR_ANY;
|
||||
servAddr.sin_port = htons(Port);
|
||||
|
||||
{
|
||||
// avoid "Address already in use" error.
|
||||
int yes=1;
|
||||
int yes = 1;
|
||||
if (setsockopt(sockfd,SOL_SOCKET,SO_REUSEADDR,&yes,sizeof(int)) == -1) {
|
||||
errs() << "Error calling setsockopt.\n";
|
||||
return 1;
|
||||
@ -98,7 +99,8 @@ int main(int argc, char* argv[]) {
|
||||
};
|
||||
|
||||
FDRPCChannel TCPChannel(newsockfd, newsockfd);
|
||||
typedef remote::OrcRemoteTargetServer<FDRPCChannel, OrcX86_64_SysV> MyServerT;
|
||||
|
||||
using MyServerT = remote::OrcRemoteTargetServer<FDRPCChannel, OrcX86_64_SysV>;
|
||||
|
||||
MyServerT Server(TCPChannel, SymbolLookup, RegisterEHFrames, DeregisterEHFrames);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
//===----- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope ----*- C++ -*-===//
|
||||
//===- KaleidoscopeJIT.h - A simple JIT for Kaleidoscope --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -19,7 +19,6 @@
|
||||
#include "llvm/ExecutionEngine/ExecutionEngine.h"
|
||||
#include "llvm/ExecutionEngine/JITSymbol.h"
|
||||
#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/RuntimeDyld.h"
|
||||
#include "llvm/ExecutionEngine/SectionMemoryManager.h"
|
||||
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
|
||||
#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
|
||||
@ -40,9 +39,9 @@ namespace orc {
|
||||
|
||||
class KaleidoscopeJIT {
|
||||
public:
|
||||
typedef RTDyldObjectLinkingLayer<> ObjLayerT;
|
||||
typedef IRCompileLayer<ObjLayerT> CompileLayerT;
|
||||
typedef CompileLayerT::ModuleSetHandleT ModuleHandleT;
|
||||
using ObjLayerT = RTDyldObjectLinkingLayer<>;
|
||||
using CompileLayerT = IRCompileLayer<ObjLayerT>;
|
||||
using ModuleHandleT = CompileLayerT::ModuleSetHandleT;
|
||||
|
||||
KaleidoscopeJIT()
|
||||
: TM(EngineBuilder().selectTarget()), DL(TM->createDataLayout()),
|
||||
|
@@ -59,6 +59,7 @@ class Triple {
     mips64,          // MIPS64: mips64
     mips64el,        // MIPS64EL: mips64el
     msp430,          // MSP430: msp430
+    nios2,           // NIOSII: nios2
     ppc,             // PPC: powerpc
     ppc64,           // PPC64: powerpc64, ppu
     ppc64le,         // PPC64LE: powerpc64le
@ -70,174 +70,173 @@ struct SimplifyQuery {
|
||||
Copy.CxtI = I;
|
||||
return Copy;
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
// NOTE: the explicit multiple argument versions of these functions are
|
||||
// deprecated.
|
||||
// Please use the SimplifyQuery versions in new code.
|
||||
// NOTE: the explicit multiple argument versions of these functions are
|
||||
// deprecated.
|
||||
// Please use the SimplifyQuery versions in new code.
|
||||
|
||||
/// Given operands for an Add, fold the result or return null.
|
||||
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
|
||||
/// Given operands for an Add, fold the result or return null.
|
||||
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a Sub, fold the result or return null.
|
||||
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
|
||||
const SimplifyQuery &Q);
|
||||
/// Given operands for a Sub, fold the result or return null.
|
||||
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FAdd, fold the result or return null.
|
||||
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
/// Given operands for an FAdd, fold the result or return null.
|
||||
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FSub, fold the result or return null.
|
||||
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FMul, fold the result or return null.
|
||||
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a Mul, fold the result or return null.
|
||||
Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an SDiv, fold the result or return null.
|
||||
Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a UDiv, fold the result or return null.
|
||||
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FDiv, fold the result or return null.
|
||||
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an SRem, fold the result or return null.
|
||||
Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a URem, fold the result or return null.
|
||||
Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FRem, fold the result or return null.
|
||||
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a Shl, fold the result or return null.
|
||||
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a LShr, fold the result or return null.
|
||||
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a AShr, fold the result or return nulll.
|
||||
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an And, fold the result or return null.
|
||||
Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an Or, fold the result or return null.
|
||||
Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an Xor, fold the result or return null.
|
||||
Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an ICmpInst, fold the result or return null.
|
||||
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FCmpInst, fold the result or return null.
|
||||
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
||||
FastMathFlags FMF, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a SelectInst, fold the result or return null.
|
||||
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FSub, fold the result or return null.
|
||||
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
/// Given operands for a GetElementPtrInst, fold the result or return null.
|
||||
Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FMul, fold the result or return null.
|
||||
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
/// Given operands for an InsertValueInst, fold the result or return null.
|
||||
Value *SimplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a Mul, fold the result or return null.
|
||||
Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
/// Given operands for an ExtractValueInst, fold the result or return null.
|
||||
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an SDiv, fold the result or return null.
|
||||
Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a UDiv, fold the result or return null.
|
||||
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FDiv, fold the result or return null.
|
||||
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an SRem, fold the result or return null.
|
||||
Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a URem, fold the result or return null.
|
||||
Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FRem, fold the result or return null.
|
||||
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a Shl, fold the result or return null.
|
||||
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a LShr, fold the result or return null.
|
||||
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a AShr, fold the result or return nulll.
|
||||
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an And, fold the result or return null.
|
||||
Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an Or, fold the result or return null.
|
||||
Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an Xor, fold the result or return null.
|
||||
Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an ICmpInst, fold the result or return null.
|
||||
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FCmpInst, fold the result or return null.
|
||||
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
||||
FastMathFlags FMF, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a SelectInst, fold the result or return null.
|
||||
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a GetElementPtrInst, fold the result or return null.
|
||||
Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an InsertValueInst, fold the result or return null.
|
||||
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
|
||||
ArrayRef<unsigned> Idxs,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an ExtractValueInst, fold the result or return null.
|
||||
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
|
||||
/// Given operands for an ExtractElementInst, fold the result or return null.
|
||||
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an ExtractElementInst, fold the result or return null.
|
||||
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
|
||||
const SimplifyQuery &Q);
|
||||
/// Given operands for a CastInst, fold the result or return null.
|
||||
Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a CastInst, fold the result or return null.
|
||||
Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
|
||||
const SimplifyQuery &Q);
|
||||
/// Given operands for a ShuffleVectorInst, fold the result or return null.
|
||||
Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
|
||||
Type *RetTy, const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a ShuffleVectorInst, fold the result or return null.
|
||||
Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
|
||||
Type *RetTy, const SimplifyQuery &Q);
|
||||
//=== Helper functions for higher up the class hierarchy.
|
||||
|
||||
//=== Helper functions for higher up the class hierarchy.
|
||||
|
||||
|
||||
/// Given operands for a CmpInst, fold the result or return null.
|
||||
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for a BinaryOperator, fold the result or return null.
|
||||
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
|
||||
/// Given operands for a CmpInst, fold the result or return null.
|
||||
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given operands for an FP BinaryOperator, fold the result or return null.
|
||||
/// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
|
||||
/// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
|
||||
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
|
||||
FastMathFlags FMF, const SimplifyQuery &Q);
|
||||
/// Given operands for a BinaryOperator, fold the result or return null.
|
||||
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
|
||||
const SimplifyQuery &Q);
|
||||
|
||||
/// Given a function and iterators over arguments, fold the result or return
|
||||
/// null.
|
||||
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
|
||||
User::op_iterator ArgEnd, const SimplifyQuery &Q);
|
||||
/// Given operands for an FP BinaryOperator, fold the result or return null.
|
||||
/// In contrast to SimplifyBinOp, try to use FastMathFlag when folding the
|
||||
/// result. In case we don't need FastMathFlags, simply fall to SimplifyBinOp.
|
||||
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
|
||||
FastMathFlags FMF, const SimplifyQuery &Q);
|
||||
|
||||
/// Given a function and set of arguments, fold the result or return null.
|
||||
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q);
|
||||
/// Given a function and iterators over arguments, fold the result or return
|
||||
/// null.
|
||||
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
|
||||
User::op_iterator ArgEnd, const SimplifyQuery &Q);
|
||||
|
||||
/// See if we can compute a simplified version of this instruction. If not,
|
||||
/// return null.
|
||||
Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
|
||||
OptimizationRemarkEmitter *ORE = nullptr);
|
||||
/// Given a function and set of arguments, fold the result or return null.
|
||||
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q);
|
||||
|
||||
/// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
|
||||
///
|
||||
/// This first performs a normal RAUW of I with SimpleV. It then recursively
|
||||
/// attempts to simplify those users updated by the operation. The 'I'
|
||||
/// instruction must not be equal to the simplified value 'SimpleV'.
|
||||
///
|
||||
/// The function returns true if any simplifications were performed.
|
||||
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
|
||||
const TargetLibraryInfo *TLI = nullptr,
|
||||
const DominatorTree *DT = nullptr,
|
||||
AssumptionCache *AC = nullptr);
|
||||
/// See if we can compute a simplified version of this instruction. If not,
|
||||
/// return null.
|
||||
Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
|
||||
OptimizationRemarkEmitter *ORE = nullptr);
|
||||
|
||||
/// Recursively attempt to simplify an instruction.
|
||||
///
|
||||
/// This routine uses SimplifyInstruction to simplify 'I', and if successful
|
||||
/// replaces uses of 'I' with the simplified value. It then recurses on each
|
||||
/// of the users impacted. It returns true if any simplifications were
|
||||
/// performed.
|
||||
bool recursivelySimplifyInstruction(Instruction *I,
|
||||
const TargetLibraryInfo *TLI = nullptr,
|
||||
const DominatorTree *DT = nullptr,
|
||||
AssumptionCache *AC = nullptr);
|
||||
// These helper functions return a SimplifyQuery structure that contains as
|
||||
// many of the optional analysis we use as are currently valid. This is the
|
||||
// strongly preferred way of constructing SimplifyQuery in passes.
|
||||
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
|
||||
template <class T, class... TArgs>
|
||||
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
|
||||
Function &);
|
||||
const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
|
||||
const DataLayout &);
|
||||
/// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
|
||||
///
|
||||
/// This first performs a normal RAUW of I with SimpleV. It then recursively
|
||||
/// attempts to simplify those users updated by the operation. The 'I'
|
||||
/// instruction must not be equal to the simplified value 'SimpleV'.
|
||||
///
|
||||
/// The function returns true if any simplifications were performed.
|
||||
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
|
||||
const TargetLibraryInfo *TLI = nullptr,
|
||||
const DominatorTree *DT = nullptr,
|
||||
AssumptionCache *AC = nullptr);
|
||||
|
||||
/// Recursively attempt to simplify an instruction.
|
||||
///
|
||||
/// This routine uses SimplifyInstruction to simplify 'I', and if successful
|
||||
/// replaces uses of 'I' with the simplified value. It then recurses on each
|
||||
/// of the users impacted. It returns true if any simplifications were
|
||||
/// performed.
|
||||
bool recursivelySimplifyInstruction(Instruction *I,
|
||||
const TargetLibraryInfo *TLI = nullptr,
|
||||
const DominatorTree *DT = nullptr,
|
||||
AssumptionCache *AC = nullptr);
|
||||
|
||||
// These helper functions return a SimplifyQuery structure that contains as
|
||||
// many of the optional analysis we use as are currently valid. This is the
|
||||
// strongly preferred way of constructing SimplifyQuery in passes.
|
||||
const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
|
||||
template <class T, class... TArgs>
|
||||
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
|
||||
Function &);
|
||||
const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
|
||||
const DataLayout &);
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
|
@@ -126,9 +126,8 @@ class LPPassManager : public FunctionPass, public PMDataManager {
   }
 
 public:
-  // Add a new loop into the loop queue as a child of the given parent, or at
-  // the top level if \c ParentLoop is null.
-  Loop &addLoop(Loop *ParentLoop);
+  // Add a new loop into the loop queue.
+  void addLoop(Loop &L);
 
   //===--------------------------------------------------------------------===//
   /// SimpleAnalysis - Provides simple interface to update analysis info
@@ -1533,6 +1533,12 @@ class ScalarEvolution {
   /// specified loop.
   bool isLoopInvariant(const SCEV *S, const Loop *L);
 
+  /// Determine if the SCEV can be evaluated at loop's entry. It is true if it
+  /// doesn't depend on a SCEVUnknown of an instruction which is dominated by
+  /// the header of loop L.
+  bool isAvailableAtLoopEntry(const SCEV *S, const Loop *L, DominatorTree &DT,
+                              LoopInfo &LI);
+
   /// Return true if the given SCEV changes value in a known way in the
   /// specified loop. This property being true implies that the value is
   /// variant in the loop AND that we can emit an expression to compute the
@@ -396,6 +396,9 @@ class TargetTransformInfo {
   bool isLegalMaskedScatter(Type *DataType) const;
   bool isLegalMaskedGather(Type *DataType) const;
 
+  /// Return true if target doesn't mind addresses in vectors.
+  bool prefersVectorizedAddressing() const;
+
   /// \brief Return the cost of the scaling factor used in the addressing
   /// mode represented by AM for this target, for a load/store
   /// of the specified type.
@@ -807,6 +810,7 @@ class TargetTransformInfo::Concept {
   virtual bool isLegalMaskedLoad(Type *DataType) = 0;
   virtual bool isLegalMaskedScatter(Type *DataType) = 0;
   virtual bool isLegalMaskedGather(Type *DataType) = 0;
+  virtual bool prefersVectorizedAddressing() = 0;
   virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                    int64_t BaseOffset, bool HasBaseReg,
                                    int64_t Scale, unsigned AddrSpace) = 0;
@@ -1000,6 +1004,9 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
   bool isLegalMaskedGather(Type *DataType) override {
     return Impl.isLegalMaskedGather(DataType);
   }
+  bool prefersVectorizedAddressing() override {
+    return Impl.prefersVectorizedAddressing();
+  }
   int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                            bool HasBaseReg, int64_t Scale,
                            unsigned AddrSpace) override {

@@ -237,6 +237,8 @@ class TargetTransformInfoImplBase {
 
   bool isLegalMaskedGather(Type *DataType) { return false; }
 
+  bool prefersVectorizedAddressing() { return true; }
+
   int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                            bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
     // Guess that all legal addressing mode are free.
@@ -60,7 +60,8 @@ template <typename T> class ArrayRef;
 KnownBits computeKnownBits(const Value *V, const DataLayout &DL,
                            unsigned Depth = 0, AssumptionCache *AC = nullptr,
                            const Instruction *CxtI = nullptr,
-                           const DominatorTree *DT = nullptr);
+                           const DominatorTree *DT = nullptr,
+                           OptimizationRemarkEmitter *ORE = nullptr);
 /// Compute known bits from the range metadata.
 /// \p KnownZero the set of bits that are known to be zero
 /// \p KnownOne the set of bits that are known to be one
@ -34,6 +34,7 @@
|
||||
namespace llvm {
|
||||
|
||||
class AsmPrinterHandler;
|
||||
class BasicBlock;
|
||||
class BlockAddress;
|
||||
class Constant;
|
||||
class ConstantArray;
|
||||
@ -43,6 +44,7 @@ class DIEAbbrev;
|
||||
class DwarfDebug;
|
||||
class GCMetadataPrinter;
|
||||
class GlobalIndirectSymbol;
|
||||
class GlobalObject;
|
||||
class GlobalValue;
|
||||
class GlobalVariable;
|
||||
class GCStrategy;
|
||||
@ -65,6 +67,8 @@ class MCSubtargetInfo;
|
||||
class MCSymbol;
|
||||
class MCTargetOptions;
|
||||
class MDNode;
|
||||
class Module;
|
||||
class raw_ostream;
|
||||
class TargetLoweringObjectFile;
|
||||
class TargetMachine;
|
||||
|
||||
@ -109,7 +113,7 @@ class AsmPrinter : public MachineFunctionPass {
|
||||
|
||||
/// Map global GOT equivalent MCSymbols to GlobalVariables and keep track of
|
||||
/// its number of uses by other globals.
|
||||
typedef std::pair<const GlobalVariable *, unsigned> GOTEquivUsePair;
|
||||
using GOTEquivUsePair = std::pair<const GlobalVariable *, unsigned>;
|
||||
MapVector<const MCSymbol *, GOTEquivUsePair> GlobalGOTEquivs;
|
||||
|
||||
/// Enable print [latency:throughput] in output
|
||||
|
@ -1,4 +1,4 @@
//===-- AtomicExpandUtils.h - Utilities for expanding atomic instructions -===//
//===- AtomicExpandUtils.h - Utilities for expanding atomic instructions --===//
//
// The LLVM Compiler Infrastructure
//
@ -7,19 +7,24 @@
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_ATOMICEXPANDUTILS_H
#define LLVM_CODEGEN_ATOMICEXPANDUTILS_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/AtomicOrdering.h"
namespace llvm {
class Value;
class AtomicRMWInst;
class AtomicRMWInst;
class Value;
/// Parameters (see the expansion example below):
/// (the builder, %addr, %loaded, %new_val, ordering,
/// /* OUT */ %success, /* OUT */ %new_loaded)
typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
AtomicOrdering, Value *&, Value *&)> CreateCmpXchgInstFun;
using CreateCmpXchgInstFun =
function_ref<void(IRBuilder<> &, Value *, Value *, Value *, AtomicOrdering,
Value *&, Value *&)>;
/// \brief Expand an atomic RMW instruction into a loop utilizing
/// cmpxchg. You'll want to make sure your target machine likes cmpxchg
@ -42,7 +47,8 @@ typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
/// loop:
/// %loaded = phi iN [ %init_loaded, %entry ], [ %new_loaded, %loop ]
/// %new = some_op iN %loaded, %incr
/// ; This is what -atomic-expand will produce using this function on i686 targets:
/// ; This is what -atomic-expand will produce using this function on i686
/// targets:
/// %pair = cmpxchg iN* %addr, iN %loaded, iN %new_val
/// %new_loaded = extractvalue { iN, i1 } %pair, 0
/// %success = extractvalue { iN, i1 } %pair, 1
@ -52,6 +58,8 @@ typedef function_ref<void(IRBuilder<> &, Value *, Value *, Value *,
/// [...]
///
/// Returns true if the containing function was modified.
bool
expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
}
bool expandAtomicRMWToCmpXchg(AtomicRMWInst *AI, CreateCmpXchgInstFun Factory);
} // end namespace llvm
#endif // LLVM_CODEGEN_ATOMICEXPANDUTILS_H
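The expansion comment above spells out what the target-provided CreateCmpXchgInstFun callback must emit. A hedged sketch of such a callback, modeled on the pattern the comment describes (the function name is illustrative):

    static void createCmpXchgInstFun(IRBuilder<> &Builder, Value *Addr,
                                     Value *Loaded, Value *NewVal,
                                     AtomicOrdering Ordering, Value *&Success,
                                     Value *&NewLoaded) {
      // Emit the cmpxchg and unpack the {old value, success flag} pair.
      Value *Pair = Builder.CreateAtomicCmpXchg(
          Addr, Loaded, NewVal, Ordering,
          AtomicCmpXchgInst::getStrongestFailureOrdering(Ordering));
      NewLoaded = Builder.CreateExtractValue(Pair, 0, "newloaded");
      Success = Builder.CreateExtractValue(Pair, 1, "success");
    }

A target pass would then call expandAtomicRMWToCmpXchg(AI, createCmpXchgInstFun) for each atomicrmw instruction it wants lowered this way.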
@ -1,4 +1,4 @@
|
||||
//===--- lib/CodeGen/DIE.h - DWARF Info Entries -----------------*- C++ -*-===//
|
||||
//===- lib/CodeGen/DIE.h - DWARF Info Entries -------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -31,6 +31,7 @@
|
||||
#include <iterator>
|
||||
#include <new>
|
||||
#include <type_traits>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
@ -53,11 +54,11 @@ class DIEAbbrevData {
|
||||
dwarf::Form Form;
|
||||
|
||||
/// Dwarf attribute value for DW_FORM_implicit_const
|
||||
int64_t Value;
|
||||
int64_t Value = 0;
|
||||
|
||||
public:
|
||||
DIEAbbrevData(dwarf::Attribute A, dwarf::Form F)
|
||||
: Attribute(A), Form(F), Value(0) {}
|
||||
: Attribute(A), Form(F) {}
|
||||
DIEAbbrevData(dwarf::Attribute A, int64_t V)
|
||||
: Attribute(A), Form(dwarf::DW_FORM_implicit_const), Value(V) {}
|
||||
|
||||
@ -136,13 +137,14 @@ class DIEAbbrevSet {
|
||||
/// storage container.
|
||||
BumpPtrAllocator &Alloc;
|
||||
/// \brief FoldingSet that uniques the abbreviations.
|
||||
llvm::FoldingSet<DIEAbbrev> AbbreviationsSet;
|
||||
FoldingSet<DIEAbbrev> AbbreviationsSet;
|
||||
/// A list of all the unique abbreviations in use.
|
||||
std::vector<DIEAbbrev *> Abbreviations;
|
||||
|
||||
public:
|
||||
DIEAbbrevSet(BumpPtrAllocator &A) : Alloc(A) {}
|
||||
~DIEAbbrevSet();
|
||||
|
||||
/// Generate the abbreviation declaration for a DIE and return a pointer to
|
||||
/// the generated abbreviation.
|
||||
///
|
||||
@ -289,13 +291,11 @@ class DIEInlineString {
|
||||
/// A pointer to another debug information entry. An instance of this class can
|
||||
/// also be used as a proxy for a debug information entry not yet defined
|
||||
/// (ie. types.)
|
||||
class DIE;
|
||||
class DIEEntry {
|
||||
DIE *Entry;
|
||||
|
||||
DIEEntry() = delete;
|
||||
|
||||
public:
|
||||
DIEEntry() = delete;
|
||||
explicit DIEEntry(DIE &E) : Entry(&E) {}
|
||||
|
||||
DIE &getEntry() const { return *Entry; }
|
||||
@ -348,10 +348,10 @@ class DIEValue {
|
||||
///
|
||||
/// All values that aren't standard layout (or are larger than 8 bytes)
|
||||
/// should be stored by reference instead of by value.
|
||||
typedef AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
|
||||
DIEDelta *, DIEEntry, DIEBlock *, DIELoc *,
|
||||
DIELocList>
|
||||
ValTy;
|
||||
using ValTy = AlignedCharArrayUnion<DIEInteger, DIEString, DIEExpr, DIELabel,
|
||||
DIEDelta *, DIEEntry, DIEBlock *,
|
||||
DIELoc *, DIELocList>;
|
||||
|
||||
static_assert(sizeof(ValTy) <= sizeof(uint64_t) ||
|
||||
sizeof(ValTy) <= sizeof(void *),
|
||||
"Expected all large types to be stored via pointer");
|
||||
@ -486,10 +486,12 @@ struct IntrusiveBackListNode {
|
||||
};
|
||||
|
||||
struct IntrusiveBackListBase {
|
||||
typedef IntrusiveBackListNode Node;
|
||||
using Node = IntrusiveBackListNode;
|
||||
|
||||
Node *Last = nullptr;
|
||||
|
||||
bool empty() const { return !Last; }
|
||||
|
||||
void push_back(Node &N) {
|
||||
assert(N.Next.getPointer() == &N && "Expected unlinked node");
|
||||
assert(N.Next.getInt() == true && "Expected unlinked node");
|
||||
@ -505,6 +507,7 @@ struct IntrusiveBackListBase {
|
||||
template <class T> class IntrusiveBackList : IntrusiveBackListBase {
|
||||
public:
|
||||
using IntrusiveBackListBase::empty;
|
||||
|
||||
void push_back(T &N) { IntrusiveBackListBase::push_back(N); }
|
||||
T &back() { return *static_cast<T *>(Last); }
|
||||
const T &back() const { return *static_cast<T *>(Last); }
|
||||
@ -513,6 +516,7 @@ template <class T> class IntrusiveBackList : IntrusiveBackListBase {
|
||||
class iterator
|
||||
: public iterator_facade_base<iterator, std::forward_iterator_tag, T> {
|
||||
friend class const_iterator;
|
||||
|
||||
Node *N = nullptr;
|
||||
|
||||
public:
|
||||
@ -585,10 +589,12 @@ template <class T> class IntrusiveBackList : IntrusiveBackListBase {
|
||||
class DIEValueList {
|
||||
struct Node : IntrusiveBackListNode {
|
||||
DIEValue V;
|
||||
|
||||
explicit Node(DIEValue V) : V(V) {}
|
||||
};
|
||||
|
||||
typedef IntrusiveBackList<Node> ListTy;
|
||||
using ListTy = IntrusiveBackList<Node>;
|
||||
|
||||
ListTy List;
|
||||
|
||||
public:
|
||||
@ -597,9 +603,10 @@ class DIEValueList {
|
||||
: public iterator_adaptor_base<value_iterator, ListTy::iterator,
|
||||
std::forward_iterator_tag, DIEValue> {
|
||||
friend class const_value_iterator;
|
||||
typedef iterator_adaptor_base<value_iterator, ListTy::iterator,
|
||||
std::forward_iterator_tag,
|
||||
DIEValue> iterator_adaptor;
|
||||
|
||||
using iterator_adaptor =
|
||||
iterator_adaptor_base<value_iterator, ListTy::iterator,
|
||||
std::forward_iterator_tag, DIEValue>;
|
||||
|
||||
public:
|
||||
value_iterator() = default;
|
||||
@ -612,9 +619,9 @@ class DIEValueList {
|
||||
class const_value_iterator : public iterator_adaptor_base<
|
||||
const_value_iterator, ListTy::const_iterator,
|
||||
std::forward_iterator_tag, const DIEValue> {
|
||||
typedef iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
|
||||
std::forward_iterator_tag,
|
||||
const DIEValue> iterator_adaptor;
|
||||
using iterator_adaptor =
|
||||
iterator_adaptor_base<const_value_iterator, ListTy::const_iterator,
|
||||
std::forward_iterator_tag, const DIEValue>;
|
||||
|
||||
public:
|
||||
const_value_iterator() = default;
|
||||
@ -627,8 +634,8 @@ class DIEValueList {
|
||||
const DIEValue &operator*() const { return wrapped()->V; }
|
||||
};
|
||||
|
||||
typedef iterator_range<value_iterator> value_range;
|
||||
typedef iterator_range<const_value_iterator> const_value_range;
|
||||
using value_range = iterator_range<value_iterator>;
|
||||
using const_value_range = iterator_range<const_value_iterator>;
|
||||
|
||||
value_iterator addValue(BumpPtrAllocator &Alloc, const DIEValue &V) {
|
||||
List.push_back(*new (Alloc) Node(V));
|
||||
@ -657,15 +664,15 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
|
||||
friend class DIEUnit;
|
||||
|
||||
/// Dwarf unit relative offset.
|
||||
unsigned Offset;
|
||||
unsigned Offset = 0;
|
||||
/// Size of instance + children.
|
||||
unsigned Size;
|
||||
unsigned Size = 0;
|
||||
unsigned AbbrevNumber = ~0u;
|
||||
/// Dwarf tag code.
|
||||
dwarf::Tag Tag = (dwarf::Tag)0;
|
||||
/// Set to true to force a DIE to emit an abbreviation that says it has
|
||||
/// children even when it doesn't. This is used for unit testing purposes.
|
||||
bool ForceChildren;
|
||||
bool ForceChildren = false;
|
||||
/// Children DIEs.
|
||||
IntrusiveBackList<DIE> Children;
|
||||
|
||||
@ -673,20 +680,19 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
|
||||
/// DIEUnit which contains this DIE as its unit DIE.
|
||||
PointerUnion<DIE *, DIEUnit *> Owner;
|
||||
|
||||
DIE() = delete;
|
||||
explicit DIE(dwarf::Tag Tag) : Offset(0), Size(0), Tag(Tag),
|
||||
ForceChildren(false) {}
|
||||
explicit DIE(dwarf::Tag Tag) : Tag(Tag) {}
|
||||
|
||||
public:
|
||||
DIE() = delete;
|
||||
DIE(const DIE &RHS) = delete;
|
||||
DIE(DIE &&RHS) = delete;
|
||||
DIE &operator=(const DIE &RHS) = delete;
|
||||
DIE &operator=(const DIE &&RHS) = delete;
|
||||
|
||||
static DIE *get(BumpPtrAllocator &Alloc, dwarf::Tag Tag) {
|
||||
return new (Alloc) DIE(Tag);
|
||||
}
|
||||
|
||||
DIE(const DIE &RHS) = delete;
|
||||
DIE(DIE &&RHS) = delete;
|
||||
void operator=(const DIE &RHS) = delete;
|
||||
void operator=(const DIE &&RHS) = delete;
|
||||
|
||||
// Accessors.
|
||||
unsigned getAbbrevNumber() const { return AbbrevNumber; }
|
||||
dwarf::Tag getTag() const { return Tag; }
|
||||
@ -696,10 +702,10 @@ class DIE : IntrusiveBackListNode, public DIEValueList {
|
||||
bool hasChildren() const { return ForceChildren || !Children.empty(); }
|
||||
void setForceChildren(bool B) { ForceChildren = B; }
|
||||
|
||||
typedef IntrusiveBackList<DIE>::iterator child_iterator;
|
||||
typedef IntrusiveBackList<DIE>::const_iterator const_child_iterator;
|
||||
typedef iterator_range<child_iterator> child_range;
|
||||
typedef iterator_range<const_child_iterator> const_child_range;
|
||||
using child_iterator = IntrusiveBackList<DIE>::iterator;
|
||||
using const_child_iterator = IntrusiveBackList<DIE>::const_iterator;
|
||||
using child_range = iterator_range<child_iterator>;
|
||||
using const_child_range = iterator_range<const_child_iterator>;
|
||||
|
||||
child_range children() {
|
||||
return make_range(Children.begin(), Children.end());
|
||||
@ -838,10 +844,10 @@ struct BasicDIEUnit final : DIEUnit {
|
||||
/// DIELoc - Represents an expression location.
|
||||
//
|
||||
class DIELoc : public DIEValueList {
|
||||
mutable unsigned Size; // Size in bytes excluding size header.
|
||||
mutable unsigned Size = 0; // Size in bytes excluding size header.
|
||||
|
||||
public:
|
||||
DIELoc() : Size(0) {}
|
||||
DIELoc() = default;
|
||||
|
||||
/// ComputeSize - Calculate the size of the location expression.
|
||||
///
|
||||
@ -872,10 +878,10 @@ class DIELoc : public DIEValueList {
|
||||
/// DIEBlock - Represents a block of values.
|
||||
//
|
||||
class DIEBlock : public DIEValueList {
|
||||
mutable unsigned Size; // Size in bytes excluding size header.
|
||||
mutable unsigned Size = 0; // Size in bytes excluding size header.
|
||||
|
||||
public:
|
||||
DIEBlock() : Size(0) {}
|
||||
DIEBlock() = default;
|
||||
|
||||
/// ComputeSize - Calculate the size of the location expression.
|
||||
///
|
||||
|
@ -56,7 +56,7 @@ class FaultMaps {
HandlerOffsetExpr(HandlerOffset) {}
};
typedef std::vector<FaultInfo> FunctionFaultInfos;
using FunctionFaultInfos = std::vector<FaultInfo>;
// We'd like to keep a stable iteration order for FunctionInfos to help
// FileCheck based testing.
@ -78,20 +78,17 @@ class FaultMaps {
/// generated by the version of LLVM that includes it. No guarantees are made
/// with respect to forward or backward compatibility.
class FaultMapParser {
typedef uint8_t FaultMapVersionType;
static const size_t FaultMapVersionOffset = 0;
using FaultMapVersionType = uint8_t;
using Reserved0Type = uint8_t;
using Reserved1Type = uint16_t;
using NumFunctionsType = uint32_t;
typedef uint8_t Reserved0Type;
static const size_t FaultMapVersionOffset = 0;
static const size_t Reserved0Offset =
FaultMapVersionOffset + sizeof(FaultMapVersionType);
typedef uint16_t Reserved1Type;
static const size_t Reserved1Offset = Reserved0Offset + sizeof(Reserved0Type);
typedef uint32_t NumFunctionsType;
static const size_t NumFunctionsOffset =
Reserved1Offset + sizeof(Reserved1Type);
static const size_t FunctionInfosOffset =
NumFunctionsOffset + sizeof(NumFunctionsType);
@ -105,14 +102,13 @@ class FaultMapParser {
public:
class FunctionFaultInfoAccessor {
typedef uint32_t FaultKindType;
static const size_t FaultKindOffset = 0;
using FaultKindType = uint32_t;
using FaultingPCOffsetType = uint32_t;
using HandlerPCOffsetType = uint32_t;
typedef uint32_t FaultingPCOffsetType;
static const size_t FaultKindOffset = 0;
static const size_t FaultingPCOffsetOffset =
FaultKindOffset + sizeof(FaultKindType);
typedef uint32_t HandlerPCOffsetType;
static const size_t HandlerPCOffsetOffset =
FaultingPCOffsetOffset + sizeof(FaultingPCOffsetType);
@ -140,20 +136,17 @@ class FaultMapParser {
};
class FunctionInfoAccessor {
typedef uint64_t FunctionAddrType;
static const size_t FunctionAddrOffset = 0;
using FunctionAddrType = uint64_t;
using NumFaultingPCsType = uint32_t;
using ReservedType = uint32_t;
typedef uint32_t NumFaultingPCsType;
static const size_t FunctionAddrOffset = 0;
static const size_t NumFaultingPCsOffset =
FunctionAddrOffset + sizeof(FunctionAddrType);
typedef uint32_t ReservedType;
static const size_t ReservedOffset =
NumFaultingPCsOffset + sizeof(NumFaultingPCsType);
static const size_t FunctionFaultInfosOffset =
ReservedOffset + sizeof(ReservedType);
static const size_t FunctionInfoHeaderSize = FunctionFaultInfosOffset;
const uint8_t *P = nullptr;
include/llvm/CodeGen/GlobalISel/Localizer.h (new file, 78 lines)
@ -0,0 +1,78 @@
//== llvm/CodeGen/GlobalISel/Localizer.h - Localizer -------------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file This file describes the interface of the Localizer pass.
/// This pass moves/duplicates constant-like instructions close to their uses.
/// Its primarily goal is to workaround the deficiencies of the fast register
/// allocator.
/// With GlobalISel constants are all materialized in the entry block of
/// a function. However, the fast allocator cannot rematerialize constants and
/// has a lot more live-ranges to deal with and will most likely end up
/// spilling a lot.
/// By pushing the constants close to their use, we only create small
/// live-ranges.
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
#define LLVM_CODEGEN_GLOBALISEL_LOCALIZER_H
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
namespace llvm {
// Forward declarations.
class MachineRegisterInfo;
/// This pass implements the localization mechanism described at the
/// top of this file. One specificity of the implementation is that
/// it will materialize one and only one instance of a constant per
/// basic block, thus enabling reuse of that constant within that block.
/// Moreover, it only materializes constants in blocks where they
/// are used. PHI uses are considered happening at the end of the
/// related predecessor.
class Localizer : public MachineFunctionPass {
public:
static char ID;
private:
/// MRI contains all the register class/bank information that this
/// pass uses and updates.
MachineRegisterInfo *MRI;
/// Check whether or not \p MI needs to be moved close to its uses.
static bool shouldLocalize(const MachineInstr &MI);
/// Check if \p MOUse is used in the same basic block as \p Def.
/// If the use is in the same block, we say it is local.
/// When the use is not local, \p InsertMBB will contain the basic
/// block when to insert \p Def to have a local use.
static bool isLocalUse(MachineOperand &MOUse, const MachineInstr &Def,
MachineBasicBlock *&InsertMBB);
/// Initialize the field members using \p MF.
void init(MachineFunction &MF);
public:
Localizer();
StringRef getPassName() const override { return "Localizer"; }
MachineFunctionProperties getRequiredProperties() const override {
return MachineFunctionProperties()
.set(MachineFunctionProperties::Property::IsSSA)
.set(MachineFunctionProperties::Property::Legalized)
.set(MachineFunctionProperties::Property::RegBankSelected);
}
bool runOnMachineFunction(MachineFunction &MF) override;
};
} // End namespace llvm.
#endif
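The new pass is meant to run late in the GlobalISel pipeline, after register-bank selection. A sketch of how a target's pass configuration might schedule it; MyTargetPassConfig is an illustrative name and the hook is the one GlobalISel targets of this era override:

    void MyTargetPassConfig::addPreGlobalInstructionSelect() {
      // Re-materialize constants next to their uses before instruction
      // selection so the fast register allocator sees short live ranges.
      addPass(new Localizer());
    }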
@ -264,6 +264,14 @@ namespace ISD {
/// optimized.
STRICT_FADD, STRICT_FSUB, STRICT_FMUL, STRICT_FDIV, STRICT_FREM,
/// Constrained versions of libm-equivalent floating point intrinsics.
/// These will be lowered to the equivalent non-constrained pseudo-op
/// (or expanded to the equivalent library call) before final selection.
/// They are used to limit optimizations while the DAG is being optimized.
STRICT_FSQRT, STRICT_FPOW, STRICT_FPOWI, STRICT_FSIN, STRICT_FCOS,
STRICT_FEXP, STRICT_FEXP2, STRICT_FLOG, STRICT_FLOG10, STRICT_FLOG2,
STRICT_FRINT, STRICT_FNEARBYINT,
/// FMA - Perform a * b + c with no intermediate rounding step.
FMA,
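Since the constrained opcodes are lowered to their regular counterparts before final selection, backend code that only needs cost or legality information can map them back first. An illustrative helper, not part of this import:

    static unsigned getEquivalentNonStrictOpcode(unsigned Opcode) {
      switch (Opcode) {
      case ISD::STRICT_FSQRT: return ISD::FSQRT;
      case ISD::STRICT_FSIN:  return ISD::FSIN;
      case ISD::STRICT_FCOS:  return ISD::FCOS;
      case ISD::STRICT_FRINT: return ISD::FRINT;
      default:                return Opcode;
      }
    }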
@ -1,4 +1,4 @@
|
||||
//===-- llvm/CodeGen/LiveInterval.h - Interval representation ---*- C++ -*-===//
|
||||
//===- llvm/CodeGen/LiveInterval.h - Interval representation ----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -21,22 +21,30 @@
|
||||
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
|
||||
#define LLVM_CODEGEN_LIVEINTERVAL_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/IntEqClasses.h"
|
||||
#include "llvm/ADT/iterator_range.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/ADT/STLExtras.h"
|
||||
#include "llvm/CodeGen/SlotIndexes.h"
|
||||
#include "llvm/MC/LaneBitmask.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
#include "llvm/Support/MathExtras.h"
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
#include <climits>
|
||||
#include <cstddef>
|
||||
#include <functional>
|
||||
#include <memory>
|
||||
#include <set>
|
||||
#include <tuple>
|
||||
#include <utility>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class CoalescerPair;
|
||||
class LiveIntervals;
|
||||
class MachineInstr;
|
||||
class MachineRegisterInfo;
|
||||
class TargetRegisterInfo;
|
||||
class raw_ostream;
|
||||
template <typename T, unsigned Small> class SmallPtrSet;
|
||||
|
||||
/// VNInfo - Value Number Information.
|
||||
/// This class holds information about a machine level values, including
|
||||
@ -44,7 +52,7 @@ namespace llvm {
|
||||
///
|
||||
class VNInfo {
|
||||
public:
|
||||
typedef BumpPtrAllocator Allocator;
|
||||
using Allocator = BumpPtrAllocator;
|
||||
|
||||
/// The ID number of this value.
|
||||
unsigned id;
|
||||
@ -53,14 +61,10 @@ namespace llvm {
|
||||
SlotIndex def;
|
||||
|
||||
/// VNInfo constructor.
|
||||
VNInfo(unsigned i, SlotIndex d)
|
||||
: id(i), def(d)
|
||||
{ }
|
||||
VNInfo(unsigned i, SlotIndex d) : id(i), def(d) {}
|
||||
|
||||
/// VNInfo constructor, copies values from orig, except for the value number.
|
||||
VNInfo(unsigned i, const VNInfo &orig)
|
||||
: id(i), def(orig.def)
|
||||
{ }
|
||||
VNInfo(unsigned i, const VNInfo &orig) : id(i), def(orig.def) {}
|
||||
|
||||
/// Copy from the parameter into this VNInfo.
|
||||
void copyFrom(VNInfo &src) {
|
||||
@ -152,16 +156,16 @@ namespace llvm {
|
||||
/// segment with a new value number is used.
|
||||
class LiveRange {
|
||||
public:
|
||||
|
||||
/// This represents a simple continuous liveness interval for a value.
|
||||
/// The start point is inclusive, the end point exclusive. These intervals
|
||||
/// are rendered as [start,end).
|
||||
struct Segment {
|
||||
SlotIndex start; // Start point of the interval (inclusive)
|
||||
SlotIndex end; // End point of the interval (exclusive)
|
||||
VNInfo *valno; // identifier for the value contained in this segment.
|
||||
VNInfo *valno = nullptr; // identifier for the value contained in this
|
||||
// segment.
|
||||
|
||||
Segment() : valno(nullptr) {}
|
||||
Segment() = default;
|
||||
|
||||
Segment(SlotIndex S, SlotIndex E, VNInfo *V)
|
||||
: start(S), end(E), valno(V) {
|
||||
@ -189,8 +193,8 @@ namespace llvm {
|
||||
void dump() const;
|
||||
};
|
||||
|
||||
typedef SmallVector<Segment, 2> Segments;
|
||||
typedef SmallVector<VNInfo *, 2> VNInfoList;
|
||||
using Segments = SmallVector<Segment, 2>;
|
||||
using VNInfoList = SmallVector<VNInfo *, 2>;
|
||||
|
||||
Segments segments; // the liveness segments
|
||||
VNInfoList valnos; // value#'s
|
||||
@ -198,22 +202,24 @@ namespace llvm {
|
||||
// The segment set is used temporarily to accelerate initial computation
|
||||
// of live ranges of physical registers in computeRegUnitRange.
|
||||
// After that the set is flushed to the segment vector and deleted.
|
||||
typedef std::set<Segment> SegmentSet;
|
||||
using SegmentSet = std::set<Segment>;
|
||||
std::unique_ptr<SegmentSet> segmentSet;
|
||||
|
||||
typedef Segments::iterator iterator;
|
||||
using iterator = Segments::iterator;
|
||||
using const_iterator = Segments::const_iterator;
|
||||
|
||||
iterator begin() { return segments.begin(); }
|
||||
iterator end() { return segments.end(); }
|
||||
|
||||
typedef Segments::const_iterator const_iterator;
|
||||
const_iterator begin() const { return segments.begin(); }
|
||||
const_iterator end() const { return segments.end(); }
|
||||
|
||||
typedef VNInfoList::iterator vni_iterator;
|
||||
using vni_iterator = VNInfoList::iterator;
|
||||
using const_vni_iterator = VNInfoList::const_iterator;
|
||||
|
||||
vni_iterator vni_begin() { return valnos.begin(); }
|
||||
vni_iterator vni_end() { return valnos.end(); }
|
||||
|
||||
typedef VNInfoList::const_iterator const_vni_iterator;
|
||||
const_vni_iterator vni_begin() const { return valnos.begin(); }
|
||||
const_vni_iterator vni_end() const { return valnos.end(); }
|
||||
|
||||
@ -631,40 +637,37 @@ namespace llvm {
|
||||
/// or stack slot.
|
||||
class LiveInterval : public LiveRange {
|
||||
public:
|
||||
typedef LiveRange super;
|
||||
using super = LiveRange;
|
||||
|
||||
/// A live range for subregisters. The LaneMask specifies which parts of the
|
||||
/// super register are covered by the interval.
|
||||
/// (@sa TargetRegisterInfo::getSubRegIndexLaneMask()).
|
||||
class SubRange : public LiveRange {
|
||||
public:
|
||||
SubRange *Next;
|
||||
SubRange *Next = nullptr;
|
||||
LaneBitmask LaneMask;
|
||||
|
||||
/// Constructs a new SubRange object.
|
||||
SubRange(LaneBitmask LaneMask)
|
||||
: Next(nullptr), LaneMask(LaneMask) {
|
||||
}
|
||||
SubRange(LaneBitmask LaneMask) : LaneMask(LaneMask) {}
|
||||
|
||||
/// Constructs a new SubRange object by copying liveness from @p Other.
|
||||
SubRange(LaneBitmask LaneMask, const LiveRange &Other,
|
||||
BumpPtrAllocator &Allocator)
|
||||
: LiveRange(Other, Allocator), Next(nullptr), LaneMask(LaneMask) {
|
||||
}
|
||||
: LiveRange(Other, Allocator), LaneMask(LaneMask) {}
|
||||
|
||||
void print(raw_ostream &OS) const;
|
||||
void dump() const;
|
||||
};
|
||||
|
||||
private:
|
||||
SubRange *SubRanges; ///< Single linked list of subregister live ranges.
|
||||
SubRange *SubRanges = nullptr; ///< Single linked list of subregister live
|
||||
/// ranges.
|
||||
|
||||
public:
|
||||
const unsigned reg; // the register or stack slot of this interval.
|
||||
float weight; // weight of this interval
|
||||
|
||||
LiveInterval(unsigned Reg, float Weight)
|
||||
: SubRanges(nullptr), reg(Reg), weight(Weight) {}
|
||||
LiveInterval(unsigned Reg, float Weight) : reg(Reg), weight(Weight) {}
|
||||
|
||||
~LiveInterval() {
|
||||
clearSubRanges();
|
||||
@ -673,8 +676,10 @@ namespace llvm {
|
||||
template<typename T>
|
||||
class SingleLinkedListIterator {
|
||||
T *P;
|
||||
|
||||
public:
|
||||
SingleLinkedListIterator<T>(T *P) : P(P) {}
|
||||
|
||||
SingleLinkedListIterator<T> &operator++() {
|
||||
P = P->Next;
|
||||
return *this;
|
||||
@ -698,7 +703,9 @@ namespace llvm {
|
||||
}
|
||||
};
|
||||
|
||||
typedef SingleLinkedListIterator<SubRange> subrange_iterator;
|
||||
using subrange_iterator = SingleLinkedListIterator<SubRange>;
|
||||
using const_subrange_iterator = SingleLinkedListIterator<const SubRange>;
|
||||
|
||||
subrange_iterator subrange_begin() {
|
||||
return subrange_iterator(SubRanges);
|
||||
}
|
||||
@ -706,7 +713,6 @@ namespace llvm {
|
||||
return subrange_iterator(nullptr);
|
||||
}
|
||||
|
||||
typedef SingleLinkedListIterator<const SubRange> const_subrange_iterator;
|
||||
const_subrange_iterator subrange_begin() const {
|
||||
return const_subrange_iterator(SubRanges);
|
||||
}
|
||||
@ -759,12 +765,12 @@ namespace llvm {
|
||||
|
||||
/// isSpillable - Can this interval be spilled?
|
||||
bool isSpillable() const {
|
||||
return weight != llvm::huge_valf;
|
||||
return weight != huge_valf;
|
||||
}
|
||||
|
||||
/// markNotSpillable - Mark interval as not spillable
|
||||
void markNotSpillable() {
|
||||
weight = llvm::huge_valf;
|
||||
weight = huge_valf;
|
||||
}
|
||||
|
||||
/// For a given lane mask @p LaneMask, compute indexes at which the
|
||||
@ -931,5 +937,7 @@ namespace llvm {
|
||||
void Distribute(LiveInterval &LI, LiveInterval *LIV[],
|
||||
MachineRegisterInfo &MRI);
|
||||
};
|
||||
}
|
||||
#endif
|
||||
|
||||
} // end namespace llvm
|
||||
|
||||
#endif // LLVM_CODEGEN_LIVEINTERVAL_H
|
||||
|
@ -1,4 +1,4 @@
//===-- LiveIntervalAnalysis.h - Live Interval Analysis ---------*- C++ -*-===//
//===- LiveIntervalAnalysis.h - Live Interval Analysis ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -20,6 +20,7 @@
#ifndef LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#define LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
@ -27,27 +28,29 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/Allocator.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cmath>
#include <cassert>
#include <cstdint>
#include <utility>
namespace llvm {
extern cl::opt<bool> UseSegmentSetForPhysRegs;
class BitVector;
class BlockFrequency;
class LiveRangeCalc;
class LiveVariables;
class MachineDominatorTree;
class MachineLoopInfo;
class TargetRegisterInfo;
class MachineRegisterInfo;
class TargetInstrInfo;
class TargetRegisterClass;
class VirtRegMap;
class MachineBlockFrequencyInfo;
class BitVector;
class LiveRangeCalc;
class MachineBlockFrequencyInfo;
class MachineDominatorTree;
class MachineFunction;
class MachineInstr;
class MachineRegisterInfo;
class raw_ostream;
class TargetInstrInfo;
class VirtRegMap;
class LiveIntervals : public MachineFunctionPass {
MachineFunction* MF;
@ -56,8 +59,8 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
const TargetInstrInfo* TII;
AliasAnalysis *AA;
SlotIndexes* Indexes;
MachineDominatorTree *DomTree;
LiveRangeCalc *LRCalc;
MachineDominatorTree *DomTree = nullptr;
LiveRangeCalc *LRCalc = nullptr;
/// Special pool allocator for VNInfo's (LiveInterval val#).
VNInfo::Allocator VNInfoAllocator;
@ -95,6 +98,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
public:
static char ID;
LiveIntervals();
~LiveIntervals() override;
@ -466,6 +470,7 @@ extern cl::opt<bool> UseSegmentSetForPhysRegs;
class HMEditor;
};
} // End llvm namespace
#endif
} // end namespace llvm
#endif // LLVM_CODEGEN_LIVEINTERVALANALYSIS_H
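For context, a minimal sketch of how a machine pass queries this analysis; VirtReg and MI are illustrative names, not part of this import:

    LiveIntervals &LIS = getAnalysis<LiveIntervals>();
    LiveInterval &LI = LIS.getInterval(VirtReg);   // per-vreg liveness
    SlotIndex Idx = LIS.getInstructionIndex(MI);   // position of MI
    if (LI.liveAt(Idx)) {
      // VirtReg is live across MI.
    }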
@ -26,12 +26,14 @@
namespace llvm {
class raw_ostream;
class TargetRegisterInfo;
#ifndef NDEBUG
// forward declaration
template <unsigned Element> class SparseBitVector;
typedef SparseBitVector<128> LiveVirtRegBitSet;
using LiveVirtRegBitSet = SparseBitVector<128>;
#endif
/// Union of live intervals that are strong candidates for coalescing into a
@ -42,19 +44,19 @@ class LiveIntervalUnion {
// A set of live virtual register segments that supports fast insertion,
// intersection, and removal.
// Mapping SlotIndex intervals to virtual register numbers.
typedef IntervalMap<SlotIndex, LiveInterval*> LiveSegments;
using LiveSegments = IntervalMap<SlotIndex, LiveInterval*>;
public:
// SegmentIter can advance to the next segment ordered by starting position
// which may belong to a different live virtual register. We also must be able
// to reach the current segment's containing virtual register.
typedef LiveSegments::iterator SegmentIter;
using SegmentIter = LiveSegments::iterator;
/// Const version of SegmentIter.
typedef LiveSegments::const_iterator ConstSegmentIter;
using ConstSegmentIter = LiveSegments::const_iterator;
// LiveIntervalUnions share an external allocator.
typedef LiveSegments::Allocator Allocator;
using Allocator = LiveSegments::Allocator;
private:
unsigned Tag = 0; // unique tag for current contents.
@ -76,7 +78,7 @@ class LiveIntervalUnion {
SlotIndex startIndex() const { return Segments.start(); }
// Provide public access to the underlying map to allow overlap iteration.
typedef LiveSegments Map;
using Map = LiveSegments;
const Map &getMap() const { return Segments; }
/// getTag - Return an opaque tag representing the current state of the union.
@ -7,23 +7,24 @@
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements the LivePhysRegs utility for tracking liveness of
|
||||
// physical registers. This can be used for ad-hoc liveness tracking after
|
||||
// register allocation. You can start with the live-ins/live-outs at the
|
||||
// beginning/end of a block and update the information while walking the
|
||||
// instructions inside the block. This implementation tracks the liveness on a
|
||||
// sub-register granularity.
|
||||
//
|
||||
// We assume that the high bits of a physical super-register are not preserved
|
||||
// unless the instruction has an implicit-use operand reading the super-
|
||||
// register.
|
||||
//
|
||||
// X86 Example:
|
||||
// %YMM0<def> = ...
|
||||
// %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0)
|
||||
//
|
||||
// %YMM0<def> = ...
|
||||
// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
|
||||
/// \file
|
||||
/// This file implements the LivePhysRegs utility for tracking liveness of
|
||||
/// physical registers. This can be used for ad-hoc liveness tracking after
|
||||
/// register allocation. You can start with the live-ins/live-outs at the
|
||||
/// beginning/end of a block and update the information while walking the
|
||||
/// instructions inside the block. This implementation tracks the liveness on a
|
||||
/// sub-register granularity.
|
||||
///
|
||||
/// We assume that the high bits of a physical super-register are not preserved
|
||||
/// unless the instruction has an implicit-use operand reading the super-
|
||||
/// register.
|
||||
///
|
||||
/// X86 Example:
|
||||
/// %YMM0<def> = ...
|
||||
/// %XMM0<def> = ... (Kills %XMM0, all %XMM0s sub-registers, and %YMM0)
|
||||
///
|
||||
/// %YMM0<def> = ...
|
||||
/// %XMM0<def> = ..., %YMM0<imp-use> (%YMM0 and all its sub-registers are alive)
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_LIVEPHYSREGS_H
|
||||
@ -39,40 +40,42 @@
|
||||
namespace llvm {
|
||||
|
||||
class MachineInstr;
|
||||
class MachineOperand;
|
||||
class MachineRegisterInfo;
|
||||
class raw_ostream;
|
||||
|
||||
/// \brief A set of live physical registers with functions to track liveness
|
||||
/// \brief A set of physical registers with utility functions to track liveness
|
||||
/// when walking backward/forward through a basic block.
|
||||
class LivePhysRegs {
|
||||
const TargetRegisterInfo *TRI = nullptr;
|
||||
SparseSet<unsigned> LiveRegs;
|
||||
|
||||
public:
|
||||
/// Constructs an unitialized set. init() needs to be called to initialize it.
|
||||
LivePhysRegs() = default;
|
||||
|
||||
/// Constructs and initializes an empty set.
|
||||
LivePhysRegs(const TargetRegisterInfo &TRI) : TRI(&TRI) {
|
||||
LiveRegs.setUniverse(TRI.getNumRegs());
|
||||
}
|
||||
|
||||
LivePhysRegs(const LivePhysRegs&) = delete;
|
||||
LivePhysRegs &operator=(const LivePhysRegs&) = delete;
|
||||
|
||||
public:
|
||||
/// \brief Constructs a new empty LivePhysRegs set.
|
||||
LivePhysRegs() = default;
|
||||
|
||||
/// \brief Constructs and initialize an empty LivePhysRegs set.
|
||||
LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {
|
||||
assert(TRI && "Invalid TargetRegisterInfo pointer.");
|
||||
LiveRegs.setUniverse(TRI->getNumRegs());
|
||||
}
|
||||
|
||||
/// \brief Clear and initialize the LivePhysRegs set.
|
||||
/// (re-)initializes and clears the set.
|
||||
void init(const TargetRegisterInfo &TRI) {
|
||||
this->TRI = &TRI;
|
||||
LiveRegs.clear();
|
||||
LiveRegs.setUniverse(TRI.getNumRegs());
|
||||
}
|
||||
|
||||
/// \brief Clears the LivePhysRegs set.
|
||||
/// Clears the set.
|
||||
void clear() { LiveRegs.clear(); }
|
||||
|
||||
/// \brief Returns true if the set is empty.
|
||||
/// Returns true if the set is empty.
|
||||
bool empty() const { return LiveRegs.empty(); }
|
||||
|
||||
/// \brief Adds a physical register and all its sub-registers to the set.
|
||||
/// Adds a physical register and all its sub-registers to the set.
|
||||
void addReg(unsigned Reg) {
|
||||
assert(TRI && "LivePhysRegs is not initialized.");
|
||||
assert(Reg <= TRI->getNumRegs() && "Expected a physical register.");
|
||||
@ -90,12 +93,13 @@ class LivePhysRegs {
|
||||
LiveRegs.erase(*R);
|
||||
}
|
||||
|
||||
/// \brief Removes physical registers clobbered by the regmask operand @p MO.
|
||||
/// Removes physical registers clobbered by the regmask operand \p MO.
|
||||
void removeRegsInMask(const MachineOperand &MO,
|
||||
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers);
|
||||
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> *Clobbers =
|
||||
nullptr);
|
||||
|
||||
/// \brief Returns true if register @p Reg is contained in the set. This also
|
||||
/// works if only the super register of @p Reg has been defined, because
|
||||
/// \brief Returns true if register \p Reg is contained in the set. This also
|
||||
/// works if only the super register of \p Reg has been defined, because
|
||||
/// addReg() always adds all sub-registers to the set as well.
|
||||
/// Note: Returns false if just some sub registers are live, use available()
|
||||
/// when searching a free register.
|
||||
@ -104,48 +108,48 @@ class LivePhysRegs {
|
||||
/// Returns true if register \p Reg and no aliasing register is in the set.
|
||||
bool available(const MachineRegisterInfo &MRI, unsigned Reg) const;
|
||||
|
||||
/// \brief Simulates liveness when stepping backwards over an
|
||||
/// instruction(bundle): Remove Defs, add uses. This is the recommended way of
|
||||
/// calculating liveness.
|
||||
/// Simulates liveness when stepping backwards over an instruction(bundle).
|
||||
/// Remove Defs, add uses. This is the recommended way of calculating
|
||||
/// liveness.
|
||||
void stepBackward(const MachineInstr &MI);
|
||||
|
||||
/// \brief Simulates liveness when stepping forward over an
|
||||
/// instruction(bundle): Remove killed-uses, add defs. This is the not
|
||||
/// recommended way, because it depends on accurate kill flags. If possible
|
||||
/// use stepBackward() instead of this function.
|
||||
/// The clobbers set will be the list of registers either defined or clobbered
|
||||
/// by a regmask. The operand will identify whether this is a regmask or
|
||||
/// register operand.
|
||||
/// Simulates liveness when stepping forward over an instruction(bundle).
|
||||
/// Remove killed-uses, add defs. This is the not recommended way, because it
|
||||
/// depends on accurate kill flags. If possible use stepBackward() instead of
|
||||
/// this function. The clobbers set will be the list of registers either
|
||||
/// defined or clobbered by a regmask. The operand will identify whether this
|
||||
/// is a regmask or register operand.
|
||||
void stepForward(const MachineInstr &MI,
|
||||
SmallVectorImpl<std::pair<unsigned, const MachineOperand*>> &Clobbers);
|
||||
|
||||
/// Adds all live-in registers of basic block @p MBB.
|
||||
/// Adds all live-in registers of basic block \p MBB.
|
||||
/// Live in registers are the registers in the blocks live-in list and the
|
||||
/// pristine registers.
|
||||
void addLiveIns(const MachineBasicBlock &MBB);
|
||||
|
||||
/// Adds all live-out registers of basic block @p MBB.
|
||||
/// Adds all live-out registers of basic block \p MBB.
|
||||
/// Live out registers are the union of the live-in registers of the successor
|
||||
/// blocks and pristine registers. Live out registers of the end block are the
|
||||
/// callee saved registers.
|
||||
void addLiveOuts(const MachineBasicBlock &MBB);
|
||||
|
||||
/// Like addLiveOuts() but does not add pristine registers/callee saved
|
||||
/// Adds all live-out registers of basic block \p MBB but skips pristine
|
||||
/// registers.
|
||||
void addLiveOutsNoPristines(const MachineBasicBlock &MBB);
|
||||
|
||||
typedef SparseSet<unsigned>::const_iterator const_iterator;
|
||||
using const_iterator = SparseSet<unsigned>::const_iterator;
|
||||
|
||||
const_iterator begin() const { return LiveRegs.begin(); }
|
||||
const_iterator end() const { return LiveRegs.end(); }
|
||||
|
||||
/// \brief Prints the currently live registers to @p OS.
|
||||
/// Prints the currently live registers to \p OS.
|
||||
void print(raw_ostream &OS) const;
|
||||
|
||||
/// \brief Dumps the currently live registers to the debug output.
|
||||
/// Dumps the currently live registers to the debug output.
|
||||
void dump() const;
|
||||
|
||||
private:
|
||||
/// Adds live-in registers from basic block @p MBB, taking associated
|
||||
/// \brief Adds live-in registers from basic block \p MBB, taking associated
|
||||
/// lane masks into consideration.
|
||||
void addBlockLiveIns(const MachineBasicBlock &MBB);
|
||||
};
|
||||
@ -155,11 +159,11 @@ inline raw_ostream &operator<<(raw_ostream &OS, const LivePhysRegs& LR) {
|
||||
return OS;
|
||||
}
|
||||
|
||||
/// Compute the live-in list for \p MBB assuming all of its successors live-in
|
||||
/// lists are up-to-date. Uses the given LivePhysReg instance \p LiveRegs; This
|
||||
/// is just here to avoid repeated heap allocations when calling this multiple
|
||||
/// times in a pass.
|
||||
void computeLiveIns(LivePhysRegs &LiveRegs, const TargetRegisterInfo &TRI,
|
||||
/// \brief Computes the live-in list for \p MBB assuming all of its successors
|
||||
/// live-in lists are up-to-date. Uses the given LivePhysReg instance \p
|
||||
/// LiveRegs; This is just here to avoid repeated heap allocations when calling
|
||||
/// this multiple times in a pass.
|
||||
void computeLiveIns(LivePhysRegs &LiveRegs, const MachineRegisterInfo &MRI,
|
||||
MachineBasicBlock &MBB);
|
||||
|
||||
} // end namespace llvm
|
||||
|
@ -1,4 +1,4 @@
|
||||
//===---- LiveRangeEdit.h - Basic tools for split and spill -----*- C++ -*-===//
|
||||
//===- LiveRangeEdit.h - Basic tools for split and spill --------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -19,19 +19,28 @@
|
||||
#define LLVM_CODEGEN_LIVERANGEEDIT_H
|
||||
|
||||
#include "llvm/ADT/ArrayRef.h"
|
||||
#include "llvm/ADT/None.h"
|
||||
#include "llvm/ADT/SetVector.h"
|
||||
#include "llvm/ADT/SmallPtrSet.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/Analysis/AliasAnalysis.h"
|
||||
#include "llvm/CodeGen/LiveInterval.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineFunction.h"
|
||||
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
||||
#include "llvm/Target/TargetMachine.h"
|
||||
#include "llvm/CodeGen/SlotIndexes.h"
|
||||
#include "llvm/Target/TargetSubtargetInfo.h"
|
||||
#include <cassert>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class LiveIntervals;
|
||||
class MachineBlockFrequencyInfo;
|
||||
class MachineInstr;
|
||||
class MachineLoopInfo;
|
||||
class MachineOperand;
|
||||
class TargetInstrInfo;
|
||||
class TargetRegisterInfo;
|
||||
class VirtRegMap;
|
||||
|
||||
class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
@ -39,7 +48,10 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
/// Callback methods for LiveRangeEdit owners.
|
||||
class Delegate {
|
||||
virtual void anchor();
|
||||
|
||||
public:
|
||||
virtual ~Delegate() = default;
|
||||
|
||||
/// Called immediately before erasing a dead machine instruction.
|
||||
virtual void LRE_WillEraseInstruction(MachineInstr *MI) {}
|
||||
|
||||
@ -53,8 +65,6 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
/// Called after cloning a virtual register.
|
||||
/// This is used for new registers representing connected components of Old.
|
||||
virtual void LRE_DidCloneVirtReg(unsigned New, unsigned Old) {}
|
||||
|
||||
virtual ~Delegate() {}
|
||||
};
|
||||
|
||||
private:
|
||||
@ -70,7 +80,7 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
const unsigned FirstNew;
|
||||
|
||||
/// ScannedRemattable - true when remattable values have been identified.
|
||||
bool ScannedRemattable;
|
||||
bool ScannedRemattable = false;
|
||||
|
||||
/// DeadRemats - The saved instructions which have already been dead after
|
||||
/// rematerialization but not deleted yet -- to be done in postOptimization.
|
||||
@ -78,11 +88,11 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
|
||||
/// Remattable - Values defined by remattable instructions as identified by
|
||||
/// tii.isTriviallyReMaterializable().
|
||||
SmallPtrSet<const VNInfo*,4> Remattable;
|
||||
SmallPtrSet<const VNInfo *, 4> Remattable;
|
||||
|
||||
/// Rematted - Values that were actually rematted, and so need to have their
|
||||
/// live range trimmed or entirely removed.
|
||||
SmallPtrSet<const VNInfo*,4> Rematted;
|
||||
SmallPtrSet<const VNInfo *, 4> Rematted;
|
||||
|
||||
/// scanRemattable - Identify the Parent values that may rematerialize.
|
||||
void scanRemattable(AliasAnalysis *aa);
|
||||
@ -94,11 +104,11 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
|
||||
/// foldAsLoad - If LI has a single use and a single def that can be folded as
|
||||
/// a load, eliminate the register by folding the def into the use.
|
||||
bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr*> &Dead);
|
||||
bool foldAsLoad(LiveInterval *LI, SmallVectorImpl<MachineInstr *> &Dead);
|
||||
|
||||
using ToShrinkSet = SetVector<LiveInterval *, SmallVector<LiveInterval *, 8>,
|
||||
SmallPtrSet<LiveInterval *, 8>>;
|
||||
|
||||
typedef SetVector<LiveInterval*,
|
||||
SmallVector<LiveInterval*, 8>,
|
||||
SmallPtrSet<LiveInterval*, 8> > ToShrinkSet;
|
||||
/// Helper for eliminateDeadDefs.
|
||||
void eliminateDeadDef(MachineInstr *MI, ToShrinkSet &ToShrink,
|
||||
AliasAnalysis *AA);
|
||||
@ -129,26 +139,26 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
SmallPtrSet<MachineInstr *, 32> *deadRemats = nullptr)
|
||||
: Parent(parent), NewRegs(newRegs), MRI(MF.getRegInfo()), LIS(lis),
|
||||
VRM(vrm), TII(*MF.getSubtarget().getInstrInfo()), TheDelegate(delegate),
|
||||
FirstNew(newRegs.size()), ScannedRemattable(false),
|
||||
DeadRemats(deadRemats) {
|
||||
FirstNew(newRegs.size()), DeadRemats(deadRemats) {
|
||||
MRI.setDelegate(this);
|
||||
}
|
||||
|
||||
~LiveRangeEdit() override { MRI.resetDelegate(this); }
|
||||
|
||||
LiveInterval &getParent() const {
|
||||
assert(Parent && "No parent LiveInterval");
|
||||
return *Parent;
|
||||
assert(Parent && "No parent LiveInterval");
|
||||
return *Parent;
|
||||
}
|
||||
|
||||
unsigned getReg() const { return getParent().reg; }
|
||||
|
||||
/// Iterator for accessing the new registers added by this edit.
|
||||
typedef SmallVectorImpl<unsigned>::const_iterator iterator;
|
||||
iterator begin() const { return NewRegs.begin()+FirstNew; }
|
||||
using iterator = SmallVectorImpl<unsigned>::const_iterator;
|
||||
iterator begin() const { return NewRegs.begin() + FirstNew; }
|
||||
iterator end() const { return NewRegs.end(); }
|
||||
unsigned size() const { return NewRegs.size()-FirstNew; }
|
||||
unsigned size() const { return NewRegs.size() - FirstNew; }
|
||||
bool empty() const { return size() == 0; }
|
||||
unsigned get(unsigned idx) const { return NewRegs[idx+FirstNew]; }
|
||||
unsigned get(unsigned idx) const { return NewRegs[idx + FirstNew]; }
|
||||
|
||||
/// pop_back - It allows LiveRangeEdit users to drop new registers.
|
||||
/// The context is when an original def instruction of a register is
|
||||
@ -176,26 +186,25 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
return createEmptyIntervalFrom(getReg());
|
||||
}
|
||||
|
||||
unsigned create() {
|
||||
return createFrom(getReg());
|
||||
}
|
||||
unsigned create() { return createFrom(getReg()); }
|
||||
|
||||
/// anyRematerializable - Return true if any parent values may be
|
||||
/// rematerializable.
|
||||
/// This function must be called before any rematerialization is attempted.
|
||||
bool anyRematerializable(AliasAnalysis*);
|
||||
bool anyRematerializable(AliasAnalysis *);
|
||||
|
||||
/// checkRematerializable - Manually add VNI to the list of rematerializable
|
||||
/// values if DefMI may be rematerializable.
|
||||
bool checkRematerializable(VNInfo *VNI, const MachineInstr *DefMI,
|
||||
AliasAnalysis*);
|
||||
AliasAnalysis *);
|
||||
|
||||
/// Remat - Information needed to rematerialize at a specific location.
|
||||
struct Remat {
|
||||
VNInfo *ParentVNI; // parent_'s value at the remat location.
|
||||
MachineInstr *OrigMI; // Instruction defining OrigVNI. It contains the
|
||||
// real expr for remat.
|
||||
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}
|
||||
VNInfo *ParentVNI; // parent_'s value at the remat location.
|
||||
MachineInstr *OrigMI = nullptr; // Instruction defining OrigVNI. It contains
|
||||
// the real expr for remat.
|
||||
|
||||
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI) {}
|
||||
};
|
||||
|
||||
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at
|
||||
@ -209,10 +218,8 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
/// liveness is not updated.
|
||||
/// Return the SlotIndex of the new instruction.
|
||||
SlotIndex rematerializeAt(MachineBasicBlock &MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
unsigned DestReg,
|
||||
const Remat &RM,
|
||||
const TargetRegisterInfo&,
|
||||
MachineBasicBlock::iterator MI, unsigned DestReg,
|
||||
const Remat &RM, const TargetRegisterInfo &,
|
||||
bool Late = false);
|
||||
|
||||
/// markRematerialized - explicitly mark a value as rematerialized after doing
|
||||
@ -248,11 +255,10 @@ class LiveRangeEdit : private MachineRegisterInfo::Delegate {
|
||||
|
||||
/// calculateRegClassAndHint - Recompute register class and hint for each new
|
||||
/// register.
|
||||
void calculateRegClassAndHint(MachineFunction&,
|
||||
const MachineLoopInfo&,
|
||||
const MachineBlockFrequencyInfo&);
|
||||
void calculateRegClassAndHint(MachineFunction &, const MachineLoopInfo &,
|
||||
const MachineBlockFrequencyInfo &);
|
||||
};
|
||||
|
||||
}
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
#endif // LLVM_CODEGEN_LIVERANGEEDIT_H
|
||||
|
@ -1,4 +1,4 @@
//===-- LiveStackAnalysis.h - Live Stack Slot Analysis ----------*- C++ -*-===//
//===- LiveStackAnalysis.h - Live Stack Slot Analysis -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -18,13 +18,16 @@
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Pass.h"
#include <cassert>
#include <map>
#include <unordered_map>
namespace llvm {
class TargetRegisterClass;
class TargetRegisterInfo;
class LiveStacks : public MachineFunctionPass {
const TargetRegisterInfo *TRI;
@ -33,8 +36,7 @@ class LiveStacks : public MachineFunctionPass {
VNInfo::Allocator VNInfoAllocator;
/// S2IMap - Stack slot indices to live interval mapping.
///
typedef std::unordered_map<int, LiveInterval> SS2IntervalMap;
using SS2IntervalMap = std::unordered_map<int, LiveInterval>;
SS2IntervalMap S2IMap;
/// S2RCMap - Stack slot indices to register class mapping.
@ -42,12 +44,14 @@ class LiveStacks : public MachineFunctionPass {
public:
static char ID; // Pass identification, replacement for typeid
LiveStacks() : MachineFunctionPass(ID) {
initializeLiveStacksPass(*PassRegistry::getPassRegistry());
}
typedef SS2IntervalMap::iterator iterator;
typedef SS2IntervalMap::const_iterator const_iterator;
using iterator = SS2IntervalMap::iterator;
using const_iterator = SS2IntervalMap::const_iterator;
const_iterator begin() const { return S2IMap.begin(); }
const_iterator end() const { return S2IMap.end(); }
iterator begin() { return S2IMap.begin(); }
@ -93,6 +97,7 @@ class LiveStacks : public MachineFunctionPass {
/// print - Implement the dump method.
void print(raw_ostream &O, const Module * = nullptr) const override;
};
}
#endif /* LLVM_CODEGEN_LIVESTACK_ANALYSIS_H */
} // end namespace llvm
#endif // LLVM_CODEGEN_LIVESTACK_ANALYSIS_H
@ -1,4 +1,4 @@
|
||||
//===-- llvm/CodeGen/MachineBasicBlock.h ------------------------*- C++ -*-===//
|
||||
//===- llvm/CodeGen/MachineBasicBlock.h -------------------------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -15,41 +15,50 @@
|
||||
#define LLVM_CODEGEN_MACHINEBASICBLOCK_H
|
||||
|
||||
#include "llvm/ADT/GraphTraits.h"
|
||||
#include "llvm/ADT/ilist.h"
|
||||
#include "llvm/ADT/ilist_node.h"
|
||||
#include "llvm/ADT/iterator_range.h"
|
||||
#include "llvm/ADT/simple_ilist.h"
|
||||
#include "llvm/CodeGen/MachineInstrBundleIterator.h"
|
||||
#include "llvm/CodeGen/MachineInstr.h"
|
||||
#include "llvm/IR/DebugLoc.h"
|
||||
#include "llvm/Support/BranchProbability.h"
|
||||
#include "llvm/MC/LaneBitmask.h"
|
||||
#include "llvm/MC/MCRegisterInfo.h"
|
||||
#include "llvm/Support/DataTypes.h"
|
||||
#include <cassert>
|
||||
#include <cstdint>
|
||||
#include <functional>
|
||||
#include <iterator>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class Pass;
|
||||
class BasicBlock;
|
||||
class MachineFunction;
|
||||
class MCSymbol;
|
||||
class MIPrinter;
|
||||
class ModuleSlotTracker;
|
||||
class Pass;
|
||||
class SlotIndexes;
|
||||
class StringRef;
|
||||
class raw_ostream;
|
||||
class MachineBranchProbabilityInfo;
|
||||
class TargetRegisterClass;
|
||||
class TargetRegisterInfo;
|
||||
|
||||
template <> struct ilist_traits<MachineInstr> {
|
||||
private:
|
||||
friend class MachineBasicBlock; // Set by the owning MachineBasicBlock.
|
||||
|
||||
MachineBasicBlock *Parent;
|
||||
|
||||
typedef simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator
|
||||
instr_iterator;
|
||||
using instr_iterator =
|
||||
simple_ilist<MachineInstr, ilist_sentinel_tracking<true>>::iterator;
|
||||
|
||||
public:
|
||||
void addNodeToList(MachineInstr *N);
|
||||
void removeNodeFromList(MachineInstr *N);
|
||||
void transferNodesFromList(ilist_traits &OldList, instr_iterator First,
|
||||
instr_iterator Last);
|
||||
|
||||
void deleteNode(MachineInstr *MI);
|
||||
};
|
||||
|
||||
@ -69,7 +78,8 @@ class MachineBasicBlock
|
||||
};
|
||||
|
||||
private:
|
||||
typedef ilist<MachineInstr, ilist_sentinel_tracking<true>> Instructions;
|
||||
using Instructions = ilist<MachineInstr, ilist_sentinel_tracking<true>>;
|
||||
|
||||
Instructions Insts;
|
||||
const BasicBlock *BB;
|
||||
int Number;
|
||||
@ -83,12 +93,12 @@ class MachineBasicBlock
|
||||
/// same order as Successors, or it is empty if we don't use it (disable
|
||||
/// optimization).
|
||||
std::vector<BranchProbability> Probs;
|
||||
typedef std::vector<BranchProbability>::iterator probability_iterator;
|
||||
typedef std::vector<BranchProbability>::const_iterator
|
||||
const_probability_iterator;
|
||||
using probability_iterator = std::vector<BranchProbability>::iterator;
|
||||
using const_probability_iterator =
|
||||
std::vector<BranchProbability>::const_iterator;
|
||||
|
||||
/// Keep track of the physical registers that are livein of the basicblock.
|
||||
typedef std::vector<RegisterMaskPair> LiveInVector;
|
||||
using LiveInVector = std::vector<RegisterMaskPair>;
|
||||
LiveInVector LiveIns;
|
||||
|
||||
/// Alignment of the basic block. Zero if the basic block does not need to be
|
||||
@ -113,7 +123,7 @@ class MachineBasicBlock
|
||||
mutable MCSymbol *CachedMCSymbol = nullptr;
|
||||
|
||||
// Intrusive list support
|
||||
MachineBasicBlock() {}
|
||||
MachineBasicBlock() = default;
|
||||
|
||||
explicit MachineBasicBlock(MachineFunction &MF, const BasicBlock *BB);
|
||||
|
||||
@ -145,16 +155,16 @@ class MachineBasicBlock
|
||||
const MachineFunction *getParent() const { return xParent; }
|
||||
MachineFunction *getParent() { return xParent; }
|
||||
|
||||
typedef Instructions::iterator instr_iterator;
|
||||
typedef Instructions::const_iterator const_instr_iterator;
|
||||
typedef Instructions::reverse_iterator reverse_instr_iterator;
|
||||
typedef Instructions::const_reverse_iterator const_reverse_instr_iterator;
|
||||
using instr_iterator = Instructions::iterator;
|
||||
using const_instr_iterator = Instructions::const_iterator;
|
||||
using reverse_instr_iterator = Instructions::reverse_iterator;
|
||||
using const_reverse_instr_iterator = Instructions::const_reverse_iterator;
|
||||
|
||||
typedef MachineInstrBundleIterator<MachineInstr> iterator;
|
||||
typedef MachineInstrBundleIterator<const MachineInstr> const_iterator;
|
||||
typedef MachineInstrBundleIterator<MachineInstr, true> reverse_iterator;
|
||||
typedef MachineInstrBundleIterator<const MachineInstr, true>
|
||||
const_reverse_iterator;
|
||||
using iterator = MachineInstrBundleIterator<MachineInstr>;
|
||||
using const_iterator = MachineInstrBundleIterator<const MachineInstr>;
|
||||
using reverse_iterator = MachineInstrBundleIterator<MachineInstr, true>;
|
||||
using const_reverse_iterator =
|
||||
MachineInstrBundleIterator<const MachineInstr, true>;
|
||||
|
||||
unsigned size() const { return (unsigned)Insts.size(); }
|
||||
bool empty() const { return Insts.empty(); }
|
||||
@ -178,8 +188,8 @@ class MachineBasicBlock
|
||||
reverse_instr_iterator instr_rend () { return Insts.rend(); }
|
||||
const_reverse_instr_iterator instr_rend () const { return Insts.rend(); }
|
||||
|
||||
typedef iterator_range<instr_iterator> instr_range;
|
||||
typedef iterator_range<const_instr_iterator> const_instr_range;
|
||||
using instr_range = iterator_range<instr_iterator>;
|
||||
using const_instr_range = iterator_range<const_instr_iterator>;
|
||||
instr_range instrs() { return instr_range(instr_begin(), instr_end()); }
|
||||
const_instr_range instrs() const {
|
||||
return const_instr_range(instr_begin(), instr_end());
|
||||
@ -213,18 +223,18 @@ class MachineBasicBlock
|
||||
}
|
||||
|
||||
// Machine-CFG iterators
|
||||
typedef std::vector<MachineBasicBlock *>::iterator pred_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_iterator const_pred_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::iterator succ_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_iterator const_succ_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::reverse_iterator
|
||||
pred_reverse_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
|
||||
const_pred_reverse_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::reverse_iterator
|
||||
succ_reverse_iterator;
|
||||
typedef std::vector<MachineBasicBlock *>::const_reverse_iterator
|
||||
const_succ_reverse_iterator;
|
||||
using pred_iterator = std::vector<MachineBasicBlock *>::iterator;
|
||||
using const_pred_iterator = std::vector<MachineBasicBlock *>::const_iterator;
|
||||
using succ_iterator = std::vector<MachineBasicBlock *>::iterator;
|
||||
using const_succ_iterator = std::vector<MachineBasicBlock *>::const_iterator;
|
||||
using pred_reverse_iterator =
|
||||
std::vector<MachineBasicBlock *>::reverse_iterator;
|
||||
using const_pred_reverse_iterator =
|
||||
std::vector<MachineBasicBlock *>::const_reverse_iterator;
|
||||
using succ_reverse_iterator =
|
||||
std::vector<MachineBasicBlock *>::reverse_iterator;
|
||||
using const_succ_reverse_iterator =
|
||||
std::vector<MachineBasicBlock *>::const_reverse_iterator;
|
||||
pred_iterator pred_begin() { return Predecessors.begin(); }
|
||||
const_pred_iterator pred_begin() const { return Predecessors.begin(); }
|
||||
pred_iterator pred_end() { return Predecessors.end(); }
|
||||
@ -307,7 +317,7 @@ class MachineBasicBlock

  // Iteration support for live in sets. These sets are kept in sorted
  // order by their register number.
  typedef LiveInVector::const_iterator livein_iterator;
  using livein_iterator = LiveInVector::const_iterator;
#ifndef NDEBUG
  /// Unlike livein_begin, this method does not check that the liveness
  /// information is accurate. Still for debug purposes it may be useful
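The livein_iterator above also backs MachineBasicBlock::liveins(); a minimal sketch of walking a block's live-in registers (my own illustration, not part of this patch; includes omitted):

    // Sketch: print every live-in physical register of a block; relies only
    // on MachineBasicBlock::liveins() returning RegisterMaskPair entries.
    void printLiveIns(const llvm::MachineBasicBlock &MBB,
                      const llvm::TargetRegisterInfo &TRI) {
      for (const auto &LI : MBB.liveins())
        llvm::dbgs() << TRI.getName(LI.PhysReg) << '\n';
    }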
|
||||
@ -455,7 +465,6 @@ class MachineBasicBlock
|
||||
/// other block.
|
||||
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const;
|
||||
|
||||
|
||||
/// Return the fallthrough block if the block can implicitly
|
||||
/// transfer control to the block after it by falling off the end of
|
||||
/// it. This should return null if it can reach the block after
|
||||
@ -695,7 +704,7 @@ class MachineBasicBlock
|
||||
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI,
|
||||
unsigned Reg,
|
||||
const_iterator Before,
|
||||
unsigned Neighborhood=10) const;
|
||||
unsigned Neighborhood = 10) const;
|
||||
|
||||
// Debugging methods.
|
||||
void dump() const;
|
||||
@ -714,7 +723,6 @@ class MachineBasicBlock
|
||||
/// Return the MCSymbol for this basic block.
|
||||
MCSymbol *getSymbol() const;
|
||||
|
||||
|
||||
private:
|
||||
/// Return probability iterator corresponding to the I successor iterator.
|
||||
probability_iterator getProbabilityIterator(succ_iterator I);
|
||||
@ -764,8 +772,8 @@ struct MBB2NumberFunctor :
|
||||
//
|
||||
|
||||
template <> struct GraphTraits<MachineBasicBlock *> {
|
||||
typedef MachineBasicBlock *NodeRef;
|
||||
typedef MachineBasicBlock::succ_iterator ChildIteratorType;
|
||||
using NodeRef = MachineBasicBlock *;
|
||||
using ChildIteratorType = MachineBasicBlock::succ_iterator;
|
||||
|
||||
static NodeRef getEntryNode(MachineBasicBlock *BB) { return BB; }
|
||||
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
|
||||
@ -773,8 +781,8 @@ template <> struct GraphTraits<MachineBasicBlock *> {
|
||||
};
|
||||
|
||||
template <> struct GraphTraits<const MachineBasicBlock *> {
|
||||
typedef const MachineBasicBlock *NodeRef;
|
||||
typedef MachineBasicBlock::const_succ_iterator ChildIteratorType;
|
||||
using NodeRef = const MachineBasicBlock *;
|
||||
using ChildIteratorType = MachineBasicBlock::const_succ_iterator;
|
||||
|
||||
static NodeRef getEntryNode(const MachineBasicBlock *BB) { return BB; }
|
||||
static ChildIteratorType child_begin(NodeRef N) { return N->succ_begin(); }
|
||||
@ -787,28 +795,30 @@ template <> struct GraphTraits<const MachineBasicBlock *> {
|
||||
// to be when traversing the predecessor edges of a MBB
|
||||
// instead of the successor edges.
|
||||
//
|
||||
template <> struct GraphTraits<Inverse<MachineBasicBlock*> > {
|
||||
typedef MachineBasicBlock *NodeRef;
|
||||
typedef MachineBasicBlock::pred_iterator ChildIteratorType;
|
||||
template <> struct GraphTraits<Inverse<MachineBasicBlock*>> {
|
||||
using NodeRef = MachineBasicBlock *;
|
||||
using ChildIteratorType = MachineBasicBlock::pred_iterator;
|
||||
|
||||
static NodeRef getEntryNode(Inverse<MachineBasicBlock *> G) {
|
||||
return G.Graph;
|
||||
}
|
||||
|
||||
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
|
||||
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
|
||||
};
|
||||
|
||||
template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
|
||||
typedef const MachineBasicBlock *NodeRef;
|
||||
typedef MachineBasicBlock::const_pred_iterator ChildIteratorType;
|
||||
template <> struct GraphTraits<Inverse<const MachineBasicBlock*>> {
|
||||
using NodeRef = const MachineBasicBlock *;
|
||||
using ChildIteratorType = MachineBasicBlock::const_pred_iterator;
|
||||
|
||||
static NodeRef getEntryNode(Inverse<const MachineBasicBlock *> G) {
|
||||
return G.Graph;
|
||||
}
|
||||
|
||||
static ChildIteratorType child_begin(NodeRef N) { return N->pred_begin(); }
|
||||
static ChildIteratorType child_end(NodeRef N) { return N->pred_end(); }
|
||||
};
|
||||
|
||||
|
||||
|
||||
/// MachineInstrSpan provides an interface to get an iteration range
/// containing the instruction it was initialized with, along with all
/// those instructions inserted prior to or following that instruction
@ -816,6 +826,7 @@ template <> struct GraphTraits<Inverse<const MachineBasicBlock*> > {
class MachineInstrSpan {
  MachineBasicBlock &MBB;
  MachineBasicBlock::iterator I, B, E;

public:
  MachineInstrSpan(MachineBasicBlock::iterator I)
      : MBB(*I->getParent()),
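A hedged sketch of the intended use, with expandPseudo() standing in for whatever inserts code around the instruction (hypothetical helper, not from this patch):

    // Sketch: capture everything a helper inserts around MI, then tag it.
    MachineInstrSpan MIS(MI);           // MI is a MachineBasicBlock::iterator
    expandPseudo(MI);                   // hypothetical helper that inserts code
    for (MachineInstr &NewMI : make_range(MIS.begin(), MIS.end()))
      NewMI.setFlag(MachineInstr::FrameSetup);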
|
||||
@ -854,6 +865,6 @@ inline IterT skipDebugInstructionsBackward(IterT It, IterT Begin) {
|
||||
return It;
|
||||
}
|
||||
|
||||
} // End llvm namespace
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
#endif // LLVM_CODEGEN_MACHINEBASICBLOCK_H
|
||||
|
@ -1,4 +1,4 @@
|
||||
//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -*- C++ -*-----===//
|
||||
//===- MachineBlockFrequencyInfo.h - MBB Frequency Analysis -----*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -17,26 +17,28 @@
|
||||
#include "llvm/ADT/Optional.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/Support/BlockFrequency.h"
|
||||
#include <climits>
|
||||
#include <cstdint>
|
||||
#include <memory>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
template <class BlockT> class BlockFrequencyInfoImpl;
|
||||
class MachineBasicBlock;
|
||||
class MachineBranchProbabilityInfo;
|
||||
class MachineFunction;
|
||||
class MachineLoopInfo;
|
||||
template <class BlockT> class BlockFrequencyInfoImpl;
|
||||
class raw_ostream;
|
||||
|
||||
/// MachineBlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation
|
||||
/// to estimate machine basic block frequencies.
|
||||
class MachineBlockFrequencyInfo : public MachineFunctionPass {
|
||||
typedef BlockFrequencyInfoImpl<MachineBasicBlock> ImplType;
|
||||
using ImplType = BlockFrequencyInfoImpl<MachineBasicBlock>;
|
||||
std::unique_ptr<ImplType> MBFI;
|
||||
|
||||
public:
|
||||
static char ID;
|
||||
|
||||
MachineBlockFrequencyInfo();
|
||||
|
||||
~MachineBlockFrequencyInfo() override;
|
||||
|
||||
void getAnalysisUsage(AnalysisUsage &AU) const override;
|
||||
@ -74,9 +76,8 @@ class MachineBlockFrequencyInfo : public MachineFunctionPass {
|
||||
const MachineBasicBlock *MBB) const;
|
||||
|
||||
uint64_t getEntryFreq() const;
|
||||
|
||||
};
|
||||
|
||||
}
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
#endif // LLVM_CODEGEN_MACHINEBLOCKFREQUENCYINFO_H
|
||||
|
@ -11,23 +11,28 @@
|
||||
#define LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
|
||||
|
||||
#include "llvm/Analysis/DominanceFrontier.h"
|
||||
#include "llvm/Analysis/DominanceFrontierImpl.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
|
||||
#include "llvm/Support/GenericDomTree.h"
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MachineDominanceFrontier : public MachineFunctionPass {
|
||||
ForwardDominanceFrontierBase<MachineBasicBlock> Base;
|
||||
public:
|
||||
typedef DominatorTreeBase<MachineBasicBlock> DomTreeT;
|
||||
typedef DomTreeNodeBase<MachineBasicBlock> DomTreeNodeT;
|
||||
typedef DominanceFrontierBase<MachineBasicBlock>::DomSetType DomSetType;
|
||||
typedef DominanceFrontierBase<MachineBasicBlock>::iterator iterator;
|
||||
typedef DominanceFrontierBase<MachineBasicBlock>::const_iterator const_iterator;
|
||||
|
||||
void operator=(const MachineDominanceFrontier &) = delete;
|
||||
public:
|
||||
using DomTreeT = DominatorTreeBase<MachineBasicBlock>;
|
||||
using DomTreeNodeT = DomTreeNodeBase<MachineBasicBlock>;
|
||||
using DomSetType = DominanceFrontierBase<MachineBasicBlock>::DomSetType;
|
||||
using iterator = DominanceFrontierBase<MachineBasicBlock>::iterator;
|
||||
using const_iterator =
|
||||
DominanceFrontierBase<MachineBasicBlock>::const_iterator;
|
||||
|
||||
MachineDominanceFrontier(const MachineDominanceFrontier &) = delete;
|
||||
MachineDominanceFrontier &
|
||||
operator=(const MachineDominanceFrontier &) = delete;
|
||||
|
||||
static char ID;
|
||||
|
||||
@ -104,6 +109,6 @@ class MachineDominanceFrontier : public MachineFunctionPass {
|
||||
void getAnalysisUsage(AnalysisUsage &AU) const override;
|
||||
};
|
||||
|
||||
}
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
#endif // LLVM_CODEGEN_MACHINEDOMINANCEFRONTIER_H
|
||||
|
@ -1,4 +1,4 @@
|
||||
//=- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation --*- C++ -*-==//
|
||||
//==- llvm/CodeGen/MachineDominators.h - Machine Dom Calculation -*- C++ -*-==//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
@ -16,12 +16,15 @@
|
||||
#define LLVM_CODEGEN_MACHINEDOMINATORS_H
|
||||
|
||||
#include "llvm/ADT/SmallSet.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineFunction.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/MachineInstr.h"
|
||||
#include "llvm/Support/GenericDomTree.h"
|
||||
#include "llvm/Support/GenericDomTreeConstruction.h"
|
||||
#include <cassert>
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
@ -33,7 +36,7 @@ inline void DominatorTreeBase<MachineBasicBlock>::addRoot(MachineBasicBlock* MBB
|
||||
extern template class DomTreeNodeBase<MachineBasicBlock>;
|
||||
extern template class DominatorTreeBase<MachineBasicBlock>;
|
||||
|
||||
typedef DomTreeNodeBase<MachineBasicBlock> MachineDomTreeNode;
|
||||
using MachineDomTreeNode = DomTreeNodeBase<MachineBasicBlock>;
|
||||
|
||||
//===-------------------------------------
|
||||
/// DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to
|
||||
@ -52,6 +55,7 @@ class MachineDominatorTree : public MachineFunctionPass {
|
||||
/// The splitting of a critical edge is local and thus, it is possible
|
||||
/// to apply several of those changes at the same time.
|
||||
mutable SmallVector<CriticalEdge, 32> CriticalEdgesToSplit;
|
||||
|
||||
/// \brief Remember all the basic blocks that are inserted during
|
||||
/// edge splitting.
|
||||
/// Invariant: NewBBs == all the basic blocks contained in the NewBB
|
||||
@ -259,8 +263,8 @@ class MachineDominatorTree : public MachineFunctionPass {
|
||||
|
||||
template <class Node, class ChildIterator>
|
||||
struct MachineDomTreeGraphTraitsBase {
|
||||
typedef Node *NodeRef;
|
||||
typedef ChildIterator ChildIteratorType;
|
||||
using NodeRef = Node *;
|
||||
using ChildIteratorType = ChildIterator;
|
||||
|
||||
static NodeRef getEntryNode(NodeRef N) { return N; }
|
||||
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
|
||||
@ -287,6 +291,6 @@ template <> struct GraphTraits<MachineDominatorTree*>
|
||||
}
|
||||
};
|
||||
|
||||
}
|
||||
} // end namespace llvm
|
||||
|
||||
#endif
|
||||
#endif // LLVM_CODEGEN_MACHINEDOMINATORS_H
|
||||
|
@ -826,20 +826,12 @@ class MachineInstr
           getOperand(0).getSubReg() == getOperand(1).getSubReg();
  }

  /// Return true if this is a transient instruction that is
  /// either very likely to be eliminated during register allocation (such as
  /// copy-like instructions), or if this instruction doesn't have an
  /// execution-time cost.
  bool isTransient() const {
    switch(getOpcode()) {
    default: return false;
    // Copy-like instructions are usually eliminated during register allocation.
    case TargetOpcode::PHI:
    case TargetOpcode::COPY:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    // Pseudo-instructions that don't produce any real output.
  /// Return true if this instruction doesn't produce any output in the form of
  /// executable instructions.
  bool isMetaInstruction() const {
    switch (getOpcode()) {
    default:
      return false;
    case TargetOpcode::IMPLICIT_DEF:
    case TargetOpcode::KILL:
    case TargetOpcode::CFI_INSTRUCTION:
@ -850,6 +842,23 @@ class MachineInstr
    }
  }

  /// Return true if this is a transient instruction that is either very likely
  /// to be eliminated during register allocation (such as copy-like
  /// instructions), or if this instruction doesn't have an execution-time cost.
  bool isTransient() const {
    switch (getOpcode()) {
    default:
      return isMetaInstruction();
    // Copy-like instructions are usually eliminated during register allocation.
    case TargetOpcode::PHI:
    case TargetOpcode::COPY:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
      return true;
    }
  }
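The rewritten isTransient() now falls back to isMetaInstruction() instead of returning false, so the meta opcodes only need to be listed once. A small sketch of how a pass might use the predicate (mine, not part of the patch):

    // Sketch: count the instructions in a block that plausibly cost execution
    // time, skipping meta instructions and other transient nodes.
    static unsigned countCostlyInstrs(const llvm::MachineBasicBlock &MBB) {
      unsigned N = 0;
      for (const llvm::MachineInstr &MI : MBB)
        if (!MI.isTransient())
          ++N;
      return N;
    }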
|
||||
|
||||
/// Return the number of instructions inside the MI bundle, excluding the
|
||||
/// bundle header.
|
||||
///
|
||||
|
@ -642,6 +642,11 @@ class MachineRegisterInfo {
|
||||
///
|
||||
void setRegBank(unsigned Reg, const RegisterBank &RegBank);
|
||||
|
||||
void setRegClassOrRegBank(unsigned Reg,
|
||||
const RegClassOrRegBank &RCOrRB){
|
||||
VRegInfo[Reg].first = RCOrRB;
|
||||
}
|
||||
|
||||
/// constrainRegClass - Constrain the register class of the specified virtual
|
||||
/// register to be a common subclass of RC and the current register class,
|
||||
/// but only if the new class has at least MinNumRegs registers. Return the
|
||||
|
@ -26,7 +26,7 @@ namespace llvm {
|
||||
/// Machine Value Type. Every type that is supported natively by some
|
||||
/// processor targeted by LLVM occurs here. This means that any legal value
|
||||
/// type can be represented by an MVT.
|
||||
class MVT {
|
||||
class MVT {
|
||||
public:
|
||||
enum SimpleValueType : uint8_t {
|
||||
// Simple value types that aren't explicitly part of this enumeration
|
||||
|
@ -52,14 +52,14 @@ class TargetRegisterInfo;
|
||||
/// These are the different kinds of scheduling dependencies.
|
||||
enum Kind {
|
||||
Data, ///< Regular data dependence (aka true-dependence).
|
||||
Anti, ///< A register anti-dependedence (aka WAR).
|
||||
Anti, ///< A register anti-dependence (aka WAR).
|
||||
Output, ///< A register output-dependence (aka WAW).
|
||||
Order ///< Any other ordering dependency.
|
||||
};
|
||||
|
||||
// Strong dependencies must be respected by the scheduler. Artificial
|
||||
// dependencies may be removed only if they are redundant with another
|
||||
// strong depedence.
|
||||
// strong dependence.
|
||||
//
|
||||
// Weak dependencies may be violated by the scheduling strategy, but only if
|
||||
// the strategy can prove it is correct to do so.
|
||||
@ -342,7 +342,7 @@ class TargetRegisterInfo;
|
||||
/// BoundaryNodes can have DAG edges, including Data edges, but they do not
|
||||
/// correspond to schedulable entities (e.g. instructions) and do not have a
|
||||
/// valid ID. Consequently, always check for boundary nodes before accessing
|
||||
/// an assoicative data structure keyed on node ID.
|
||||
/// an associative data structure keyed on node ID.
|
||||
bool isBoundaryNode() const { return NodeNum == BoundaryID; }
|
||||
|
||||
/// Assigns the representative SDNode for this SUnit. This may be used
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include "llvm/ADT/MapVector.h"
|
||||
#include "llvm/ADT/SparseMultiSet.h"
|
||||
#include "llvm/ADT/SparseSet.h"
|
||||
#include "llvm/CodeGen/LivePhysRegs.h"
|
||||
#include "llvm/CodeGen/ScheduleDAG.h"
|
||||
#include "llvm/CodeGen/TargetSchedule.h"
|
||||
#include "llvm/Support/Compiler.h"
|
||||
@ -224,7 +225,7 @@ namespace llvm {
|
||||
MachineInstr *FirstDbgValue;
|
||||
|
||||
/// Set of live physical registers for updating kill flags.
|
||||
BitVector LiveRegs;
|
||||
LivePhysRegs LiveRegs;
|
||||
|
||||
public:
|
||||
explicit ScheduleDAGInstrs(MachineFunction &mf,
|
||||
@ -311,7 +312,7 @@ namespace llvm {
|
||||
std::string getDAGName() const override;
|
||||
|
||||
/// Fixes register kill flags that scheduling has made invalid.
|
||||
void fixupKills(MachineBasicBlock *MBB);
|
||||
void fixupKills(MachineBasicBlock &MBB);
|
||||
|
||||
protected:
|
||||
void initSUnits();
|
||||
|
@ -1070,6 +1070,11 @@ class SelectionDAG {
  SDNode *MorphNodeTo(SDNode *N, unsigned Opc, SDVTList VTs,
                      ArrayRef<SDValue> Ops);

  /// Mutate the specified strict FP node to its non-strict equivalent,
  /// unlinking the node from its chain and dropping the metadata arguments.
  /// The node must be a strict FP node.
  SDNode *mutateStrictFPToFP(SDNode *Node);

  /// These are used for target selectors to create a new node
  /// with specified return type(s), MachineInstr opcode, and operands.
  ///
|
||||
|
@ -612,6 +612,32 @@ class SDNode : public FoldingSetNode, public ilist_node<SDNode> {
           SDNodeBits.IsMemIntrinsic;
  }

  /// Test if this node is a strict floating point pseudo-op.
  bool isStrictFPOpcode() {
    switch (NodeType) {
    default:
      return false;
    case ISD::STRICT_FADD:
    case ISD::STRICT_FSUB:
    case ISD::STRICT_FMUL:
    case ISD::STRICT_FDIV:
    case ISD::STRICT_FREM:
    case ISD::STRICT_FSQRT:
    case ISD::STRICT_FPOW:
    case ISD::STRICT_FPOWI:
    case ISD::STRICT_FSIN:
    case ISD::STRICT_FCOS:
    case ISD::STRICT_FEXP:
    case ISD::STRICT_FEXP2:
    case ISD::STRICT_FLOG:
    case ISD::STRICT_FLOG10:
    case ISD::STRICT_FLOG2:
    case ISD::STRICT_FRINT:
    case ISD::STRICT_FNEARBYINT:
      return true;
    }
  }

  /// Test if this node has a post-isel opcode, directly
  /// corresponding to a MachineInstr opcode.
  bool isMachineOpcode() const { return NodeType < 0; }
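Together with SelectionDAG::mutateStrictFPToFP() from the previous hunk, this predicate lets a target fall back to the ordinary FP node when it cannot honor strict semantics. A hedged sketch; hasStrictFPSupport() is made up for illustration:

    // Sketch: during instruction selection, replace a strict pseudo-op with
    // its regular counterpart if the target cannot honor the constraints.
    if (Node->isStrictFPOpcode() && !hasStrictFPSupport(Node)) // hypothetical check
      Node = CurDAG->mutateStrictFPToFP(Node);                 // CurDAG: the current SelectionDAG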
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include "llvm/ADT/Optional.h"
|
||||
#include "llvm/DebugInfo/CodeView/CodeViewError.h"
|
||||
#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
|
||||
#include "llvm/DebugInfo/CodeView/TypeIndex.h"
|
||||
#include "llvm/Support/BinaryStreamReader.h"
|
||||
#include "llvm/Support/BinaryStreamRef.h"
|
||||
#include "llvm/Support/Endian.h"
|
||||
@ -50,6 +51,13 @@ template <typename Kind> class CVRecord {
|
||||
Optional<uint32_t> Hash;
|
||||
};
|
||||
|
||||
template <typename Kind> struct RemappedRecord {
|
||||
explicit RemappedRecord(const CVRecord<Kind> &R) : OriginalRecord(R) {}
|
||||
|
||||
CVRecord<Kind> OriginalRecord;
|
||||
SmallVector<std::pair<uint32_t, TypeIndex>, 8> Mappings;
|
||||
};
|
||||
|
||||
} // end namespace codeview
|
||||
|
||||
template <typename Kind>
|
||||
|
@ -46,6 +46,7 @@ Error visitMemberRecordStream(ArrayRef<uint8_t> FieldList,
|
||||
TypeVisitorCallbacks &Callbacks);
|
||||
|
||||
Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks,
|
||||
VisitorDataSource Source = VDS_BytesPresent,
|
||||
TypeServerHandler *TS = nullptr);
|
||||
Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks,
|
||||
TypeServerHandler *TS = nullptr);
|
||||
|
@ -40,6 +40,17 @@ class TypeDeserializer : public TypeVisitorCallbacks {
public:
  TypeDeserializer() = default;

  template <typename T> static Error deserializeAs(CVType &CVT, T &Record) {
    MappingInfo I(CVT.content());
    if (auto EC = I.Mapping.visitTypeBegin(CVT))
      return EC;
    if (auto EC = I.Mapping.visitKnownRecord(CVT, Record))
      return EC;
    if (auto EC = I.Mapping.visitTypeEnd(CVT))
      return EC;
    return Error::success();
  }

  Error visitTypeBegin(CVType &Record) override {
    assert(!Mapping && "Already in a type mapping!");
    Mapping = llvm::make_unique<MappingInfo>(Record.content());
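The static deserializeAs<T>() above gives a one-shot way to crack a CVType into a concrete record without wiring up a visitor pipeline. A hedged sketch, assuming the chosen record type is default-constructible:

    // Hedged sketch: crack one CVType into a concrete record object.
    // Assumes R is a TypeRecord subclass that can be default-constructed here.
    template <typename R>
    llvm::Expected<R> deserializeOne(llvm::codeview::CVType &CVT) {
      R Record;
      if (auto EC = llvm::codeview::TypeDeserializer::deserializeAs(CVT, Record))
        return std::move(EC);
      return std::move(Record);
    }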
|
||||
|
include/llvm/DebugInfo/CodeView/TypeIndexDiscovery.h (new file, 33 lines)
@ -0,0 +1,33 @@
//===- TypeIndexDiscovery.h -------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H
#define LLVM_DEBUGINFO_CODEVIEW_TYPEINDEXDISCOVERY_H

#include "llvm/ADT/SmallVector.h"
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
#include "llvm/Support/Error.h"

namespace llvm {
namespace codeview {
enum class TiRefKind { TypeRef, IndexRef };
struct TiReference {
  TiRefKind Kind;
  uint32_t Offset;
  uint32_t Count;
};

void discoverTypeIndices(ArrayRef<uint8_t> RecordData,
                         SmallVectorImpl<TiReference> &Refs);
void discoverTypeIndices(const CVType &Type,
                         SmallVectorImpl<TiReference> &Refs);
}
}

#endif
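A short sketch of what a caller of this new discovery API looks like; it relies only on the declarations above, and remapRange() is a hypothetical helper:

    // Sketch: collect every TypeIndex reference embedded in one type record.
    llvm::SmallVector<llvm::codeview::TiReference, 16> Refs;
    llvm::codeview::discoverTypeIndices(Type, Refs);   // Type is a codeview::CVType
    for (const auto &Ref : Refs)
      if (Ref.Kind == llvm::codeview::TiRefKind::TypeRef)
        remapRange(Ref.Offset, Ref.Count);              // hypothetical remapping helper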
|
@ -35,6 +35,7 @@ using support::ulittle16_t;
|
||||
using support::ulittle32_t;
|
||||
|
||||
typedef CVRecord<TypeLeafKind> CVType;
|
||||
typedef RemappedRecord<TypeLeafKind> RemappedType;
|
||||
|
||||
struct CVMemberRecord {
|
||||
TypeLeafKind Kind;
|
||||
@ -278,15 +279,9 @@ class PointerRecord : public TypeRecord {
|
||||
Attrs(calcAttrs(PK, PM, PO, Size)) {}
|
||||
|
||||
PointerRecord(TypeIndex ReferentType, PointerKind PK, PointerMode PM,
|
||||
PointerOptions PO, uint8_t Size,
|
||||
const MemberPointerInfo &Member)
|
||||
PointerOptions PO, uint8_t Size, const MemberPointerInfo &MPI)
|
||||
: TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
|
||||
Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(Member) {}
|
||||
|
||||
PointerRecord(TypeIndex ReferentType, uint32_t Attrs,
|
||||
const MemberPointerInfo &Member)
|
||||
: TypeRecord(TypeRecordKind::Pointer), ReferentType(ReferentType),
|
||||
Attrs(Attrs), MemberInfo(Member) {}
|
||||
Attrs(calcAttrs(PK, PM, PO, Size)), MemberInfo(MPI) {}
|
||||
|
||||
TypeIndex getReferentType() const { return ReferentType; }
|
||||
|
||||
|
@ -17,7 +17,6 @@
|
||||
|
||||
#include "llvm/ADT/Optional.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/ADT/StringMap.h"
|
||||
#include "llvm/ADT/StringRef.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include "llvm/Support/Error.h"
|
||||
@ -26,6 +25,8 @@ namespace llvm {
|
||||
|
||||
namespace codeview {
|
||||
|
||||
class TypeHasher;
|
||||
|
||||
class TypeSerializer : public TypeVisitorCallbacks {
|
||||
struct SubRecord {
|
||||
SubRecord(TypeLeafKind K, uint32_t S) : Kind(K), Size(S) {}
|
||||
@ -45,14 +46,13 @@ class TypeSerializer : public TypeVisitorCallbacks {
|
||||
}
|
||||
};
|
||||
|
||||
typedef SmallVector<MutableArrayRef<uint8_t>, 2> RecordList;
|
||||
typedef SmallVector<MutableArrayRef<uint8_t>, 2> MutableRecordList;
|
||||
|
||||
static constexpr uint8_t ContinuationLength = 8;
|
||||
BumpPtrAllocator &RecordStorage;
|
||||
RecordSegment CurrentSegment;
|
||||
RecordList FieldListSegments;
|
||||
MutableRecordList FieldListSegments;
|
||||
|
||||
TypeIndex LastTypeIndex;
|
||||
Optional<TypeLeafKind> TypeKind;
|
||||
Optional<TypeLeafKind> MemberKind;
|
||||
std::vector<uint8_t> RecordBuffer;
|
||||
@ -60,28 +60,35 @@ class TypeSerializer : public TypeVisitorCallbacks {
|
||||
BinaryStreamWriter Writer;
|
||||
TypeRecordMapping Mapping;
|
||||
|
||||
RecordList SeenRecords;
|
||||
StringMap<TypeIndex> HashedRecords;
|
||||
/// Private type record hashing implementation details are handled here.
|
||||
std::unique_ptr<TypeHasher> Hasher;
|
||||
|
||||
/// Contains a list of all records indexed by TypeIndex.toArrayIndex().
|
||||
SmallVector<ArrayRef<uint8_t>, 2> SeenRecords;
|
||||
|
||||
/// Temporary storage that we use to copy a record's data while re-writing
|
||||
/// its type indices.
|
||||
SmallVector<uint8_t, 256> RemapStorage;
|
||||
|
||||
TypeIndex nextTypeIndex() const;
|
||||
|
||||
bool isInFieldList() const;
|
||||
TypeIndex calcNextTypeIndex() const;
|
||||
TypeIndex incrementTypeIndex();
|
||||
MutableArrayRef<uint8_t> getCurrentSubRecordData();
|
||||
MutableArrayRef<uint8_t> getCurrentRecordData();
|
||||
Error writeRecordPrefix(TypeLeafKind Kind);
|
||||
TypeIndex insertRecordBytesPrivate(MutableArrayRef<uint8_t> Record);
|
||||
TypeIndex insertRecordBytesWithCopy(CVType &Record,
|
||||
MutableArrayRef<uint8_t> Data);
|
||||
|
||||
Expected<MutableArrayRef<uint8_t>>
|
||||
addPadding(MutableArrayRef<uint8_t> Record);
|
||||
|
||||
public:
|
||||
explicit TypeSerializer(BumpPtrAllocator &Storage);
|
||||
explicit TypeSerializer(BumpPtrAllocator &Storage, bool Hash = true);
|
||||
~TypeSerializer();
|
||||
|
||||
ArrayRef<MutableArrayRef<uint8_t>> records() const;
|
||||
TypeIndex getLastTypeIndex() const;
|
||||
TypeIndex insertRecordBytes(MutableArrayRef<uint8_t> Record);
|
||||
void reset();
|
||||
|
||||
ArrayRef<ArrayRef<uint8_t>> records() const;
|
||||
TypeIndex insertRecordBytes(ArrayRef<uint8_t> &Record);
|
||||
TypeIndex insertRecord(const RemappedType &Record);
|
||||
Expected<TypeIndex> visitTypeEndGetIndex(CVType &Record);
|
||||
|
||||
Error visitTypeBegin(CVType &Record) override;
|
||||
|
@ -22,12 +22,75 @@ class TypeIndex;
class TypeServerHandler;
class TypeTableBuilder;

/// Merges one type stream into another. Returns true on success.
Error mergeTypeStreams(TypeTableBuilder &DestIdStream,
                       TypeTableBuilder &DestTypeStream,
/// \brief Merge one set of type records into another. This method assumes
/// that all records are type records, and there are no Id records present.
///
/// \param Dest The table to store the re-written type records into.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// type stream, that contains the index of the corresponding type record
/// in the destination stream.
///
/// \param Handler (optional) If non-null, an interface that gets invoked
/// to handle type server records.
///
/// \param Types The collection of types to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeTypeRecords(TypeTableBuilder &Dest,
                       SmallVectorImpl<TypeIndex> &SourceToDest,
                       TypeServerHandler *Handler, const CVTypeArray &Types);

/// \brief Merge one set of id records into another. This method assumes
/// that all records are id records, and there are no Type records present.
/// However, since Id records can refer back to Type records, this method
/// assumes that the referenced type records have also been merged into
/// another type stream (for example using the above method), and accepts
/// the mapping from source to dest for that stream so that it can re-write
/// the type record mappings accordingly.
///
/// \param Dest The table to store the re-written id records into.
///
/// \param Types The mapping to use for the type records that these id
/// records refer to.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// id stream, that contains the index of the corresponding id record
/// in the destination stream.
///
/// \param Ids The collection of id records to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeIdRecords(TypeTableBuilder &Dest, ArrayRef<TypeIndex> Types,
                     SmallVectorImpl<TypeIndex> &SourceToDest,
                     const CVTypeArray &Ids);

/// \brief Merge a unified set of type and id records, splitting them into
/// separate output streams.
///
/// \param DestIds The table to store the re-written id records into.
///
/// \param DestTypes the table to store the re-written type records into.
///
/// \param SourceToDest A vector, indexed by the TypeIndex in the source
/// id stream, that contains the index of the corresponding id record
/// in the destination stream.
///
/// \param Handler (optional) If non-null, an interface that gets invoked
/// to handle type server records.
///
/// \param IdsAndTypes The collection of id records to merge in.
///
/// \returns Error::success() if the operation succeeded, otherwise an
/// appropriate error code.
Error mergeTypeAndIdRecords(TypeTableBuilder &DestIds,
                            TypeTableBuilder &DestTypes,
                            SmallVectorImpl<TypeIndex> &SourceToDest,
                            TypeServerHandler *Handler,
                            const CVTypeArray &IdsAndTypes);

} // end namespace codeview
} // end namespace llvm
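A hedged end-to-end sketch of the merged-stream entry point documented above; DestIds, DestTypes and IdsAndTypes are assumed to be set up by the caller:

    // Sketch: split a unified stream of type and id records into two tables.
    llvm::SmallVector<llvm::codeview::TypeIndex, 128> SourceToDest;
    if (auto Err = llvm::codeview::mergeTypeAndIdRecords(
            DestIds, DestTypes, SourceToDest, /*Handler=*/nullptr, IdsAndTypes))
      return Err;   // propagate failure as an Error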
|
||||
|
||||
|
@ -64,10 +64,14 @@ class TypeTableBuilder {
|
||||
return *ExpectedIndex;
|
||||
}
|
||||
|
||||
TypeIndex writeSerializedRecord(MutableArrayRef<uint8_t> Record) {
|
||||
TypeIndex writeSerializedRecord(ArrayRef<uint8_t> Record) {
|
||||
return Serializer.insertRecordBytes(Record);
|
||||
}
|
||||
|
||||
TypeIndex writeSerializedRecord(const RemappedType &Record) {
|
||||
return Serializer.insertRecord(Record);
|
||||
}
|
||||
|
||||
template <typename TFunc> void ForEachRecord(TFunc Func) {
|
||||
uint32_t Index = TypeIndex::FirstNonSimpleIndex;
|
||||
|
||||
@ -77,23 +81,24 @@ class TypeTableBuilder {
|
||||
}
|
||||
}
|
||||
|
||||
ArrayRef<MutableArrayRef<uint8_t>> records() const {
|
||||
return Serializer.records();
|
||||
}
|
||||
ArrayRef<ArrayRef<uint8_t>> records() const { return Serializer.records(); }
|
||||
};
|
||||
|
||||
class FieldListRecordBuilder {
|
||||
TypeTableBuilder &TypeTable;
|
||||
BumpPtrAllocator Allocator;
|
||||
TypeSerializer TempSerializer;
|
||||
CVType Type;
|
||||
|
||||
public:
|
||||
explicit FieldListRecordBuilder(TypeTableBuilder &TypeTable)
|
||||
: TypeTable(TypeTable), TempSerializer(TypeTable.getAllocator()) {
|
||||
: TypeTable(TypeTable), TempSerializer(Allocator, false) {
|
||||
Type.Type = TypeLeafKind::LF_FIELDLIST;
|
||||
}
|
||||
|
||||
void begin() {
|
||||
TempSerializer.reset();
|
||||
|
||||
if (auto EC = TempSerializer.visitTypeBegin(Type))
|
||||
consumeError(std::move(EC));
|
||||
}
|
||||
@ -109,23 +114,19 @@ class FieldListRecordBuilder {
|
||||
consumeError(std::move(EC));
|
||||
}
|
||||
|
||||
TypeIndex end() {
|
||||
TypeIndex end(bool Write) {
|
||||
TypeIndex Index;
|
||||
if (auto EC = TempSerializer.visitTypeEnd(Type)) {
|
||||
consumeError(std::move(EC));
|
||||
return TypeIndex();
|
||||
}
|
||||
|
||||
TypeIndex Index;
|
||||
for (auto Record : TempSerializer.records()) {
|
||||
Index = TypeTable.writeSerializedRecord(Record);
|
||||
if (Write) {
|
||||
for (auto Record : TempSerializer.records())
|
||||
Index = TypeTable.writeSerializedRecord(Record);
|
||||
}
|
||||
return Index;
|
||||
}
|
||||
|
||||
/// Stop building the record.
|
||||
void reset() {
|
||||
if (auto EC = TempSerializer.visitTypeEnd(Type))
|
||||
consumeError(std::move(EC));
|
||||
return Index;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -18,7 +18,7 @@ namespace codeview {
|
||||
|
||||
class TypeTableCollection : public TypeCollection {
|
||||
public:
|
||||
explicit TypeTableCollection(ArrayRef<MutableArrayRef<uint8_t>> Records);
|
||||
explicit TypeTableCollection(ArrayRef<ArrayRef<uint8_t>> Records);
|
||||
|
||||
Optional<TypeIndex> getFirst() override;
|
||||
Optional<TypeIndex> getNext(TypeIndex Prev) override;
|
||||
@ -33,7 +33,7 @@ class TypeTableCollection : public TypeCollection {
|
||||
bool hasCapacityFor(TypeIndex Index) const;
|
||||
void ensureTypeExists(TypeIndex Index);
|
||||
|
||||
ArrayRef<MutableArrayRef<uint8_t>> Records;
|
||||
ArrayRef<ArrayRef<uint8_t>> Records;
|
||||
TypeDatabase Database;
|
||||
};
|
||||
}
|
||||
|
@ -46,7 +46,8 @@ class raw_ostream;
|
||||
/// Reads a value from data extractor and applies a relocation to the result if
|
||||
/// one exists for the given offset.
|
||||
uint64_t getRelocatedValue(const DataExtractor &Data, uint32_t Size,
|
||||
uint32_t *Off, const RelocAddrMap *Relocs);
|
||||
uint32_t *Off, const RelocAddrMap *Relocs,
|
||||
uint64_t *SecNdx = nullptr);
|
||||
|
||||
/// DWARFContext
|
||||
/// This data structure is the top level entity that deals with dwarf debug
|
||||
@ -71,6 +72,14 @@ class DWARFContext : public DIContext {
|
||||
std::unique_ptr<DWARFDebugAbbrev> AbbrevDWO;
|
||||
std::unique_ptr<DWARFDebugLocDWO> LocDWO;
|
||||
|
||||
struct DWOFile {
|
||||
object::OwningBinary<object::ObjectFile> File;
|
||||
std::unique_ptr<DWARFContext> Context;
|
||||
};
|
||||
StringMap<std::weak_ptr<DWOFile>> DWOFiles;
|
||||
std::weak_ptr<DWOFile> DWP;
|
||||
bool CheckedForDWP = false;
|
||||
|
||||
/// Read compile units from the debug_info section (if necessary)
|
||||
/// and store them in CUs.
|
||||
void parseCompileUnits();
|
||||
@ -165,6 +174,8 @@ class DWARFContext : public DIContext {
|
||||
return DWOCUs[index].get();
|
||||
}
|
||||
|
||||
DWARFCompileUnit *getDWOCompileUnitForHash(uint64_t Hash);
|
||||
|
||||
/// Get a DIE given an exact offset.
|
||||
DWARFDie getDIEForOffset(uint32_t Offset);
|
||||
|
||||
@ -206,6 +217,7 @@ class DWARFContext : public DIContext {
|
||||
DIInliningInfo getInliningInfoForAddress(uint64_t Address,
|
||||
DILineInfoSpecifier Specifier = DILineInfoSpecifier()) override;
|
||||
|
||||
virtual StringRef getFileName() const = 0;
|
||||
virtual bool isLittleEndian() const = 0;
|
||||
virtual uint8_t getAddressSize() const = 0;
|
||||
virtual const DWARFSection &getInfoSection() = 0;
|
||||
@ -248,6 +260,8 @@ class DWARFContext : public DIContext {
|
||||
return version == 2 || version == 3 || version == 4 || version == 5;
|
||||
}
|
||||
|
||||
std::shared_ptr<DWARFContext> getDWOContext(StringRef AbsolutePath);
|
||||
|
||||
private:
|
||||
/// Return the compile unit that includes an offset (relative to .debug_info).
|
||||
DWARFCompileUnit *getCompileUnitForOffset(uint32_t Offset);
|
||||
@ -263,6 +277,7 @@ class DWARFContext : public DIContext {
|
||||
class DWARFContextInMemory : public DWARFContext {
|
||||
virtual void anchor();
|
||||
|
||||
StringRef FileName;
|
||||
bool IsLittleEndian;
|
||||
uint8_t AddressSize;
|
||||
DWARFSection InfoSection;
|
||||
@ -316,6 +331,7 @@ class DWARFContextInMemory : public DWARFContext {
|
||||
uint8_t AddrSize,
|
||||
bool isLittleEndian = sys::IsLittleEndianHost);
|
||||
|
||||
StringRef getFileName() const override { return FileName; }
|
||||
bool isLittleEndian() const override { return IsLittleEndian; }
|
||||
uint8_t getAddressSize() const override { return AddressSize; }
|
||||
const DWARFSection &getInfoSection() override { return InfoSection; }
|
||||
|
@ -25,6 +25,7 @@ class raw_ostream;
|
||||
struct DWARFAddressRange {
|
||||
uint64_t LowPC;
|
||||
uint64_t HighPC;
|
||||
uint64_t SectionIndex;
|
||||
};
|
||||
|
||||
/// DWARFAddressRangesVector - represents a set of absolute address ranges.
|
||||
@ -44,6 +45,8 @@ class DWARFDebugRangeList {
|
||||
/// address past the end of the address range. The ending address must
|
||||
/// be greater than or equal to the beginning address.
|
||||
uint64_t EndAddress;
|
||||
/// A section index this range belongs to.
|
||||
uint64_t SectionIndex;
|
||||
|
||||
/// The end of any given range list is marked by an end of list entry,
|
||||
/// which consists of a 0 for the beginning address offset
|
||||
|
@ -195,7 +195,8 @@ class DWARFDie {
|
||||
|
||||
/// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU.
|
||||
/// Returns true if both attributes are present.
|
||||
bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC) const;
|
||||
bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC,
|
||||
uint64_t &SectionIndex) const;
|
||||
|
||||
/// Get the address ranges for this DIE.
|
||||
///
|
||||
|
@ -47,6 +47,7 @@ class DWARFFormValue {
|
||||
const char *cstr;
|
||||
};
|
||||
const uint8_t *data = nullptr;
|
||||
uint64_t SectionIndex; /// Section index for reference forms.
|
||||
};
|
||||
|
||||
dwarf::Form Form; /// Form for this value.
|
||||
@ -58,6 +59,7 @@ class DWARFFormValue {
|
||||
|
||||
dwarf::Form getForm() const { return Form; }
|
||||
uint64_t getRawUValue() const { return Value.uval; }
|
||||
uint64_t getSectionIndex() const { return Value.SectionIndex; }
|
||||
void setForm(dwarf::Form F) { Form = F; }
|
||||
void setUValue(uint64_t V) { Value.uval = V; }
|
||||
void setSValue(int64_t V) { Value.sval = V; }
|
||||
|
@ -16,7 +16,10 @@
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// RelocAddrEntry contains relocated value and section index.
|
||||
/// Section index is -1LL if relocation points to absolute symbol.
|
||||
struct RelocAddrEntry {
|
||||
uint64_t SectionIndex;
|
||||
uint64_t Value;
|
||||
};
|
||||
|
||||
|
@ -143,17 +143,7 @@ class DWARFUnit {
|
||||
typedef iterator_range<std::vector<DWARFDebugInfoEntry>::iterator>
|
||||
die_iterator_range;
|
||||
|
||||
class DWOHolder {
|
||||
object::OwningBinary<object::ObjectFile> DWOFile;
|
||||
std::unique_ptr<DWARFContext> DWOContext;
|
||||
DWARFUnit *DWOU = nullptr;
|
||||
|
||||
public:
|
||||
DWOHolder(StringRef DWOPath, uint64_t DWOId);
|
||||
|
||||
DWARFUnit *getUnit() const { return DWOU; }
|
||||
};
|
||||
std::unique_ptr<DWOHolder> DWO;
|
||||
std::shared_ptr<DWARFUnit> DWO;
|
||||
|
||||
const DWARFUnitIndex::Entry *IndexEntry;
|
||||
|
||||
|
@ -43,8 +43,8 @@ class MappedBlockStream : public BinaryStream {
|
||||
friend class WritableMappedBlockStream;
|
||||
public:
|
||||
static std::unique_ptr<MappedBlockStream>
|
||||
createStream(uint32_t BlockSize, uint32_t NumBlocks,
|
||||
const MSFStreamLayout &Layout, BinaryStreamRef MsfData);
|
||||
createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
|
||||
BinaryStreamRef MsfData);
|
||||
|
||||
static std::unique_ptr<MappedBlockStream>
|
||||
createIndexedStream(const MSFLayout &Layout, BinaryStreamRef MsfData,
|
||||
@ -74,12 +74,11 @@ class MappedBlockStream : public BinaryStream {
|
||||
void invalidateCache();
|
||||
|
||||
uint32_t getBlockSize() const { return BlockSize; }
|
||||
uint32_t getNumBlocks() const { return NumBlocks; }
|
||||
uint32_t getNumBlocks() const { return StreamLayout.Blocks.size(); }
|
||||
uint32_t getStreamLength() const { return StreamLayout.Length; }
|
||||
|
||||
protected:
|
||||
MappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks,
|
||||
const MSFStreamLayout &StreamLayout,
|
||||
MappedBlockStream(uint32_t BlockSize, const MSFStreamLayout &StreamLayout,
|
||||
BinaryStreamRef MsfData);
|
||||
|
||||
private:
|
||||
@ -91,7 +90,6 @@ class MappedBlockStream : public BinaryStream {
|
||||
ArrayRef<uint8_t> &Buffer);
|
||||
|
||||
const uint32_t BlockSize;
|
||||
const uint32_t NumBlocks;
|
||||
const MSFStreamLayout StreamLayout;
|
||||
BinaryStreamRef MsfData;
|
||||
|
||||
@ -103,8 +101,8 @@ class MappedBlockStream : public BinaryStream {
|
||||
class WritableMappedBlockStream : public WritableBinaryStream {
|
||||
public:
|
||||
static std::unique_ptr<WritableMappedBlockStream>
|
||||
createStream(uint32_t BlockSize, uint32_t NumBlocks,
|
||||
const MSFStreamLayout &Layout, WritableBinaryStreamRef MsfData);
|
||||
createStream(uint32_t BlockSize, const MSFStreamLayout &Layout,
|
||||
WritableBinaryStreamRef MsfData);
|
||||
|
||||
static std::unique_ptr<WritableMappedBlockStream>
|
||||
createIndexedStream(const MSFLayout &Layout, WritableBinaryStreamRef MsfData,
|
||||
@ -139,7 +137,7 @@ class WritableMappedBlockStream : public WritableBinaryStream {
|
||||
uint32_t getStreamLength() const { return ReadInterface.getStreamLength(); }
|
||||
|
||||
protected:
|
||||
WritableMappedBlockStream(uint32_t BlockSize, uint32_t NumBlocks,
|
||||
WritableMappedBlockStream(uint32_t BlockSize,
|
||||
const MSFStreamLayout &StreamLayout,
|
||||
WritableBinaryStreamRef MsfData);
|
||||
|
||||
|
@ -82,6 +82,7 @@ class DbiStreamBuilder {
|
||||
|
||||
Error finalize();
|
||||
uint32_t calculateModiSubstreamSize() const;
|
||||
uint32_t calculateNamesOffset() const;
|
||||
uint32_t calculateSectionContribsStreamSize() const;
|
||||
uint32_t calculateSectionMapStreamSize() const;
|
||||
uint32_t calculateFileInfoSubstreamSize() const;
|
||||
|
@ -11,8 +11,7 @@
|
||||
#define LLVM_DEBUGINFO_PDB_PDBTYPESERVERHANDLER_H
|
||||
|
||||
#include "llvm/ADT/SmallString.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/ADT/StringMap.h"
|
||||
#include "llvm/ADT/StringSet.h"
|
||||
#include "llvm/DebugInfo/CodeView/TypeRecord.h"
|
||||
#include "llvm/DebugInfo/CodeView/TypeServerHandler.h"
|
||||
#include "llvm/DebugInfo/PDB/Native/NativeSession.h"
|
||||
@ -39,7 +38,7 @@ class PDBTypeServerHandler : public codeview::TypeServerHandler {
|
||||
|
||||
bool RevisitAlways;
|
||||
std::unique_ptr<NativeSession> Session;
|
||||
SmallVector<SmallString<64>, 4> SearchPaths;
|
||||
StringSet<> SearchPaths;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -21,6 +21,9 @@
|
||||
#include "llvm/Support/Error.h"
|
||||
|
||||
namespace llvm {
|
||||
namespace codeview {
|
||||
class LazyRandomTypeCollection;
|
||||
}
|
||||
namespace msf {
|
||||
class MappedBlockStream;
|
||||
}
|
||||
@ -53,12 +56,16 @@ class TpiStream {
|
||||
codeview::CVTypeRange types(bool *HadError) const;
|
||||
const codeview::CVTypeArray &typeArray() const { return TypeRecords; }
|
||||
|
||||
codeview::LazyRandomTypeCollection &typeCollection() { return *Types; }
|
||||
|
||||
Error commit();
|
||||
|
||||
private:
|
||||
const PDBFile &Pdb;
|
||||
std::unique_ptr<msf::MappedBlockStream> Stream;
|
||||
|
||||
std::unique_ptr<codeview::LazyRandomTypeCollection> Types;
|
||||
|
||||
codeview::CVTypeArray TypeRecords;
|
||||
|
||||
std::unique_ptr<BinaryStream> HashStream;
|
||||
|
@ -322,7 +322,7 @@ template <> struct DenseMapInfo<AttributeSet> {
|
||||
/// the AttributeList object. The function attributes are at index
|
||||
/// `AttributeList::FunctionIndex', the return value is at index
|
||||
/// `AttributeList::ReturnIndex', and the attributes for the parameters start at
|
||||
/// index `1'.
|
||||
/// index `AttributeList::FirstArgIndex'.
|
||||
class AttributeList {
|
||||
public:
|
||||
enum AttrIndex : unsigned {
|
||||
@ -347,8 +347,8 @@ class AttributeList {
|
||||
/// \brief Create an AttributeList with the specified parameters in it.
|
||||
static AttributeList get(LLVMContext &C,
|
||||
ArrayRef<std::pair<unsigned, Attribute>> Attrs);
|
||||
static AttributeList
|
||||
get(LLVMContext &C, ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
|
||||
static AttributeList get(LLVMContext &C,
|
||||
ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
|
||||
|
||||
/// \brief Create an AttributeList from attribute sets for a function, its
|
||||
/// return value, and all of its arguments.
|
||||
@ -356,13 +356,11 @@ class AttributeList {
|
||||
AttributeSet RetAttrs,
|
||||
ArrayRef<AttributeSet> ArgAttrs);
|
||||
|
||||
static AttributeList
|
||||
getImpl(LLVMContext &C,
|
||||
ArrayRef<std::pair<unsigned, AttributeSet>> Attrs);
|
||||
|
||||
private:
|
||||
explicit AttributeList(AttributeListImpl *LI) : pImpl(LI) {}
|
||||
|
||||
static AttributeList getImpl(LLVMContext &C, ArrayRef<AttributeSet> AttrSets);
|
||||
|
||||
public:
|
||||
AttributeList() = default;
|
||||
|
||||
@ -521,39 +519,31 @@ class AttributeList {
  /// \brief Return the attributes at the index as a string.
  std::string getAsString(unsigned Index, bool InAttrGrp = false) const;

  using iterator = ArrayRef<Attribute>::iterator;
  //===--------------------------------------------------------------------===//
  // AttributeList Introspection
  //===--------------------------------------------------------------------===//

  iterator begin(unsigned Slot) const;
  iterator end(unsigned Slot) const;
  typedef const AttributeSet *iterator;
  iterator begin() const;
  iterator end() const;

  unsigned getNumAttrSets() const;

  /// Use these to iterate over the valid attribute indices.
  unsigned index_begin() const { return AttributeList::FunctionIndex; }
  unsigned index_end() const { return getNumAttrSets() - 1; }

  /// operator==/!= - Provide equality predicates.
  bool operator==(const AttributeList &RHS) const { return pImpl == RHS.pImpl; }
  bool operator!=(const AttributeList &RHS) const { return pImpl != RHS.pImpl; }
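With the new value-type iterator, walking an AttributeList visits one AttributeSet per valid index. A minimal sketch based only on the declarations above:

    // Sketch: count the attribute sets in AL that actually carry attributes,
    // using the new AttributeSet-valued iteration.
    unsigned NonEmptySets = 0;
    for (llvm::AttributeSet AS : AL)
      if (AS.hasAttributes())
        ++NonEmptySets;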
|
||||
|
||||
//===--------------------------------------------------------------------===//
|
||||
// AttributeList Introspection
|
||||
//===--------------------------------------------------------------------===//
|
||||
|
||||
/// \brief Return a raw pointer that uniquely identifies this attribute list.
|
||||
void *getRawPointer() const {
|
||||
return pImpl;
|
||||
}
|
||||
|
||||
/// \brief Return true if there are no attributes.
|
||||
bool isEmpty() const {
|
||||
return getNumSlots() == 0;
|
||||
}
|
||||
|
||||
/// \brief Return the number of slots used in this attribute list. This is
|
||||
/// the number of arguments that have an attribute set on them (including the
|
||||
/// function itself).
|
||||
unsigned getNumSlots() const;
|
||||
|
||||
/// \brief Return the index for the given slot.
|
||||
unsigned getSlotIndex(unsigned Slot) const;
|
||||
|
||||
/// \brief Return the attributes at the given slot.
|
||||
AttributeSet getSlotAttributes(unsigned Slot) const;
|
||||
bool isEmpty() const { return pImpl == nullptr; }
|
||||
|
||||
void dump() const;
|
||||
};
|
||||
|
@ -33,6 +33,7 @@ class Function;
|
||||
class LandingPadInst;
|
||||
class LLVMContext;
|
||||
class Module;
|
||||
class PHINode;
|
||||
class TerminatorInst;
|
||||
class ValueSymbolTable;
|
||||
|
||||
@ -261,6 +262,50 @@ class BasicBlock final : public Value, // Basic blocks are data objects also
|
||||
inline const Instruction &back() const { return InstList.back(); }
|
||||
inline Instruction &back() { return InstList.back(); }
|
||||
|
||||
/// Iterator to walk just the phi nodes in the basic block.
|
||||
template <typename PHINodeT = PHINode, typename BBIteratorT = iterator>
|
||||
class phi_iterator_impl
|
||||
: public iterator_facade_base<phi_iterator_impl<PHINodeT, BBIteratorT>,
|
||||
std::forward_iterator_tag, PHINodeT> {
|
||||
friend BasicBlock;
|
||||
|
||||
PHINodeT *PN;
|
||||
|
||||
phi_iterator_impl(PHINodeT *PN) : PN(PN) {}
|
||||
|
||||
public:
|
||||
// Allow default construction to build variables, but this doesn't build
|
||||
// a useful iterator.
|
||||
phi_iterator_impl() = default;
|
||||
|
||||
// Allow conversion between instantiations where valid.
|
||||
template <typename PHINodeU, typename BBIteratorU>
|
||||
phi_iterator_impl(const phi_iterator_impl<PHINodeU, BBIteratorU> &Arg)
|
||||
: PN(Arg.PN) {}
|
||||
|
||||
bool operator==(const phi_iterator_impl &Arg) const { return PN == Arg.PN; }
|
||||
|
||||
PHINodeT &operator*() const { return *PN; }
|
||||
|
||||
using phi_iterator_impl::iterator_facade_base::operator++;
|
||||
phi_iterator_impl &operator++() {
|
||||
assert(PN && "Cannot increment the end iterator!");
|
||||
PN = dyn_cast<PHINodeT>(std::next(BBIteratorT(PN)));
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
  typedef phi_iterator_impl<> phi_iterator;
  typedef phi_iterator_impl<const PHINode, BasicBlock::const_iterator>
      const_phi_iterator;

  /// Returns a range that iterates over the phis in the basic block.
  ///
  /// Note that this cannot be used with basic blocks that have no terminator.
  iterator_range<const_phi_iterator> phis() const {
    return const_cast<BasicBlock *>(this)->phis();
  }
  iterator_range<phi_iterator> phis();
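The phis() range above replaces the usual manual dyn_cast loop over the start of the block; a minimal usage sketch:

    // Sketch: visit every PHI node at the top of BB and nothing else.
    for (llvm::PHINode &PN : BB.phis())
      PN.setName("merged");   // stand-in for whatever per-PHI work is needed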
|
||||
|
||||
/// \brief Return the underlying instruction list container.
|
||||
///
|
||||
/// Currently you need to access the underlying instruction list container
|
||||
|
@ -171,6 +171,7 @@ namespace llvm {
|
||||
ebStrict
|
||||
};
|
||||
|
||||
bool isUnaryOp() const;
|
||||
RoundingMode getRoundingMode() const;
|
||||
ExceptionBehavior getExceptionBehavior() const;
|
||||
|
||||
@ -182,6 +183,18 @@ namespace llvm {
|
||||
case Intrinsic::experimental_constrained_fmul:
|
||||
case Intrinsic::experimental_constrained_fdiv:
|
||||
case Intrinsic::experimental_constrained_frem:
|
||||
case Intrinsic::experimental_constrained_sqrt:
|
||||
case Intrinsic::experimental_constrained_pow:
|
||||
case Intrinsic::experimental_constrained_powi:
|
||||
case Intrinsic::experimental_constrained_sin:
|
||||
case Intrinsic::experimental_constrained_cos:
|
||||
case Intrinsic::experimental_constrained_exp:
|
||||
case Intrinsic::experimental_constrained_exp2:
|
||||
case Intrinsic::experimental_constrained_log:
|
||||
case Intrinsic::experimental_constrained_log10:
|
||||
case Intrinsic::experimental_constrained_log2:
|
||||
case Intrinsic::experimental_constrained_rint:
|
||||
case Intrinsic::experimental_constrained_nearbyint:
|
||||
return true;
|
||||
default: return false;
|
||||
}
|
||||
|
@ -489,8 +489,64 @@ let IntrProperties = [IntrInaccessibleMemOnly] in {
|
||||
LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
|
||||
// These intrinsics are sensitive to the rounding mode so we need constrained
|
||||
// versions of each of them. When strict rounding and exception control are
|
||||
// not required the non-constrained versions of these intrinsics should be
|
||||
// used.
|
||||
def int_experimental_constrained_sqrt : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_powi : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_i32_ty,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_sin : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_cos : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_pow : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_log : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_log10: Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_log2 : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_exp2 : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_rint : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
def int_experimental_constrained_nearbyint : Intrinsic<[ llvm_anyfloat_ty ],
|
||||
[ LLVMMatchType<0>,
|
||||
llvm_metadata_ty,
|
||||
llvm_metadata_ty ]>;
|
||||
}
|
||||
// FIXME: Add intrinsic for fcmp, fptrunc, fpext, fptoui and fptosi.
|
||||
// FIXME: Add intrinsics for fcmp, fptrunc, fpext, fptoui and fptosi.
|
||||
// FIXME: Add intrinsics for fabs, copysign, floor, ceil, trunc and round?
|
||||
|
||||
|
||||
//===------------------------- Expect Intrinsics --------------------------===//
|
||||
|
@ -566,6 +566,16 @@ def int_amdgcn_s_getreg :
|
||||
[IntrReadMem, IntrSpeculatable]
|
||||
>;
|
||||
|
||||
// int_amdgcn_s_getpc is provided to allow a specific style of position
|
||||
// independent code to determine the high part of its address when it is
|
||||
// known (through convention) that the code and any data of interest does
|
||||
// not cross a 4Gb address boundary. Use for any other purpose may not
|
||||
// produce the desired results as optimizations may cause code movement,
|
||||
// especially as we explicitly use IntrNoMem to allow optimizations.
|
||||
def int_amdgcn_s_getpc :
|
||||
GCCBuiltin<"__builtin_amdgcn_s_getpc">,
|
||||
Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
|
||||
|
||||
// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
|
||||
// param values: 0 = P10, 1 = P20, 2 = P0
|
||||
def int_amdgcn_interp_mov :
|
||||
|
@ -1223,6 +1223,7 @@ template <class T> class MDTupleTypedArrayWrapper {
|
||||
|
||||
// FIXME: Fix callers and remove condition on N.
|
||||
unsigned size() const { return N ? N->getNumOperands() : 0u; }
|
||||
bool empty() const { return N ? N->getNumOperands() == 0 : true; }
|
||||
T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); }
|
||||
|
||||
// FIXME: Fix callers and remove condition on N.
|
||||
|
@ -139,9 +139,12 @@ class Module {
|
||||
/// during the append operation.
|
||||
AppendUnique = 6,
|
||||
|
||||
/// Takes the max of the two values, which are required to be integers.
|
||||
Max = 7,
|
||||
|
||||
// Markers:
|
||||
ModFlagBehaviorFirstVal = Error,
|
||||
ModFlagBehaviorLastVal = AppendUnique
|
||||
ModFlagBehaviorLastVal = Max
|
||||
};
|
||||
|
||||
/// Checks if Metadata represents a valid ModFlagBehavior, and stores the
|
||||
|
@ -144,6 +144,7 @@ void initializeGCMachineCodeAnalysisPass(PassRegistry&);
void initializeGCModuleInfoPass(PassRegistry&);
void initializeGCOVProfilerLegacyPassPass(PassRegistry&);
void initializeGVNHoistLegacyPassPass(PassRegistry&);
void initializeGVNSinkLegacyPassPass(PassRegistry&);
void initializeGVNLegacyPassPass(PassRegistry&);
void initializeGlobalDCELegacyPassPass(PassRegistry&);
void initializeGlobalMergePass(PassRegistry&);
@ -193,6 +194,7 @@ void initializeLiveVariablesPass(PassRegistry&);
void initializeLoadCombinePass(PassRegistry&);
void initializeLoadStoreVectorizerPass(PassRegistry&);
void initializeLoaderPassPass(PassRegistry&);
void initializeLocalizerPass(PassRegistry&);
void initializeLocalStackSlotPassPass(PassRegistry&);
void initializeLoopAccessLegacyAnalysisPass(PassRegistry&);
void initializeLoopDataPrefetchLegacyPassPass(PassRegistry&);
@ -39,7 +39,7 @@ struct Config {
  std::string CPU;
  TargetOptions Options;
  std::vector<std::string> MAttrs;
  Reloc::Model RelocModel = Reloc::PIC_;
  Optional<Reloc::Model> RelocModel = Reloc::PIC_;
  CodeModel::Model CodeModel = CodeModel::Default;
  CodeGenOpt::Level CGOptLevel = CodeGenOpt::Default;
  TargetMachine::CodeGenFileType CGFileType = TargetMachine::CGFT_ObjectFile;
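The hunk above changes Config::RelocModel from a plain Reloc::Model to an Optional<Reloc::Model>, so a client can leave it unset and let the backend pick a default. A minimal sketch of filling in these fields, assuming the llvm::lto Config API of this revision::

  #include "llvm/LTO/Config.h"

  using namespace llvm;

  static void initConfig(lto::Config &C) {
    C.CPU = "generic";
    C.RelocModel = Reloc::PIC_;   // or C.RelocModel = None; to defer the choice
    C.CodeModel = CodeModel::Default;
    C.CGOptLevel = CodeGenOpt::Default;
    C.CGFileType = TargetMachine::CGFT_ObjectFile;
  }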
@ -95,9 +95,7 @@ class Binary {
|
||||
return TypeID > ID_StartObjects && TypeID < ID_EndObjects;
|
||||
}
|
||||
|
||||
bool isSymbolic() const {
|
||||
return isIR() || isObject();
|
||||
}
|
||||
bool isSymbolic() const { return isIR() || isObject() || isCOFFImportFile(); }
|
||||
|
||||
bool isArchive() const {
|
||||
return TypeID == ID_Archive;
|
||||
|
@ -782,6 +782,7 @@ class COFFObjectFile : public ObjectFile {
|
||||
std::error_code getSectionName(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
uint64_t getSectionAddress(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionIndex(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionSize(DataRefImpl Sec) const override;
|
||||
std::error_code getSectionContents(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
|
@ -235,6 +235,7 @@ template <class ELFT> class ELFObjectFile : public ELFObjectFileBase {
|
||||
std::error_code getSectionName(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
uint64_t getSectionAddress(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionIndex(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionSize(DataRefImpl Sec) const override;
|
||||
std::error_code getSectionContents(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
@ -645,6 +646,17 @@ uint64_t ELFObjectFile<ELFT>::getSectionAddress(DataRefImpl Sec) const {
|
||||
return getSection(Sec)->sh_addr;
|
||||
}
|
||||
|
||||
template <class ELFT>
|
||||
uint64_t ELFObjectFile<ELFT>::getSectionIndex(DataRefImpl Sec) const {
|
||||
auto SectionsOrErr = EF.sections();
|
||||
handleAllErrors(std::move(SectionsOrErr.takeError()),
|
||||
[](const ErrorInfoBase &) {
|
||||
llvm_unreachable("unable to get section index");
|
||||
});
|
||||
const Elf_Shdr *First = SectionsOrErr->begin();
|
||||
return getSection(Sec) - First;
|
||||
}
|
||||
|
||||
template <class ELFT>
|
||||
uint64_t ELFObjectFile<ELFT>::getSectionSize(DataRefImpl Sec) const {
|
||||
return getSection(Sec)->sh_size;
|
||||
|
@ -290,6 +290,7 @@ class MachOObjectFile : public ObjectFile {
|
||||
std::error_code getSectionName(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
uint64_t getSectionAddress(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionIndex(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionSize(DataRefImpl Sec) const override;
|
||||
std::error_code getSectionContents(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
|
@ -95,6 +95,7 @@ class SectionRef {

  std::error_code getName(StringRef &Result) const;
  uint64_t getAddress() const;
  uint64_t getIndex() const;
  uint64_t getSize() const;
  std::error_code getContents(StringRef &Result) const;

@ -222,6 +223,7 @@ class ObjectFile : public SymbolicFile {
  virtual std::error_code getSectionName(DataRefImpl Sec,
                                         StringRef &Res) const = 0;
  virtual uint64_t getSectionAddress(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionIndex(DataRefImpl Sec) const = 0;
  virtual uint64_t getSectionSize(DataRefImpl Sec) const = 0;
  virtual std::error_code getSectionContents(DataRefImpl Sec,
                                             StringRef &Res) const = 0;
@ -393,6 +395,10 @@ inline uint64_t SectionRef::getAddress() const {
  return OwningObject->getSectionAddress(SectionPimpl);
}

inline uint64_t SectionRef::getIndex() const {
  return OwningObject->getSectionIndex(SectionPimpl);
}

inline uint64_t SectionRef::getSize() const {
  return OwningObject->getSectionSize(SectionPimpl);
}
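The new SectionRef::getIndex() accessor simply forwards to the per-format getSectionIndex() implementations added above. A small sketch of how a tool might use it, assuming the object::ObjectFile API at this revision (getName still uses the std::error_code out-parameter form shown above)::

  #include "llvm/Object/ObjectFile.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;
  using namespace llvm::object;

  static void printSectionIndices(const ObjectFile &Obj) {
    for (const SectionRef &Sec : Obj.sections()) {
      StringRef Name;
      Sec.getName(Name); // error_code ignored for this sketch
      outs() << Sec.getIndex() << ": " << Name << "\n";
    }
  }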
@ -40,13 +40,13 @@ class RelocVisitor {
|
||||
// TODO: Should handle multiple applied relocations via either passing in the
|
||||
// previously computed value or just count paired relocations as a single
|
||||
// visit.
|
||||
uint64_t visit(uint32_t RelocType, RelocationRef R, uint64_t Value = 0) {
|
||||
uint64_t visit(uint32_t Rel, RelocationRef R, uint64_t Value = 0) {
|
||||
if (isa<ELFObjectFileBase>(ObjToVisit))
|
||||
return visitELF(RelocType, R, Value);
|
||||
return visitELF(Rel, R, Value);
|
||||
if (isa<COFFObjectFile>(ObjToVisit))
|
||||
return visitCOFF(RelocType, R, Value);
|
||||
return visitCOFF(Rel, R, Value);
|
||||
if (isa<MachOObjectFile>(ObjToVisit))
|
||||
return visitMachO(RelocType, R, Value);
|
||||
return visitMachO(Rel, R, Value);
|
||||
|
||||
HasError = true;
|
||||
return 0;
|
||||
@ -58,214 +58,60 @@ class RelocVisitor {
|
||||
const ObjectFile &ObjToVisit;
|
||||
bool HasError = false;
|
||||
|
||||
uint64_t visitELF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
|
||||
uint64_t visitELF(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
if (ObjToVisit.getBytesInAddress() == 8) { // 64-bit object file
|
||||
switch (ObjToVisit.getArch()) {
|
||||
case Triple::x86_64:
|
||||
switch (RelocType) {
|
||||
case ELF::R_X86_64_NONE:
|
||||
return visitELF_X86_64_NONE(R);
|
||||
case ELF::R_X86_64_64:
|
||||
return visitELF_X86_64_64(R, Value);
|
||||
case ELF::R_X86_64_PC32:
|
||||
return visitELF_X86_64_PC32(R, Value);
|
||||
case ELF::R_X86_64_32:
|
||||
return visitELF_X86_64_32(R, Value);
|
||||
case ELF::R_X86_64_32S:
|
||||
return visitELF_X86_64_32S(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitX86_64(Rel, R, Value);
|
||||
case Triple::aarch64:
|
||||
case Triple::aarch64_be:
|
||||
switch (RelocType) {
|
||||
case ELF::R_AARCH64_ABS32:
|
||||
return visitELF_AARCH64_ABS32(R, Value);
|
||||
case ELF::R_AARCH64_ABS64:
|
||||
return visitELF_AARCH64_ABS64(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitAarch64(Rel, R, Value);
|
||||
case Triple::bpfel:
|
||||
case Triple::bpfeb:
|
||||
switch (RelocType) {
|
||||
case ELF::R_BPF_64_64:
|
||||
return visitELF_BPF_64_64(R, Value);
|
||||
case ELF::R_BPF_64_32:
|
||||
return visitELF_BPF_64_32(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitBpf(Rel, R, Value);
|
||||
case Triple::mips64el:
|
||||
case Triple::mips64:
|
||||
switch (RelocType) {
|
||||
case ELF::R_MIPS_32:
|
||||
return visitELF_MIPS64_32(R, Value);
|
||||
case ELF::R_MIPS_64:
|
||||
return visitELF_MIPS64_64(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitMips64(Rel, R, Value);
|
||||
case Triple::ppc64le:
|
||||
case Triple::ppc64:
|
||||
switch (RelocType) {
|
||||
case ELF::R_PPC64_ADDR32:
|
||||
return visitELF_PPC64_ADDR32(R, Value);
|
||||
case ELF::R_PPC64_ADDR64:
|
||||
return visitELF_PPC64_ADDR64(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitPPC64(Rel, R, Value);
|
||||
case Triple::systemz:
|
||||
switch (RelocType) {
|
||||
case ELF::R_390_32:
|
||||
return visitELF_390_32(R, Value);
|
||||
case ELF::R_390_64:
|
||||
return visitELF_390_64(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitSystemz(Rel, R, Value);
|
||||
case Triple::sparcv9:
|
||||
switch (RelocType) {
|
||||
case ELF::R_SPARC_32:
|
||||
case ELF::R_SPARC_UA32:
|
||||
return visitELF_SPARCV9_32(R, Value);
|
||||
case ELF::R_SPARC_64:
|
||||
case ELF::R_SPARC_UA64:
|
||||
return visitELF_SPARCV9_64(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitSparc64(Rel, R, Value);
|
||||
case Triple::amdgcn:
|
||||
switch (RelocType) {
|
||||
case ELF::R_AMDGPU_ABS32:
|
||||
return visitELF_AMDGPU_ABS32(R, Value);
|
||||
case ELF::R_AMDGPU_ABS64:
|
||||
return visitELF_AMDGPU_ABS64(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
return visitAmdgpu(Rel, R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
} else if (ObjToVisit.getBytesInAddress() == 4) { // 32-bit object file
|
||||
switch (ObjToVisit.getArch()) {
|
||||
case Triple::x86:
|
||||
switch (RelocType) {
|
||||
case ELF::R_386_NONE:
|
||||
return visitELF_386_NONE(R);
|
||||
case ELF::R_386_32:
|
||||
return visitELF_386_32(R, Value);
|
||||
case ELF::R_386_PC32:
|
||||
return visitELF_386_PC32(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
case Triple::ppc:
|
||||
switch (RelocType) {
|
||||
case ELF::R_PPC_ADDR32:
|
||||
return visitELF_PPC_ADDR32(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
case Triple::arm:
|
||||
case Triple::armeb:
|
||||
switch (RelocType) {
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
case ELF::R_ARM_ABS32:
|
||||
return visitELF_ARM_ABS32(R, Value);
|
||||
}
|
||||
case Triple::lanai:
|
||||
switch (RelocType) {
|
||||
case ELF::R_LANAI_32:
|
||||
return visitELF_Lanai_32(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
case Triple::mipsel:
|
||||
case Triple::mips:
|
||||
switch (RelocType) {
|
||||
case ELF::R_MIPS_32:
|
||||
return visitELF_MIPS_32(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
case Triple::sparc:
|
||||
switch (RelocType) {
|
||||
case ELF::R_SPARC_32:
|
||||
case ELF::R_SPARC_UA32:
|
||||
return visitELF_SPARC_32(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
case Triple::hexagon:
|
||||
switch (RelocType) {
|
||||
case ELF::R_HEX_32:
|
||||
return visitELF_HEX_32(R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
} else {
|
||||
report_fatal_error("Invalid word size in object file");
|
||||
}
|
||||
}
|
||||
|
||||
uint64_t visitCOFF(uint32_t RelocType, RelocationRef R, uint64_t Value) {
|
||||
// 32-bit object file
|
||||
assert(ObjToVisit.getBytesInAddress() == 4 &&
|
||||
"Invalid word size in object file");
|
||||
|
||||
switch (ObjToVisit.getArch()) {
|
||||
case Triple::x86:
|
||||
switch (RelocType) {
|
||||
case COFF::IMAGE_REL_I386_SECREL:
|
||||
return visitCOFF_I386_SECREL(R, Value);
|
||||
case COFF::IMAGE_REL_I386_DIR32:
|
||||
return visitCOFF_I386_DIR32(R, Value);
|
||||
}
|
||||
break;
|
||||
case Triple::x86_64:
|
||||
switch (RelocType) {
|
||||
case COFF::IMAGE_REL_AMD64_SECREL:
|
||||
return visitCOFF_AMD64_SECREL(R, Value);
|
||||
case COFF::IMAGE_REL_AMD64_ADDR64:
|
||||
return visitCOFF_AMD64_ADDR64(R, Value);
|
||||
}
|
||||
break;
|
||||
return visitX86(Rel, R, Value);
|
||||
case Triple::ppc:
|
||||
return visitPPC32(Rel, R, Value);
|
||||
case Triple::arm:
|
||||
case Triple::armeb:
|
||||
return visitARM(Rel, R, Value);
|
||||
case Triple::lanai:
|
||||
return visitLanai(Rel, R, Value);
|
||||
case Triple::mipsel:
|
||||
case Triple::mips:
|
||||
return visitMips32(Rel, R, Value);
|
||||
case Triple::sparc:
|
||||
return visitSparc32(Rel, R, Value);
|
||||
case Triple::hexagon:
|
||||
return visitHexagon(Rel, R, Value);
|
||||
default:
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitMachO(uint32_t RelocType, RelocationRef R, uint64_t Value) {
|
||||
switch (ObjToVisit.getArch()) {
|
||||
default: break;
|
||||
case Triple::x86_64:
|
||||
switch (RelocType) {
|
||||
default: break;
|
||||
case MachO::X86_64_RELOC_UNSIGNED:
|
||||
return visitMACHO_X86_64_UNSIGNED(R, Value);
|
||||
}
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int64_t getELFAddend(RelocationRef R) {
|
||||
@ -275,176 +121,193 @@ class RelocVisitor {
|
||||
return *AddendOrErr;
|
||||
}
|
||||
|
||||
/// Operations
|
||||
|
||||
/// 386-ELF
|
||||
uint64_t visitELF_386_NONE(RelocationRef R) {
|
||||
uint64_t visitX86_64(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_X86_64_NONE:
|
||||
return 0;
|
||||
case ELF::R_X86_64_64:
|
||||
return Value + getELFAddend(R);
|
||||
case ELF::R_X86_64_PC32:
|
||||
return Value + getELFAddend(R) - R.getOffset();
|
||||
case ELF::R_X86_64_32:
|
||||
case ELF::R_X86_64_32S:
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Ideally the Addend here will be the addend in the data for
|
||||
// the relocation. It's not actually the case for Rel relocations.
|
||||
uint64_t visitELF_386_32(RelocationRef R, uint64_t Value) {
|
||||
return Value;
|
||||
}
|
||||
|
||||
uint64_t visitELF_386_PC32(RelocationRef R, uint64_t Value) {
|
||||
return Value - R.getOffset();
|
||||
}
|
||||
|
||||
/// X86-64 ELF
|
||||
uint64_t visitELF_X86_64_NONE(RelocationRef R) {
|
||||
uint64_t visitAarch64(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_AARCH64_ABS32: {
|
||||
int64_t Res = Value + getELFAddend(R);
|
||||
if (Res < INT32_MIN || Res > UINT32_MAX)
|
||||
HasError = true;
|
||||
return static_cast<uint32_t>(Res);
|
||||
}
|
||||
case ELF::R_AARCH64_ABS64:
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_X86_64_64(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
uint64_t visitBpf(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_BPF_64_32:
|
||||
return Value & 0xFFFFFFFF;
|
||||
case ELF::R_BPF_64_64:
|
||||
return Value;
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_X86_64_PC32(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R) - R.getOffset();
|
||||
uint64_t visitMips64(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_MIPS_32:
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
case ELF::R_MIPS_64:
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_X86_64_32(RelocationRef R, uint64_t Value) {
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
uint64_t visitPPC64(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_PPC64_ADDR32:
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
case ELF::R_PPC64_ADDR64:
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_X86_64_32S(RelocationRef R, uint64_t Value) {
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
uint64_t visitSystemz(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_390_32: {
|
||||
int64_t Res = Value + getELFAddend(R);
|
||||
if (Res < INT32_MIN || Res > UINT32_MAX)
|
||||
HasError = true;
|
||||
return static_cast<uint32_t>(Res);
|
||||
}
|
||||
case ELF::R_390_64:
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// BPF ELF
|
||||
uint64_t visitELF_BPF_64_32(RelocationRef R, uint64_t Value) {
|
||||
return Value & 0xFFFFFFFF;
|
||||
uint64_t visitSparc64(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_SPARC_32:
|
||||
case ELF::R_SPARC_64:
|
||||
case ELF::R_SPARC_UA32:
|
||||
case ELF::R_SPARC_UA64:
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_BPF_64_64(RelocationRef R, uint64_t Value) {
|
||||
return Value;
|
||||
uint64_t visitAmdgpu(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_AMDGPU_ABS32:
|
||||
case ELF::R_AMDGPU_ABS64:
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// PPC64 ELF
|
||||
uint64_t visitELF_PPC64_ADDR32(RelocationRef R, uint64_t Value) {
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
uint64_t visitX86(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (Rel) {
|
||||
case ELF::R_386_NONE:
|
||||
return 0;
|
||||
case ELF::R_386_32:
|
||||
return Value;
|
||||
case ELF::R_386_PC32:
|
||||
return Value - R.getOffset();
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_PPC64_ADDR64(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
uint64_t visitPPC32(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
if (Rel == ELF::R_PPC_ADDR32)
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// PPC32 ELF
|
||||
uint64_t visitELF_PPC_ADDR32(RelocationRef R, uint64_t Value) {
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
uint64_t visitARM(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
if (Rel == ELF::R_ARM_ABS32) {
|
||||
if ((int64_t)Value < INT32_MIN || (int64_t)Value > UINT32_MAX)
|
||||
HasError = true;
|
||||
return static_cast<uint32_t>(Value);
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// Lanai ELF
|
||||
uint64_t visitELF_Lanai_32(RelocationRef R, uint64_t Value) {
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
uint64_t visitLanai(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
if (Rel == ELF::R_LANAI_32)
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// MIPS ELF
|
||||
uint64_t visitELF_MIPS_32(RelocationRef R, uint64_t Value) {
|
||||
return Value & 0xFFFFFFFF;
|
||||
uint64_t visitMips32(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
if (Rel == ELF::R_MIPS_32)
|
||||
return Value & 0xFFFFFFFF;
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// MIPS64 ELF
|
||||
uint64_t visitELF_MIPS64_32(RelocationRef R, uint64_t Value) {
|
||||
return (Value + getELFAddend(R)) & 0xFFFFFFFF;
|
||||
uint64_t visitSparc32(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
if (Rel == ELF::R_SPARC_32 || Rel == ELF::R_SPARC_UA32)
|
||||
return Value + getELFAddend(R);
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_MIPS64_64(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
uint64_t visitHexagon(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
if (Rel == ELF::R_HEX_32)
|
||||
return Value + getELFAddend(R);
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// AArch64 ELF
|
||||
uint64_t visitELF_AARCH64_ABS32(RelocationRef R, uint64_t Value) {
|
||||
int64_t Addend = getELFAddend(R);
|
||||
int64_t Res = Value + Addend;
|
||||
|
||||
// Overflow check allows for both signed and unsigned interpretation.
|
||||
if (Res < INT32_MIN || Res > UINT32_MAX)
|
||||
HasError = true;
|
||||
|
||||
return static_cast<uint32_t>(Res);
|
||||
uint64_t visitCOFF(uint32_t Rel, RelocationRef R, uint64_t Value) {
|
||||
switch (ObjToVisit.getArch()) {
|
||||
case Triple::x86:
|
||||
switch (Rel) {
|
||||
case COFF::IMAGE_REL_I386_SECREL:
|
||||
case COFF::IMAGE_REL_I386_DIR32:
|
||||
return static_cast<uint32_t>(Value);
|
||||
}
|
||||
break;
|
||||
case Triple::x86_64:
|
||||
switch (Rel) {
|
||||
case COFF::IMAGE_REL_AMD64_SECREL:
|
||||
return static_cast<uint32_t>(Value);
|
||||
case COFF::IMAGE_REL_AMD64_ADDR64:
|
||||
return Value;
|
||||
}
|
||||
break;
|
||||
}
|
||||
HasError = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
uint64_t visitELF_AARCH64_ABS64(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
|
||||
// SystemZ ELF
|
||||
uint64_t visitELF_390_32(RelocationRef R, uint64_t Value) {
|
||||
int64_t Addend = getELFAddend(R);
|
||||
int64_t Res = Value + Addend;
|
||||
|
||||
// Overflow check allows for both signed and unsigned interpretation.
|
||||
if (Res < INT32_MIN || Res > UINT32_MAX)
|
||||
HasError = true;
|
||||
|
||||
return static_cast<uint32_t>(Res);
|
||||
}
|
||||
|
||||
uint64_t visitELF_390_64(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
|
||||
uint64_t visitELF_SPARC_32(RelocationRef R, uint32_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
|
||||
uint64_t visitELF_SPARCV9_32(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
|
||||
uint64_t visitELF_SPARCV9_64(RelocationRef R, uint64_t Value) {
|
||||
return Value + getELFAddend(R);
|
||||
}
|
||||
|
||||
uint64_t visitELF_ARM_ABS32(RelocationRef R, uint64_t Value) {
|
||||
int64_t Res = Value;
|
||||
|
||||
// Overflow check allows for both signed and unsigned interpretation.
|
||||
if (Res < INT32_MIN || Res > UINT32_MAX)
|
||||
HasError = true;
|
||||
|
||||
return static_cast<uint32_t>(Res);
|
||||
}
|
||||
|
||||
uint64_t visitELF_HEX_32(RelocationRef R, uint64_t Value) {
|
||||
int64_t Addend = getELFAddend(R);
|
||||
return Value + Addend;
|
||||
}
|
||||
|
||||
uint64_t visitELF_AMDGPU_ABS32(RelocationRef R, uint64_t Value) {
|
||||
int64_t Addend = getELFAddend(R);
|
||||
return Value + Addend;
|
||||
}
|
||||
|
||||
uint64_t visitELF_AMDGPU_ABS64(RelocationRef R, uint64_t Value) {
|
||||
int64_t Addend = getELFAddend(R);
|
||||
return Value + Addend;
|
||||
}
|
||||
|
||||
/// I386 COFF
|
||||
uint64_t visitCOFF_I386_SECREL(RelocationRef R, uint64_t Value) {
|
||||
return static_cast<uint32_t>(Value);
|
||||
}
|
||||
|
||||
uint64_t visitCOFF_I386_DIR32(RelocationRef R, uint64_t Value) {
|
||||
return static_cast<uint32_t>(Value);
|
||||
}
|
||||
|
||||
/// AMD64 COFF
|
||||
uint64_t visitCOFF_AMD64_SECREL(RelocationRef R, uint64_t Value) {
|
||||
return static_cast<uint32_t>(Value);
|
||||
}
|
||||
|
||||
uint64_t visitCOFF_AMD64_ADDR64(RelocationRef R, uint64_t Value) {
|
||||
return Value;
|
||||
}

  // X86_64 MachO
  uint64_t visitMACHO_X86_64_UNSIGNED(RelocationRef R, uint64_t Value) {
    return Value;
  uint64_t visitMachO(uint32_t Rel, RelocationRef R, uint64_t Value) {
    if (ObjToVisit.getArch() == Triple::x86_64 &&
        Rel == MachO::X86_64_RELOC_UNSIGNED)
      return Value;
    HasError = true;
    return 0;
  }
};
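For context, the refactoring above replaces the long per-relocation visitELF_* methods with one helper per architecture, while the public visit() entry point keeps dispatching on the container format first. A hypothetical driver, offered only as a sketch (it assumes RelocVisitor's error() accessor and the RelocationRef iteration API of this revision)::

  #include "llvm/Object/ObjectFile.h"
  #include "llvm/Object/RelocVisitor.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;
  using namespace llvm::object;

  static void resolveAllRelocations(const ObjectFile &Obj) {
    for (const SectionRef &Sec : Obj.sections()) {
      for (const RelocationRef &R : Sec.relocations()) {
        // HasError is sticky, so use a fresh visitor per relocation.
        RelocVisitor V(Obj);
        // visit() dispatches on ELF/COFF/MachO, then the per-architecture
        // helper switches over the relocation type.
        uint64_t Resolved = V.visit(R.getType(), R, /*Value=*/0);
        if (V.error())
          errs() << "unhandled relocation type " << R.getType() << "\n";
        else
          outs() << "resolved to " << Resolved << "\n";
      }
    }
  }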
@ -119,6 +119,7 @@ class WasmObjectFile : public ObjectFile {
|
||||
std::error_code getSectionName(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
uint64_t getSectionAddress(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionIndex(DataRefImpl Sec) const override;
|
||||
uint64_t getSectionSize(DataRefImpl Sec) const override;
|
||||
std::error_code getSectionContents(DataRefImpl Sec,
|
||||
StringRef &Res) const override;
|
||||
|
@ -113,6 +113,14 @@ class OptTable {
|
||||
return getInfo(id).MetaVar;
|
||||
}
|
||||
|
||||
  /// Find flags from OptTable which start with Cur.
  ///
  /// \param [in] Cur - String prefix that all returned flags need
  ///   to start with.
  ///
  /// \return The vector of flags which start with Cur.
  std::vector<std::string> findByPrefix(StringRef Cur) const;
|
||||
|
||||
/// \brief Parse a single argument; returning the new argument and
|
||||
/// updating Index.
|
||||
///
|
||||
|
@ -212,12 +212,12 @@ StringRef getFuncNameWithoutPrefix(StringRef PGOFuncName,
|
||||
/// third field is the uncompressed strings; otherwise it is the
|
||||
/// compressed string. When the string compression is off, the
|
||||
/// second field will have value zero.
|
||||
Error collectPGOFuncNameStrings(const std::vector<std::string> &NameStrs,
|
||||
Error collectPGOFuncNameStrings(ArrayRef<std::string> NameStrs,
|
||||
bool doCompression, std::string &Result);
|
||||
|
||||
/// Produce \c Result string with the same format described above. The input
|
||||
/// is vector of PGO function name variables that are referenced.
|
||||
Error collectPGOFuncNameStrings(const std::vector<GlobalVariable *> &NameVars,
|
||||
Error collectPGOFuncNameStrings(ArrayRef<GlobalVariable *> NameVars,
|
||||
std::string &Result, bool doCompression = true);
|
||||
|
||||
/// \c NameStrings is a string composed of one of more sub-strings encoded in
|
||||
@ -967,7 +967,7 @@ struct Header {
|
||||
} // end namespace RawInstrProf
|
||||
|
||||
// Parse MemOP Size range option.
|
||||
void getMemOPSizeRangeFromOption(std::string Str, int64_t &RangeStart,
|
||||
void getMemOPSizeRangeFromOption(StringRef Str, int64_t &RangeStart,
|
||||
int64_t &RangeLast);
|
||||
|
||||
} // end namespace llvm
|
||||
|
@ -671,7 +671,7 @@ class CodeInit : public TypedInit {
|
||||
/// [AL, AH, CL] - Represent a list of defs
|
||||
///
|
||||
class ListInit final : public TypedInit, public FoldingSetNode,
|
||||
public TrailingObjects<BitsInit, Init *> {
|
||||
public TrailingObjects<ListInit, Init *> {
|
||||
unsigned NumValues;
|
||||
|
||||
public:
|
||||
@ -1137,17 +1137,19 @@ class FieldInit : public TypedInit {
|
||||
/// to have at least one value then a (possibly empty) list of arguments. Each
|
||||
/// argument can have a name associated with it.
|
||||
///
|
||||
class DagInit : public TypedInit, public FoldingSetNode {
|
||||
class DagInit final : public TypedInit, public FoldingSetNode,
|
||||
public TrailingObjects<DagInit, Init *, StringInit *> {
|
||||
Init *Val;
|
||||
StringInit *ValName;
|
||||
SmallVector<Init*, 4> Args;
|
||||
SmallVector<StringInit*, 4> ArgNames;
|
||||
unsigned NumArgs;
|
||||
unsigned NumArgNames;
|
||||
|
||||
DagInit(Init *V, StringInit *VN, ArrayRef<Init *> ArgRange,
|
||||
ArrayRef<StringInit *> NameRange)
|
||||
DagInit(Init *V, StringInit *VN, unsigned NumArgs, unsigned NumArgNames)
|
||||
: TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
|
||||
Args(ArgRange.begin(), ArgRange.end()),
|
||||
ArgNames(NameRange.begin(), NameRange.end()) {}
|
||||
NumArgs(NumArgs), NumArgNames(NumArgNames) {}
|
||||
|
||||
friend TrailingObjects;
|
||||
size_t numTrailingObjects(OverloadToken<Init *>) const { return NumArgs; }
|
||||
|
||||
public:
|
||||
DagInit(const DagInit &Other) = delete;
|
||||
@ -1173,20 +1175,24 @@ class DagInit : public TypedInit, public FoldingSetNode {
|
||||
return ValName ? ValName->getValue() : StringRef();
|
||||
}
|
||||
|
||||
unsigned getNumArgs() const { return Args.size(); }
|
||||
unsigned getNumArgs() const { return NumArgs; }
|
||||
Init *getArg(unsigned Num) const {
|
||||
assert(Num < Args.size() && "Arg number out of range!");
|
||||
return Args[Num];
|
||||
assert(Num < NumArgs && "Arg number out of range!");
|
||||
return getTrailingObjects<Init *>()[Num];
|
||||
}
|
||||
StringInit *getArgName(unsigned Num) const {
|
||||
assert(Num < ArgNames.size() && "Arg number out of range!");
|
||||
return ArgNames[Num];
|
||||
assert(Num < NumArgNames && "Arg number out of range!");
|
||||
return getTrailingObjects<StringInit *>()[Num];
|
||||
}
|
||||
StringRef getArgNameStr(unsigned Num) const {
|
||||
StringInit *Init = getArgName(Num);
|
||||
return Init ? Init->getValue() : StringRef();
|
||||
}
|
||||
|
||||
ArrayRef<StringInit *> getArgNames() const {
|
||||
return makeArrayRef(getTrailingObjects<StringInit *>(), NumArgNames);
|
||||
}
|
||||
|
||||
Init *resolveReferences(Record &R, const RecordVal *RV) const override;
|
||||
|
||||
std::string getAsString() const override;
|
||||
@ -1194,20 +1200,20 @@ class DagInit : public TypedInit, public FoldingSetNode {
|
||||
typedef SmallVectorImpl<Init*>::const_iterator const_arg_iterator;
|
||||
typedef SmallVectorImpl<StringInit*>::const_iterator const_name_iterator;
|
||||
|
||||
inline const_arg_iterator arg_begin() const { return Args.begin(); }
|
||||
inline const_arg_iterator arg_end () const { return Args.end(); }
|
||||
inline const_arg_iterator arg_begin() const { return getTrailingObjects<Init *>(); }
|
||||
inline const_arg_iterator arg_end () const { return arg_begin() + NumArgs; }
|
||||
inline iterator_range<const_arg_iterator> args() const {
|
||||
return llvm::make_range(arg_begin(), arg_end());
|
||||
}
|
||||
|
||||
inline size_t arg_size () const { return Args.size(); }
|
||||
inline bool arg_empty() const { return Args.empty(); }
|
||||
inline size_t arg_size () const { return NumArgs; }
|
||||
inline bool arg_empty() const { return NumArgs == 0; }
|
||||
|
||||
inline const_name_iterator name_begin() const { return ArgNames.begin(); }
|
||||
inline const_name_iterator name_end () const { return ArgNames.end(); }
|
||||
inline const_name_iterator name_begin() const { return getTrailingObjects<StringInit *>(); }
|
||||
inline const_name_iterator name_end () const { return name_begin() + NumArgNames; }
|
||||
|
||||
inline size_t name_size () const { return ArgNames.size(); }
|
||||
inline bool name_empty() const { return ArgNames.empty(); }
|
||||
inline size_t name_size () const { return NumArgNames; }
|
||||
inline bool name_empty() const { return NumArgNames == 0; }
|
||||
|
||||
Init *getBit(unsigned Bit) const override {
|
||||
llvm_unreachable("Illegal bit reference off dag");
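The DagInit change above mirrors what ListInit already does: the argument and name arrays are no longer owned by SmallVectors but live in the same allocation, directly behind the object, via TrailingObjects. A self-contained sketch of that layout idea (plain C++, not LLVM's actual TrailingObjects implementation)::

  #include <cassert>
  #include <new>

  // One allocation holds the header followed by NumArgs ints.
  class ArgList {
    unsigned NumArgs;

    explicit ArgList(unsigned N) : NumArgs(N) {}

    // The trailing array begins immediately after the object itself.
    int *args() { return reinterpret_cast<int *>(this + 1); }
    const int *args() const { return reinterpret_cast<const int *>(this + 1); }

  public:
    static ArgList *create(const int *Vals, unsigned N) {
      void *Mem = ::operator new(sizeof(ArgList) + N * sizeof(int));
      ArgList *AL = new (Mem) ArgList(N);
      for (unsigned I = 0; I != N; ++I)
        AL->args()[I] = Vals[I];
      return AL;
    }

    unsigned size() const { return NumArgs; }
    int get(unsigned I) const {
      assert(I < NumArgs && "index out of range");
      return args()[I];
    }
  };

getArg()/getNumArgs() in the hunk above follow the same pattern, with Init* and StringInit* payloads instead of int.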
@ -405,7 +405,9 @@ class TargetLoweringBase {
|
||||
}
|
||||
|
||||
/// Returns if it's reasonable to merge stores to MemVT size.
|
||||
virtual bool canMergeStoresTo(EVT MemVT) const { return true; }
|
||||
virtual bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
/// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
|
||||
virtual bool isCheapToSpeculateCttz() const {
|
||||
@ -736,7 +738,7 @@ class TargetLoweringBase {
|
||||
if (VT.isExtended()) return Expand;
|
||||
// If a target-specific SDNode requires legalization, require the target
|
||||
// to provide custom legalization for it.
|
||||
if (Op > array_lengthof(OpActions[0])) return Custom;
|
||||
if (Op >= array_lengthof(OpActions[0])) return Custom;
|
||||
return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
|
||||
}
|
||||
|
||||
|
@ -354,6 +354,13 @@ FunctionPass *createEarlyCSEPass(bool UseMemorySSA = false);
|
||||
//
|
||||
FunctionPass *createGVNHoistPass();
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// GVNSink - This pass uses an "inverted" value numbering to decide the
|
||||
// similarity of expressions and sinks similar expressions into successors.
|
||||
//
|
||||
FunctionPass *createGVNSinkPass();
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// MergedLoadStoreMotion - This pass merges loads and stores in diamonds. Loads
|
||||
|
@ -68,6 +68,24 @@ class GVN : public PassInfoMixin<GVN> {
|
||||
class ValueTable {
|
||||
DenseMap<Value *, uint32_t> valueNumbering;
|
||||
DenseMap<Expression, uint32_t> expressionNumbering;
|
||||
|
||||
// Expressions is the vector of Expression. ExprIdx is the mapping from
|
||||
// value number to the index of Expression in Expressions. We use it
|
||||
// instead of a DenseMap because filling such mapping is faster than
|
||||
// filling a DenseMap and the compile time is a little better.
|
||||
uint32_t nextExprNumber;
|
||||
std::vector<Expression> Expressions;
|
||||
std::vector<uint32_t> ExprIdx;
|
||||
// Value number to PHINode mapping. Used for phi-translate in scalarpre.
|
||||
DenseMap<uint32_t, PHINode *> NumberingPhi;
|
||||
// Cache for phi-translate in scalarpre.
|
||||
typedef DenseMap<std::pair<uint32_t, const BasicBlock *>, uint32_t>
|
||||
PhiTranslateMap;
|
||||
PhiTranslateMap PhiTranslateTable;
|
||||
// Map the block to reversed postorder traversal number. It is used to
|
||||
// find back edge easily.
|
||||
DenseMap<const BasicBlock *, uint32_t> BlockRPONumber;
|
||||
|
||||
AliasAnalysis *AA;
|
||||
MemoryDependenceResults *MD;
|
||||
DominatorTree *DT;
|
||||
@ -79,6 +97,10 @@ class GVN : public PassInfoMixin<GVN> {
|
||||
Value *LHS, Value *RHS);
|
||||
Expression createExtractvalueExpr(ExtractValueInst *EI);
|
||||
uint32_t lookupOrAddCall(CallInst *C);
|
||||
uint32_t phiTranslateImpl(const BasicBlock *BB, const BasicBlock *PhiBlock,
|
||||
uint32_t Num, GVN &Gvn);
|
||||
std::pair<uint32_t, bool> assignExpNewValueNum(Expression &exp);
|
||||
bool areAllValsInBB(uint32_t num, const BasicBlock *BB, GVN &Gvn);
|
||||
|
||||
public:
|
||||
ValueTable();
|
||||
@ -87,9 +109,12 @@ class GVN : public PassInfoMixin<GVN> {
|
||||
~ValueTable();
|
||||
|
||||
uint32_t lookupOrAdd(Value *V);
|
||||
uint32_t lookup(Value *V) const;
|
||||
uint32_t lookup(Value *V, bool Verify = true) const;
|
||||
uint32_t lookupOrAddCmp(unsigned Opcode, CmpInst::Predicate Pred,
|
||||
Value *LHS, Value *RHS);
|
||||
uint32_t phiTranslate(const BasicBlock *BB, const BasicBlock *PhiBlock,
|
||||
uint32_t Num, GVN &Gvn);
|
||||
void assignBlockRPONumber(Function &F);
|
||||
bool exists(Value *V) const;
|
||||
void add(Value *V, uint32_t num);
|
||||
void clear();
|
||||
@ -238,7 +263,12 @@ struct GVNHoistPass : PassInfoMixin<GVNHoistPass> {
|
||||
/// \brief Run the pass over the function.
|
||||
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
|
||||
};
|
||||
|
||||
/// \brief Uses an "inverted" value numbering to decide the similarity of
|
||||
/// expressions and sinks similar expressions into successors.
|
||||
struct GVNSinkPass : PassInfoMixin<GVNSinkPass> {
|
||||
/// \brief Run the pass over the function.
|
||||
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
|
||||
};
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -356,6 +356,10 @@ void combineMetadata(Instruction *K, const Instruction *J, ArrayRef<unsigned> Kn
|
||||
/// Unknown metadata is removed.
|
||||
void combineMetadataForCSE(Instruction *K, const Instruction *J);
|
||||
|
||||
// Replace each use of 'From' with 'To', if that use does not belong to basic
|
||||
// block where 'From' is defined. Returns the number of replacements made.
|
||||
unsigned replaceNonLocalUsesWith(Instruction *From, Value *To);
|
||||
|
||||
/// Replace each use of 'From' with 'To' if that use is dominated by
|
||||
/// the given edge. Returns the number of replacements made.
|
||||
unsigned replaceDominatedUsesWith(Value *From, Value *To, DominatorTree &DT,
|
||||
@ -406,6 +410,14 @@ bool recognizeBSwapOrBitReverseIdiom(
|
||||
void maybeMarkSanitizerLibraryCallNoBuiltin(CallInst *CI,
|
||||
const TargetLibraryInfo *TLI);
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// Transform predicates
|
||||
//
|
||||
|
||||
/// Given an instruction, is it legal to set operand OpIdx to a non-constant
|
||||
/// value?
|
||||
bool canReplaceOperandWithVariable(const Instruction *I, unsigned OpIdx);
|
||||
|
||||
} // End llvm namespace
|
||||
|
||||
#endif
|
||||
|
@ -687,11 +687,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
|
||||
// bits.
|
||||
|
||||
if (Opc == Instruction::And) {
|
||||
unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
|
||||
KnownBits Known0(BitWidth);
|
||||
KnownBits Known1(BitWidth);
|
||||
computeKnownBits(Op0, Known0, DL);
|
||||
computeKnownBits(Op1, Known1, DL);
|
||||
KnownBits Known0 = computeKnownBits(Op0, DL);
|
||||
KnownBits Known1 = computeKnownBits(Op1, DL);
|
||||
if ((Known1.One | Known0.Zero).isAllOnesValue()) {
|
||||
// All the bits of Op0 that the 'and' could be masking are already zero.
|
||||
return Op0;
|
||||
|
@ -688,9 +688,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
|
||||
if (isNUW)
|
||||
return Op0;
|
||||
|
||||
unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
|
||||
KnownBits Known(BitWidth);
|
||||
computeKnownBits(Op1, Known, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
|
||||
KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
|
||||
if (Known.Zero.isMaxSignedValue()) {
|
||||
// Op1 is either 0 or the minimum signed value. If the sub is NSW, then
|
||||
// Op1 must be 0 because negating the minimum signed value is undefined.
|
||||
@ -1309,15 +1307,13 @@ static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
|
||||
|
||||
// If any bits in the shift amount make that value greater than or equal to
|
||||
// the number of bits in the type, the shift is undefined.
|
||||
unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
|
||||
KnownBits Known(BitWidth);
|
||||
computeKnownBits(Op1, Known, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
|
||||
if (Known.One.getLimitedValue() >= BitWidth)
|
||||
KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
|
||||
if (Known.One.getLimitedValue() >= Known.getBitWidth())
|
||||
return UndefValue::get(Op0->getType());
|
||||
|
||||
// If all valid bits in the shift amount are known zero, the first operand is
|
||||
// unchanged.
|
||||
unsigned NumValidShiftBits = Log2_32_Ceil(BitWidth);
|
||||
unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
|
||||
if (Known.countMinTrailingZeros() >= NumValidShiftBits)
|
||||
return Op0;
|
||||
|
||||
@ -1343,9 +1339,7 @@ static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
|
||||
|
||||
// The low bit cannot be shifted out of an exact shift if it is set.
|
||||
if (isExact) {
|
||||
unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
|
||||
KnownBits Op0Known(BitWidth);
|
||||
computeKnownBits(Op0, Op0Known, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
|
||||
KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
|
||||
if (Op0Known.One[0])
|
||||
return Op0;
|
||||
}
|
||||
@ -1428,6 +1422,8 @@ Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
|
||||
return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
|
||||
}
|
||||
|
||||
/// Commuted variants are assumed to be handled by calling this function again
|
||||
/// with the parameters swapped.
|
||||
static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
|
||||
ICmpInst *UnsignedICmp, bool IsAnd) {
|
||||
Value *X, *Y;
|
||||
@ -1560,20 +1556,8 @@ static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/// Commuted variants are assumed to be handled by calling this function again
|
||||
/// with the parameters swapped.
|
||||
static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
|
||||
return X;
|
||||
|
||||
if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
|
||||
return X;
|
||||
|
||||
if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
|
||||
return X;
|
||||
|
||||
static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
// (icmp (add V, C0), C1) & (icmp V, C0)
|
||||
Type *ITy = Op0->getType();
|
||||
ICmpInst::Predicate Pred0, Pred1;
|
||||
const APInt *C0, *C1;
|
||||
Value *V;
|
||||
@ -1587,6 +1571,7 @@ static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
if (AddInst->getOperand(1) != Op1->getOperand(1))
|
||||
return nullptr;
|
||||
|
||||
Type *ITy = Op0->getType();
|
||||
bool isNSW = AddInst->hasNoSignedWrap();
|
||||
bool isNUW = AddInst->hasNoUnsignedWrap();
|
||||
|
||||
@ -1617,18 +1602,29 @@ static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
/// Commuted variants are assumed to be handled by calling this function again
|
||||
/// with the parameters swapped.
|
||||
static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
|
||||
static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true))
|
||||
return X;
|
||||
if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true))
|
||||
return X;
|
||||
|
||||
if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
|
||||
if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
|
||||
return X;
|
||||
if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
|
||||
return X;
|
||||
|
||||
if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
|
||||
if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
|
||||
return X;
|
||||
|
||||
if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1))
|
||||
return X;
|
||||
if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0))
|
||||
return X;
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
// (icmp (add V, C0), C1) | (icmp V, C0)
|
||||
ICmpInst::Predicate Pred0, Pred1;
|
||||
const APInt *C0, *C1;
|
||||
@ -1674,19 +1670,24 @@ static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
static Value *simplifyPossiblyCastedAndOrOfICmps(ICmpInst *Cmp0, ICmpInst *Cmp1,
|
||||
bool IsAnd, CastInst *Cast) {
|
||||
Value *V =
|
||||
IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
|
||||
if (!V)
|
||||
return nullptr;
|
||||
if (!Cast)
|
||||
return V;
|
||||
static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1) {
|
||||
if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false))
|
||||
return X;
|
||||
if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false))
|
||||
return X;
|
||||
|
||||
// If we looked through casts, we can only handle a constant simplification
|
||||
// because we are not allowed to create a cast instruction here.
|
||||
if (auto *C = dyn_cast<Constant>(V))
|
||||
return ConstantExpr::getCast(Cast->getOpcode(), C, Cast->getType());
|
||||
if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
|
||||
return X;
|
||||
if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
|
||||
return X;
|
||||
|
||||
if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
|
||||
return X;
|
||||
|
||||
if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1))
|
||||
return X;
|
||||
if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0))
|
||||
return X;
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
@ -1706,11 +1707,18 @@ static Value *simplifyAndOrOfICmps(Value *Op0, Value *Op1, bool IsAnd) {
|
||||
if (!Cmp0 || !Cmp1)
|
||||
return nullptr;
|
||||
|
||||
if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp0, Cmp1, IsAnd, Cast0))
|
||||
return V;
|
||||
if (Value *V = simplifyPossiblyCastedAndOrOfICmps(Cmp1, Cmp0, IsAnd, Cast0))
|
||||
Value *V =
|
||||
IsAnd ? simplifyAndOfICmps(Cmp0, Cmp1) : simplifyOrOfICmps(Cmp0, Cmp1);
|
||||
if (!V)
|
||||
return nullptr;
|
||||
if (!Cast0)
|
||||
return V;
|
||||
|
||||
// If we looked through casts, we can only handle a constant simplification
|
||||
// because we are not allowed to create a cast instruction here.
|
||||
if (auto *C = dyn_cast<Constant>(V))
|
||||
return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@ -1927,37 +1935,27 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
|
||||
MaxRecurse))
|
||||
return V;
|
||||
|
||||
// (A & C)|(B & D)
|
||||
Value *C = nullptr, *D = nullptr;
|
||||
if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
|
||||
match(Op1, m_And(m_Value(B), m_Value(D)))) {
|
||||
ConstantInt *C1 = dyn_cast<ConstantInt>(C);
|
||||
ConstantInt *C2 = dyn_cast<ConstantInt>(D);
|
||||
if (C1 && C2 && (C1->getValue() == ~C2->getValue())) {
|
||||
// (A & C1)|(B & C2)
|
||||
const APInt *C1, *C2;
|
||||
if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
|
||||
match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
|
||||
if (*C1 == ~*C2) {
|
||||
// (A & C1)|(B & C2)
|
||||
// If we have: ((V + N) & C1) | (V & C2)
|
||||
// .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
|
||||
// replace with V+N.
|
||||
Value *V1, *V2;
|
||||
if ((C2->getValue() & (C2->getValue() + 1)) == 0 && // C2 == 0+1+
|
||||
match(A, m_Add(m_Value(V1), m_Value(V2)))) {
|
||||
Value *N;
|
||||
if (C2->isMask() && // C2 == 0+1+
|
||||
match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
|
||||
// Add commutes, try both ways.
|
||||
if (V1 == B &&
|
||||
MaskedValueIsZero(V2, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
|
||||
return A;
|
||||
if (V2 == B &&
|
||||
MaskedValueIsZero(V1, C2->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
|
||||
if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
|
||||
return A;
|
||||
}
|
||||
// Or commutes, try both ways.
|
||||
if ((C1->getValue() & (C1->getValue() + 1)) == 0 &&
|
||||
match(B, m_Add(m_Value(V1), m_Value(V2)))) {
|
||||
if (C1->isMask() &&
|
||||
match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
|
||||
// Add commutes, try both ways.
|
||||
if (V1 == A &&
|
||||
MaskedValueIsZero(V2, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
|
||||
return B;
|
||||
if (V2 == A &&
|
||||
MaskedValueIsZero(V1, C1->getValue(), Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
|
||||
if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
|
||||
return B;
|
||||
}
|
||||
}
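A concrete instance of the rewritten fold above: take B = 0x37, N = 0x20, C2 = 0x0F (a mask) and C1 = ~C2 = 0xF0. Then A = B + N = 0x57 and (N & C2) == 0, so (A & C1) | (B & C2) = 0x50 | 0x07 = 0x57, which is exactly A, the value the simplification returns; adding N cannot change the low bits selected by C2 because N has none of them set.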
@ -3372,9 +3370,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
|
||||
if (ICmpInst::isEquality(Pred)) {
|
||||
const APInt *RHSVal;
|
||||
if (match(RHS, m_APInt(RHSVal))) {
|
||||
unsigned BitWidth = RHSVal->getBitWidth();
|
||||
KnownBits LHSKnown(BitWidth);
|
||||
computeKnownBits(LHS, LHSKnown, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
|
||||
KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
|
||||
if (LHSKnown.Zero.intersects(*RHSVal) ||
|
||||
!LHSKnown.One.isSubsetOf(*RHSVal))
|
||||
return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy)
|
||||
@ -3539,6 +3535,10 @@ static const Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
|
||||
if (V == Op)
|
||||
return RepOp;
|
||||
|
||||
// We cannot replace a constant, and shouldn't even try.
|
||||
if (isa<Constant>(Op))
|
||||
return nullptr;
|
||||
|
||||
auto *I = dyn_cast<Instruction>(V);
|
||||
if (!I)
|
||||
return nullptr;
|
||||
@ -4444,19 +4444,21 @@ static Value *SimplifyIntrinsic(Function *F, IterTy ArgBegin, IterTy ArgEnd,
|
||||
case Intrinsic::uadd_with_overflow:
|
||||
case Intrinsic::sadd_with_overflow: {
|
||||
// X + undef -> undef
|
||||
if (isa<UndefValue>(RHS))
|
||||
if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
|
||||
return UndefValue::get(ReturnType);
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
case Intrinsic::umul_with_overflow:
|
||||
case Intrinsic::smul_with_overflow: {
|
||||
// 0 * X -> { 0, false }
|
||||
// X * 0 -> { 0, false }
|
||||
if (match(RHS, m_Zero()))
|
||||
if (match(LHS, m_Zero()) || match(RHS, m_Zero()))
|
||||
return Constant::getNullValue(ReturnType);
|
||||
|
||||
// undef * X -> { 0, false }
|
||||
// X * undef -> { 0, false }
|
||||
if (match(RHS, m_Undef()))
|
||||
if (match(LHS, m_Undef()) || match(RHS, m_Undef()))
|
||||
return Constant::getNullValue(ReturnType);
|
||||
|
||||
return nullptr;
|
||||
@ -4680,9 +4682,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
|
||||
// In general, it is possible for computeKnownBits to determine all bits in a
|
||||
// value even when the operands are not all constants.
|
||||
if (!Result && I->getType()->isIntOrIntVectorTy()) {
|
||||
unsigned BitWidth = I->getType()->getScalarSizeInBits();
|
||||
KnownBits Known(BitWidth);
|
||||
computeKnownBits(I, Known, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
|
||||
KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
|
||||
if (Known.isConstant())
|
||||
Result = ConstantInt::get(I->getType(), Known.getConstant());
|
||||
}
|
||||
|
@ -534,9 +534,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
|
||||
|
||||
VectorType *VecTy = dyn_cast<VectorType>(V->getType());
|
||||
if (!VecTy) {
|
||||
unsigned BitWidth = V->getType()->getIntegerBitWidth();
|
||||
KnownBits Known(BitWidth);
|
||||
computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
|
||||
KnownBits Known = computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
|
||||
return Known.isZero();
|
||||
}
|
||||
|
||||
@ -550,14 +548,12 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
|
||||
|
||||
// For a vector, KnownZero will only be true if all values are zero, so check
|
||||
// this per component
|
||||
unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
|
||||
for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
|
||||
Constant *Elem = C->getAggregateElement(I);
|
||||
if (isa<UndefValue>(Elem))
|
||||
return true;
|
||||
|
||||
KnownBits Known(BitWidth);
|
||||
computeKnownBits(Elem, Known, DL);
|
||||
KnownBits Known = computeKnownBits(Elem, DL);
|
||||
if (Known.isZero())
|
||||
return true;
|
||||
}
|
||||
|
@ -73,30 +73,23 @@ LPPassManager::LPPassManager()
|
||||
CurrentLoop = nullptr;
|
||||
}
|
||||
|
||||
// Inset loop into loop nest (LoopInfo) and loop queue (LQ).
|
||||
Loop &LPPassManager::addLoop(Loop *ParentLoop) {
|
||||
// Create a new loop. LI will take ownership.
|
||||
Loop *L = new Loop();
|
||||
|
||||
// Insert into the loop nest and the loop queue.
|
||||
if (!ParentLoop) {
|
||||
// Insert loop into loop nest (LoopInfo) and loop queue (LQ).
|
||||
void LPPassManager::addLoop(Loop &L) {
|
||||
if (!L.getParentLoop()) {
|
||||
// This is the top level loop.
|
||||
LI->addTopLevelLoop(L);
|
||||
LQ.push_front(L);
|
||||
return *L;
|
||||
LQ.push_front(&L);
|
||||
return;
|
||||
}
|
||||
|
||||
ParentLoop->addChildLoop(L);
|
||||
// Insert L into the loop queue after the parent loop.
|
||||
for (auto I = LQ.begin(), E = LQ.end(); I != E; ++I) {
|
||||
if (*I == L->getParentLoop()) {
|
||||
if (*I == L.getParentLoop()) {
|
||||
// deque does not support insert after.
|
||||
++I;
|
||||
LQ.insert(I, 1, L);
|
||||
break;
|
||||
LQ.insert(I, 1, &L);
|
||||
return;
|
||||
}
|
||||
}
|
||||
return *L;
|
||||
}
|
||||
|
||||
/// cloneBasicBlockSimpleAnalysis - Invoke cloneBasicBlockAnalysis hook for
|
||||
|
@ -2178,6 +2178,63 @@ StrengthenNoWrapFlags(ScalarEvolution *SE, SCEVTypes Type,
|
||||
return Flags;
|
||||
}
|
||||
|
||||
bool ScalarEvolution::isAvailableAtLoopEntry(const SCEV *S, const Loop *L,
|
||||
DominatorTree &DT, LoopInfo &LI) {
|
||||
if (!isLoopInvariant(S, L))
|
||||
return false;
|
||||
// If a value depends on a SCEVUnknown which is defined after the loop, we
|
||||
// conservatively assume that we cannot calculate it at the loop's entry.
|
||||
struct FindDominatedSCEVUnknown {
|
||||
bool Found = false;
|
||||
const Loop *L;
|
||||
DominatorTree &DT;
|
||||
LoopInfo &LI;
|
||||
|
||||
FindDominatedSCEVUnknown(const Loop *L, DominatorTree &DT, LoopInfo &LI)
|
||||
: L(L), DT(DT), LI(LI) {}
|
||||
|
||||
bool checkSCEVUnknown(const SCEVUnknown *SU) {
|
||||
if (auto *I = dyn_cast<Instruction>(SU->getValue())) {
|
||||
if (DT.dominates(L->getHeader(), I->getParent()))
|
||||
Found = true;
|
||||
else
|
||||
assert(DT.dominates(I->getParent(), L->getHeader()) &&
|
||||
"No dominance relationship between SCEV and loop?");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool follow(const SCEV *S) {
|
||||
switch (static_cast<SCEVTypes>(S->getSCEVType())) {
|
||||
case scConstant:
|
||||
return false;
|
||||
case scAddRecExpr:
|
||||
case scTruncate:
|
||||
case scZeroExtend:
|
||||
case scSignExtend:
|
||||
case scAddExpr:
|
||||
case scMulExpr:
|
||||
case scUMaxExpr:
|
||||
case scSMaxExpr:
|
||||
case scUDivExpr:
|
||||
return true;
|
||||
case scUnknown:
|
||||
return checkSCEVUnknown(cast<SCEVUnknown>(S));
|
||||
case scCouldNotCompute:
|
||||
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool isDone() { return Found; }
|
||||
};
|
||||
|
||||
FindDominatedSCEVUnknown FSU(L, DT, LI);
|
||||
SCEVTraversal<FindDominatedSCEVUnknown> ST(FSU);
|
||||
ST.visitAll(S);
|
||||
return !FSU.Found;
|
||||
}
|
||||
|
||||
/// Get a canonical add expression, or something simpler if possible.
|
||||
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
SCEV::NoWrapFlags Flags,
|
||||
@ -2459,7 +2516,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
|
||||
const Loop *AddRecLoop = AddRec->getLoop();
|
||||
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
|
||||
if (isLoopInvariant(Ops[i], AddRecLoop)) {
|
||||
if (isAvailableAtLoopEntry(Ops[i], AddRecLoop, DT, LI)) {
|
||||
LIOps.push_back(Ops[i]);
|
||||
Ops.erase(Ops.begin()+i);
|
||||
--i; --e;
|
||||
@ -2734,7 +2791,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
|
||||
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
|
||||
const Loop *AddRecLoop = AddRec->getLoop();
|
||||
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
|
||||
if (isLoopInvariant(Ops[i], AddRecLoop)) {
|
||||
if (isAvailableAtLoopEntry(Ops[i], AddRecLoop, DT, LI)) {
|
||||
LIOps.push_back(Ops[i]);
|
||||
Ops.erase(Ops.begin()+i);
|
||||
--i; --e;
|
||||
@ -4648,10 +4705,7 @@ uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
|
||||
|
||||
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
|
||||
// For a SCEVUnknown, ask ValueTracking.
|
||||
unsigned BitWidth = getTypeSizeInBits(U->getType());
|
||||
KnownBits Known(BitWidth);
|
||||
computeKnownBits(U->getValue(), Known, getDataLayout(), 0, &AC,
|
||||
nullptr, &DT);
|
||||
KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
|
||||
return Known.countMinTrailingZeros();
|
||||
}
|
||||
|
||||
@ -4831,8 +4885,7 @@ ScalarEvolution::getRange(const SCEV *S,
|
||||
const DataLayout &DL = getDataLayout();
|
||||
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
|
||||
// For a SCEVUnknown, ask ValueTracking.
|
||||
KnownBits Known(BitWidth);
|
||||
computeKnownBits(U->getValue(), Known, DL, 0, &AC, nullptr, &DT);
|
||||
KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
|
||||
if (Known.One != ~Known.Zero + 1)
|
||||
ConservativeResult =
|
||||
ConservativeResult.intersectWith(ConstantRange(Known.One,
|
||||
@ -9537,8 +9590,11 @@ struct SCEVCollectAddRecMultiplies {
|
||||
bool HasAddRec = false;
|
||||
SmallVector<const SCEV *, 0> Operands;
|
||||
for (auto Op : Mul->operands()) {
|
||||
if (isa<SCEVUnknown>(Op)) {
|
||||
const SCEVUnknown *Unknown = dyn_cast<SCEVUnknown>(Op);
|
||||
if (Unknown && !isa<CallInst>(Unknown->getValue())) {
|
||||
Operands.push_back(Op);
|
||||
} else if (Unknown) {
|
||||
HasAddRec = true;
|
||||
} else {
|
||||
bool ContainsAddRec;
|
||||
SCEVHasAddRec ContiansAddRec(ContainsAddRec);
|
||||
|
@@ -1305,12 +1305,17 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
   // Expand the core addrec. If we need post-loop scaling, force it to
   // expand to an integer type to avoid the need for additional casting.
   Type *ExpandTy = PostLoopScale ? IntTy : STy;
+  // We can't use a pointer type for the addrec if the pointer type is
+  // non-integral.
+  Type *AddRecPHIExpandTy =
+      DL.isNonIntegralPointerType(STy) ? Normalized->getType() : ExpandTy;
+
   // In some cases, we decide to reuse an existing phi node but need to truncate
   // it and/or invert the step.
   Type *TruncTy = nullptr;
   bool InvertStep = false;
-  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy,
-                                          TruncTy, InvertStep);
+  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, AddRecPHIExpandTy,
+                                          IntTy, TruncTy, InvertStep);

   // Accommodate post-inc mode, if necessary.
   Value *Result;
@@ -1383,8 +1388,15 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
   // Re-apply any non-loop-dominating offset.
   if (PostLoopOffset) {
     if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
-      const SCEV *const OffsetArray[1] = { PostLoopOffset };
-      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
+      if (Result->getType()->isIntegerTy()) {
+        Value *Base = expandCodeFor(PostLoopOffset, ExpandTy);
+        const SCEV *const OffsetArray[1] = {SE.getUnknown(Result)};
+        Result = expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Base);
+      } else {
+        const SCEV *const OffsetArray[1] = {PostLoopOffset};
+        Result =
+            expandAddToGEP(OffsetArray, OffsetArray + 1, PTy, IntTy, Result);
+      }
     } else {
       Result = InsertNoopCastOfTo(Result, IntTy);
       Result = Builder.CreateAdd(Result,
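The AddRecPHIExpandTy change above avoids expanding induction-variable pointers through integer arithmetic when their address space is non-integral. A minimal, self-contained sketch of the DataLayout query it relies on (the "ni:1" layout string and the helper name are illustrative assumptions, not taken from this diff):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    using namespace llvm;

    // Illustrative only: declare address space 1 as non-integral ("ni:1") and
    // ask the same question the expander asks before reusing ExpandTy.
    static bool needsPointerTypedAddRecPHI(LLVMContext &Ctx) {
      DataLayout DL("ni:1");
      Type *PtrTy = PointerType::get(Type::getInt8Ty(Ctx), /*AddressSpace=*/1);
      return DL.isNonIntegralPointerType(PtrTy); // true for address space 1
    }

Marking an address space non-integral in the data layout tells passes that pointers in it have no stable integer representation, which is why the expander keeps the AddRec PHI in pointer type here.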
@@ -149,6 +149,10 @@ bool TargetTransformInfo::isLegalMaskedScatter(Type *DataType) const {
   return TTIImpl->isLegalMaskedGather(DataType);
 }

+bool TargetTransformInfo::prefersVectorizedAddressing() const {
+  return TTIImpl->prefersVectorizedAddressing();
+}
+
 int TargetTransformInfo::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                               int64_t BaseOffset,
                                               bool HasBaseReg,
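A sketch of how a transform holding a TargetTransformInfo handle might consult the hook added above (the helper name is illustrative; this diff only adds the query itself):

    #include "llvm/Analysis/TargetTransformInfo.h"

    using namespace llvm;

    // Illustrative only: a client that leaves address computations scalar when
    // the target reports no preference for vectorized addressing.
    static bool keepAddressComputationScalar(const TargetTransformInfo &TTI) {
      return !TTI.prefersVectorizedAddressing();
    }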
@@ -149,8 +149,10 @@ static KnownBits computeKnownBits(const Value *V, unsigned Depth,
 KnownBits llvm::computeKnownBits(const Value *V, const DataLayout &DL,
                                  unsigned Depth, AssumptionCache *AC,
                                  const Instruction *CxtI,
-                                 const DominatorTree *DT) {
-  return ::computeKnownBits(V, Depth, Query(DL, AC, safeCxtI(V, CxtI), DT));
+                                 const DominatorTree *DT,
+                                 OptimizationRemarkEmitter *ORE) {
+  return ::computeKnownBits(V, Depth,
+                            Query(DL, AC, safeCxtI(V, CxtI), DT, ORE));
 }

 bool llvm::haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
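The overload above returns its KnownBits result by value and takes an optional OptimizationRemarkEmitter; the ScalarEvolution hunks earlier in this diff switch to the value-returning form. A short usage sketch (assumes the LLVM headers at this revision; the helper name is illustrative):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/KnownBits.h"

    using namespace llvm;

    // Illustrative only: no pre-sized KnownBits out-parameter is needed; the
    // returned value carries its own bit width.
    static unsigned minTrailingZeros(const Value *V, const DataLayout &DL) {
      KnownBits Known = computeKnownBits(V, DL, /*Depth=*/0, /*AC=*/nullptr,
                                         /*CxtI=*/nullptr, /*DT=*/nullptr,
                                         /*ORE=*/nullptr);
      return Known.countMinTrailingZeros();
    }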
@@ -660,10 +660,12 @@ void ModuleBitcodeWriter::writeAttributeTable() {

   SmallVector<uint64_t, 64> Record;
   for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
-    const AttributeList &A = Attrs[i];
-    for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i)
-      Record.push_back(
-          VE.getAttributeGroupID({A.getSlotIndex(i), A.getSlotAttributes(i)}));
+    AttributeList AL = Attrs[i];
+    for (unsigned i = AL.index_begin(), e = AL.index_end(); i != e; ++i) {
+      AttributeSet AS = AL.getAttributes(i);
+      if (AS.hasAttributes())
+        Record.push_back(VE.getAttributeGroupID({i, AS}));
+    }

     Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
     Record.clear();
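A compact sketch of the index-based AttributeList walk used above, which replaces the slot-based getNumSlots/getSlotIndex iteration; the same idiom reappears in ValueEnumerator::EnumerateAttributes below (the helper name is illustrative):

    #include "llvm/IR/Attributes.h"

    using namespace llvm;

    // Illustrative only: visit each used index (return value, function, and
    // argument positions) and count the non-empty attribute sets.
    static unsigned countUsedAttributeSets(AttributeList AL) {
      unsigned N = 0;
      for (unsigned i = AL.index_begin(), e = AL.index_end(); i != e; ++i)
        if (AL.getAttributes(i).hasAttributes())
          ++N;
      return N;
    }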
@@ -3413,30 +3415,8 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {

   // Create value IDs for undefined references.
   forEachSummary([&](GVInfo I) {
-    if (auto *VS = dyn_cast<GlobalVarSummary>(I.second)) {
-      for (auto &RI : VS->refs())
-        assignValueId(RI.getGUID());
-      return;
-    }
-
-    auto *FS = dyn_cast<FunctionSummary>(I.second);
-    if (!FS)
-      return;
-    for (auto &RI : FS->refs())
+    for (auto &RI : I.second->refs())
       assignValueId(RI.getGUID());
-
-    for (auto &EI : FS->calls()) {
-      GlobalValue::GUID GUID = EI.first.getGUID();
-      if (!hasValueId(GUID)) {
-        // For SamplePGO, the indirect call targets for local functions will
-        // have its original name annotated in profile. We try to find the
-        // corresponding PGOFuncName as the GUID.
-        GUID = Index.getGUIDFromOriginalID(GUID);
-        if (GUID == 0 || !hasValueId(GUID))
-          continue;
-      }
-      assignValueId(GUID);
-    }
   });

   for (const auto &GVI : valueIds()) {
@@ -902,8 +902,11 @@ void ValueEnumerator::EnumerateAttributes(AttributeList PAL) {
   }

   // Do lookups for all attribute groups.
-  for (unsigned i = 0, e = PAL.getNumSlots(); i != e; ++i) {
-    IndexAndAttrSet Pair = {PAL.getSlotIndex(i), PAL.getSlotAttributes(i)};
+  for (unsigned i = PAL.index_begin(), e = PAL.index_end(); i != e; ++i) {
+    AttributeSet AS = PAL.getAttributes(i);
+    if (!AS.hasAttributes())
+      continue;
+    IndexAndAttrSet Pair = {i, AS};
     unsigned &Entry = AttributeGroupMap[Pair];
     if (Entry == 0) {
       AttributeGroups.push_back(Pair);
@@ -628,12 +628,15 @@ void AsmPrinter::EmitDebugThreadLocal(const MCExpr *Value,
 /// EmitFunctionHeader - This method emits the header for the current
 /// function.
 void AsmPrinter::EmitFunctionHeader() {
+  const Function *F = MF->getFunction();
+
+  if (isVerbose())
+    OutStreamer->GetCommentOS() << "-- Begin function " << F->getName() << '\n';
+
   // Print out constants referenced by the function
   EmitConstantPool();

   // Print the 'header' of function.
-  const Function *F = MF->getFunction();
-
   OutStreamer->SwitchSection(getObjFileLowering().SectionForGlobal(F, TM));
   EmitVisibility(CurrentFnSym, F->getVisibility());

@@ -1107,6 +1110,9 @@ void AsmPrinter::EmitFunctionBody() {
     HI.Handler->endFunction(MF);
   }

+  if (isVerbose())
+    OutStreamer->GetCommentOS() << "-- End function\n";
+
   OutStreamer->AddBlankLine();
 }

@@ -1025,11 +1025,11 @@ void CodeViewDebug::beginFunctionImpl(const MachineFunction *MF) {
   bool EmptyPrologue = true;
   for (const auto &MBB : *MF) {
     for (const auto &MI : MBB) {
-      if (!MI.isDebugValue() && !MI.getFlag(MachineInstr::FrameSetup) &&
+      if (!MI.isMetaInstruction() && !MI.getFlag(MachineInstr::FrameSetup) &&
           MI.getDebugLoc()) {
         PrologEndLoc = MI.getDebugLoc();
         break;
-      } else if (!MI.isDebugValue()) {
+      } else if (!MI.isMetaInstruction()) {
         EmptyPrologue = false;
       }
     }
@@ -1562,7 +1562,7 @@ TypeIndex CodeViewDebug::lowerTypeEnum(const DICompositeType *Ty) {
         EnumeratorCount++;
       }
     }
-    FTI = FLRB.end();
+    FTI = FLRB.end(true);
   }

   std::string FullName = getFullyQualifiedName(Ty);
@@ -1869,7 +1869,7 @@ CodeViewDebug::lowerRecordFieldList(const DICompositeType *Ty) {
     MemberCount++;
   }

-  TypeIndex FieldTI = FLBR.end();
+  TypeIndex FieldTI = FLBR.end(true);
   return std::make_tuple(FieldTI, Info.VShapeTI, MemberCount,
                          !Info.NestedClasses.empty());
 }
@@ -116,65 +116,17 @@ void DIEHash::addParentContext(const DIE &Parent) {

 // Collect all of the attributes for a particular DIE in single structure.
 void DIEHash::collectAttributes(const DIE &Die, DIEAttrs &Attrs) {
-#define COLLECT_ATTR(NAME) \
-  case dwarf::NAME: \
-    Attrs.NAME = V; \
-    break
-
   for (const auto &V : Die.values()) {
     DEBUG(dbgs() << "Attribute: "
                  << dwarf::AttributeString(V.getAttribute())
                  << " added.\n");
     switch (V.getAttribute()) {
-    COLLECT_ATTR(DW_AT_name);
-    COLLECT_ATTR(DW_AT_accessibility);
-    COLLECT_ATTR(DW_AT_address_class);
-    COLLECT_ATTR(DW_AT_allocated);
-    COLLECT_ATTR(DW_AT_artificial);
-    COLLECT_ATTR(DW_AT_associated);
-    COLLECT_ATTR(DW_AT_binary_scale);
-    COLLECT_ATTR(DW_AT_bit_offset);
-    COLLECT_ATTR(DW_AT_bit_size);
-    COLLECT_ATTR(DW_AT_bit_stride);
-    COLLECT_ATTR(DW_AT_byte_size);
-    COLLECT_ATTR(DW_AT_byte_stride);
-    COLLECT_ATTR(DW_AT_const_expr);
-    COLLECT_ATTR(DW_AT_const_value);
-    COLLECT_ATTR(DW_AT_containing_type);
-    COLLECT_ATTR(DW_AT_count);
-    COLLECT_ATTR(DW_AT_data_bit_offset);
-    COLLECT_ATTR(DW_AT_data_location);
-    COLLECT_ATTR(DW_AT_data_member_location);
-    COLLECT_ATTR(DW_AT_decimal_scale);
-    COLLECT_ATTR(DW_AT_decimal_sign);
-    COLLECT_ATTR(DW_AT_default_value);
-    COLLECT_ATTR(DW_AT_digit_count);
-    COLLECT_ATTR(DW_AT_discr);
-    COLLECT_ATTR(DW_AT_discr_list);
-    COLLECT_ATTR(DW_AT_discr_value);
-    COLLECT_ATTR(DW_AT_encoding);
-    COLLECT_ATTR(DW_AT_enum_class);
-    COLLECT_ATTR(DW_AT_endianity);
-    COLLECT_ATTR(DW_AT_explicit);
-    COLLECT_ATTR(DW_AT_is_optional);
-    COLLECT_ATTR(DW_AT_location);
-    COLLECT_ATTR(DW_AT_lower_bound);
-    COLLECT_ATTR(DW_AT_mutable);
-    COLLECT_ATTR(DW_AT_ordering);
-    COLLECT_ATTR(DW_AT_picture_string);
-    COLLECT_ATTR(DW_AT_prototyped);
-    COLLECT_ATTR(DW_AT_small);
-    COLLECT_ATTR(DW_AT_segment);
-    COLLECT_ATTR(DW_AT_string_length);
-    COLLECT_ATTR(DW_AT_threads_scaled);
-    COLLECT_ATTR(DW_AT_upper_bound);
-    COLLECT_ATTR(DW_AT_use_location);
-    COLLECT_ATTR(DW_AT_use_UTF8);
-    COLLECT_ATTR(DW_AT_variable_parameter);
-    COLLECT_ATTR(DW_AT_virtuality);
-    COLLECT_ATTR(DW_AT_visibility);
-    COLLECT_ATTR(DW_AT_vtable_elem_location);
-    COLLECT_ATTR(DW_AT_type);
+#define HANDLE_DIE_HASH_ATTR(NAME) \
+  case dwarf::NAME: \
+    Attrs.NAME = V; \
+    break;
+#include "DIEHashAttributes.def"
     default:
       break;
     }
@@ -366,62 +318,12 @@ void DIEHash::hashAttribute(const DIEValue &Value, dwarf::Tag Tag) {
 // Go through the attributes from \param Attrs in the order specified in 7.27.4
 // and hash them.
 void DIEHash::hashAttributes(const DIEAttrs &Attrs, dwarf::Tag Tag) {
-#define ADD_ATTR(ATTR) \
+#define HANDLE_DIE_HASH_ATTR(NAME) \
   { \
-    if (ATTR) \
-      hashAttribute(ATTR, Tag); \
+    if (Attrs.NAME) \
+      hashAttribute(Attrs.NAME, Tag); \
   }
-
-  ADD_ATTR(Attrs.DW_AT_name);
-  ADD_ATTR(Attrs.DW_AT_accessibility);
-  ADD_ATTR(Attrs.DW_AT_address_class);
-  ADD_ATTR(Attrs.DW_AT_allocated);
-  ADD_ATTR(Attrs.DW_AT_artificial);
-  ADD_ATTR(Attrs.DW_AT_associated);
-  ADD_ATTR(Attrs.DW_AT_binary_scale);
-  ADD_ATTR(Attrs.DW_AT_bit_offset);
-  ADD_ATTR(Attrs.DW_AT_bit_size);
-  ADD_ATTR(Attrs.DW_AT_bit_stride);
-  ADD_ATTR(Attrs.DW_AT_byte_size);
-  ADD_ATTR(Attrs.DW_AT_byte_stride);
-  ADD_ATTR(Attrs.DW_AT_const_expr);
-  ADD_ATTR(Attrs.DW_AT_const_value);
-  ADD_ATTR(Attrs.DW_AT_containing_type);
-  ADD_ATTR(Attrs.DW_AT_count);
-  ADD_ATTR(Attrs.DW_AT_data_bit_offset);
-  ADD_ATTR(Attrs.DW_AT_data_location);
-  ADD_ATTR(Attrs.DW_AT_data_member_location);
-  ADD_ATTR(Attrs.DW_AT_decimal_scale);
-  ADD_ATTR(Attrs.DW_AT_decimal_sign);
-  ADD_ATTR(Attrs.DW_AT_default_value);
-  ADD_ATTR(Attrs.DW_AT_digit_count);
-  ADD_ATTR(Attrs.DW_AT_discr);
-  ADD_ATTR(Attrs.DW_AT_discr_list);
-  ADD_ATTR(Attrs.DW_AT_discr_value);
-  ADD_ATTR(Attrs.DW_AT_encoding);
-  ADD_ATTR(Attrs.DW_AT_enum_class);
-  ADD_ATTR(Attrs.DW_AT_endianity);
-  ADD_ATTR(Attrs.DW_AT_explicit);
-  ADD_ATTR(Attrs.DW_AT_is_optional);
-  ADD_ATTR(Attrs.DW_AT_location);
-  ADD_ATTR(Attrs.DW_AT_lower_bound);
-  ADD_ATTR(Attrs.DW_AT_mutable);
-  ADD_ATTR(Attrs.DW_AT_ordering);
-  ADD_ATTR(Attrs.DW_AT_picture_string);
-  ADD_ATTR(Attrs.DW_AT_prototyped);
-  ADD_ATTR(Attrs.DW_AT_small);
-  ADD_ATTR(Attrs.DW_AT_segment);
-  ADD_ATTR(Attrs.DW_AT_string_length);
-  ADD_ATTR(Attrs.DW_AT_threads_scaled);
-  ADD_ATTR(Attrs.DW_AT_upper_bound);
-  ADD_ATTR(Attrs.DW_AT_use_location);
-  ADD_ATTR(Attrs.DW_AT_use_UTF8);
-  ADD_ATTR(Attrs.DW_AT_variable_parameter);
-  ADD_ATTR(Attrs.DW_AT_virtuality);
-  ADD_ATTR(Attrs.DW_AT_visibility);
-  ADD_ATTR(Attrs.DW_AT_vtable_elem_location);
-  ADD_ATTR(Attrs.DW_AT_type);
-
+#include "DIEHashAttributes.def"
   // FIXME: Add the extended attributes.
 }

@@ -478,10 +380,12 @@ void DIEHash::computeHash(const DIE &Die) {
 /// DWARF4 standard. It is an md5 hash of the flattened description of the DIE
 /// with the inclusion of the full CU and all top level CU entities.
 // TODO: Initialize the type chain at 0 instead of 1 for CU signatures.
-uint64_t DIEHash::computeCUSignature(const DIE &Die) {
+uint64_t DIEHash::computeCUSignature(StringRef DWOName, const DIE &Die) {
   Numbering.clear();
   Numbering[&Die] = 1;

+  if (!DWOName.empty())
+    Hash.update(DWOName);
   // Hash the DIE.
   computeHash(Die);

@@ -28,64 +28,15 @@ class CompileUnit;
 class DIEHash {
   // Collection of all attributes used in hashing a particular DIE.
   struct DIEAttrs {
-    DIEValue DW_AT_name;
-    DIEValue DW_AT_accessibility;
-    DIEValue DW_AT_address_class;
-    DIEValue DW_AT_allocated;
-    DIEValue DW_AT_artificial;
-    DIEValue DW_AT_associated;
-    DIEValue DW_AT_binary_scale;
-    DIEValue DW_AT_bit_offset;
-    DIEValue DW_AT_bit_size;
-    DIEValue DW_AT_bit_stride;
-    DIEValue DW_AT_byte_size;
-    DIEValue DW_AT_byte_stride;
-    DIEValue DW_AT_const_expr;
-    DIEValue DW_AT_const_value;
-    DIEValue DW_AT_containing_type;
-    DIEValue DW_AT_count;
-    DIEValue DW_AT_data_bit_offset;
-    DIEValue DW_AT_data_location;
-    DIEValue DW_AT_data_member_location;
-    DIEValue DW_AT_decimal_scale;
-    DIEValue DW_AT_decimal_sign;
-    DIEValue DW_AT_default_value;
-    DIEValue DW_AT_digit_count;
-    DIEValue DW_AT_discr;
-    DIEValue DW_AT_discr_list;
-    DIEValue DW_AT_discr_value;
-    DIEValue DW_AT_encoding;
-    DIEValue DW_AT_enum_class;
-    DIEValue DW_AT_endianity;
-    DIEValue DW_AT_explicit;
-    DIEValue DW_AT_is_optional;
-    DIEValue DW_AT_location;
-    DIEValue DW_AT_lower_bound;
-    DIEValue DW_AT_mutable;
-    DIEValue DW_AT_ordering;
-    DIEValue DW_AT_picture_string;
-    DIEValue DW_AT_prototyped;
-    DIEValue DW_AT_small;
-    DIEValue DW_AT_segment;
-    DIEValue DW_AT_string_length;
-    DIEValue DW_AT_threads_scaled;
-    DIEValue DW_AT_upper_bound;
-    DIEValue DW_AT_use_location;
-    DIEValue DW_AT_use_UTF8;
-    DIEValue DW_AT_variable_parameter;
-    DIEValue DW_AT_virtuality;
-    DIEValue DW_AT_visibility;
-    DIEValue DW_AT_vtable_elem_location;
-    DIEValue DW_AT_type;
-
-    // Insert any additional ones here...
+#define HANDLE_DIE_HASH_ATTR(NAME) DIEValue NAME;
+#include "DIEHashAttributes.def"
   };

 public:
   DIEHash(AsmPrinter *A = nullptr) : AP(A) {}

   /// \brief Computes the CU signature.
-  uint64_t computeCUSignature(const DIE &Die);
+  uint64_t computeCUSignature(StringRef DWOName, const DIE &Die);

   /// \brief Computes the type signature.
   uint64_t computeTypeSignature(const DIE &Die);

55
lib/CodeGen/AsmPrinter/DIEHashAttributes.def
Normal file
@@ -0,0 +1,55 @@
+#ifndef HANDLE_DIE_HASH_ATTR
+#error "Missing macro definition of HANDLE_DIE_HASH_ATTR"
+#endif
+
+HANDLE_DIE_HASH_ATTR(DW_AT_name)
+HANDLE_DIE_HASH_ATTR(DW_AT_accessibility)
+HANDLE_DIE_HASH_ATTR(DW_AT_address_class)
+HANDLE_DIE_HASH_ATTR(DW_AT_allocated)
+HANDLE_DIE_HASH_ATTR(DW_AT_artificial)
+HANDLE_DIE_HASH_ATTR(DW_AT_associated)
+HANDLE_DIE_HASH_ATTR(DW_AT_binary_scale)
+HANDLE_DIE_HASH_ATTR(DW_AT_bit_offset)
+HANDLE_DIE_HASH_ATTR(DW_AT_bit_size)
+HANDLE_DIE_HASH_ATTR(DW_AT_bit_stride)
+HANDLE_DIE_HASH_ATTR(DW_AT_byte_size)
+HANDLE_DIE_HASH_ATTR(DW_AT_byte_stride)
+HANDLE_DIE_HASH_ATTR(DW_AT_const_expr)
+HANDLE_DIE_HASH_ATTR(DW_AT_const_value)
+HANDLE_DIE_HASH_ATTR(DW_AT_containing_type)
+HANDLE_DIE_HASH_ATTR(DW_AT_count)
+HANDLE_DIE_HASH_ATTR(DW_AT_data_bit_offset)
+HANDLE_DIE_HASH_ATTR(DW_AT_data_location)
+HANDLE_DIE_HASH_ATTR(DW_AT_data_member_location)
+HANDLE_DIE_HASH_ATTR(DW_AT_decimal_scale)
+HANDLE_DIE_HASH_ATTR(DW_AT_decimal_sign)
+HANDLE_DIE_HASH_ATTR(DW_AT_default_value)
+HANDLE_DIE_HASH_ATTR(DW_AT_digit_count)
+HANDLE_DIE_HASH_ATTR(DW_AT_discr)
+HANDLE_DIE_HASH_ATTR(DW_AT_discr_list)
+HANDLE_DIE_HASH_ATTR(DW_AT_discr_value)
+HANDLE_DIE_HASH_ATTR(DW_AT_encoding)
+HANDLE_DIE_HASH_ATTR(DW_AT_enum_class)
+HANDLE_DIE_HASH_ATTR(DW_AT_endianity)
+HANDLE_DIE_HASH_ATTR(DW_AT_explicit)
+HANDLE_DIE_HASH_ATTR(DW_AT_is_optional)
+HANDLE_DIE_HASH_ATTR(DW_AT_location)
+HANDLE_DIE_HASH_ATTR(DW_AT_lower_bound)
+HANDLE_DIE_HASH_ATTR(DW_AT_mutable)
+HANDLE_DIE_HASH_ATTR(DW_AT_ordering)
+HANDLE_DIE_HASH_ATTR(DW_AT_picture_string)
+HANDLE_DIE_HASH_ATTR(DW_AT_prototyped)
+HANDLE_DIE_HASH_ATTR(DW_AT_small)
+HANDLE_DIE_HASH_ATTR(DW_AT_segment)
+HANDLE_DIE_HASH_ATTR(DW_AT_string_length)
+HANDLE_DIE_HASH_ATTR(DW_AT_threads_scaled)
+HANDLE_DIE_HASH_ATTR(DW_AT_upper_bound)
+HANDLE_DIE_HASH_ATTR(DW_AT_use_location)
+HANDLE_DIE_HASH_ATTR(DW_AT_use_UTF8)
+HANDLE_DIE_HASH_ATTR(DW_AT_variable_parameter)
+HANDLE_DIE_HASH_ATTR(DW_AT_virtuality)
+HANDLE_DIE_HASH_ATTR(DW_AT_visibility)
+HANDLE_DIE_HASH_ATTR(DW_AT_vtable_elem_location)
+HANDLE_DIE_HASH_ATTR(DW_AT_type)
+
+#undef HANDLE_DIE_HASH_ATTR
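The new .def file is consumed three times in this diff through the HANDLE_DIE_HASH_ATTR X-macro: to declare the DIEAttrs members, to generate the collectAttributes switch cases, and to hash each attribute. A self-contained toy illustration of that pattern, using placeholder entries instead of the real include:

    #include <cstdio>

    // Stand-in for '#include "DIEHashAttributes.def"': expands a handler macro
    // once per entry. The entries below are placeholders, not the real list.
    #define FOR_EACH_HASH_ATTR(X) X(DW_AT_name) X(DW_AT_byte_size) X(DW_AT_type)

    // Consumer 1: declare one member per attribute.
    struct ToyAttrs {
    #define HANDLE_DIE_HASH_ATTR(NAME) bool NAME;
      FOR_EACH_HASH_ATTR(HANDLE_DIE_HASH_ATTR)
    #undef HANDLE_DIE_HASH_ATTR
    };

    // Consumer 2: act on every attribute without repeating the list.
    static void dump(const ToyAttrs &Attrs) {
    #define HANDLE_DIE_HASH_ATTR(NAME) std::printf(#NAME " = %d\n", (int)Attrs.NAME);
      FOR_EACH_HASH_ATTR(HANDLE_DIE_HASH_ATTR)
    #undef HANDLE_DIE_HASH_ATTR
    }

    int main() {
      ToyAttrs Attrs = {true, false, true};
      dump(Attrs);
      return 0;
    }

Keeping the attribute list in one .def file means every consumer stays in sync when an entry is added, which is the point of the DIEHash change above.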
Some files were not shown because too many files have changed in this diff.