Vendor import of llvm trunk r178860:

http://llvm.org/svn/llvm-project/llvm/trunk@178860
Dimitry Andric 2013-04-08 18:41:23 +00:00
parent 482e7bddf6
commit 4a16efa3e4
4058 changed files with 367586 additions and 151868 deletions

.arcconfig Normal file

@ -0,0 +1,4 @@
{
"project_id" : "llvm",
"conduit_uri" : "http://llvm-reviews.chandlerc.com/"
}


@ -11,7 +11,7 @@ set(CMAKE_MODULE_PATH
)
set(LLVM_VERSION_MAJOR 3)
set(LLVM_VERSION_MINOR 2)
set(LLVM_VERSION_MINOR 3)
set(PACKAGE_VERSION "${LLVM_VERSION_MAJOR}.${LLVM_VERSION_MINOR}svn")
@ -74,8 +74,8 @@ set(LLVM_EXAMPLES_BINARY_DIR ${LLVM_BINARY_DIR}/examples)
set(LLVM_LIBDIR_SUFFIX "" CACHE STRING "Define suffix of library directory name (32/64)" )
set(LLVM_ALL_TARGETS
AArch64
ARM
CellSPU
CppBackend
Hexagon
Mips
@ -186,13 +186,16 @@ endif( LLVM_USE_INTEL_JITEVENTS )
option(LLVM_USE_OPROFILE
"Use opagent JIT interface to inform OProfile about JIT code" OFF)
# If enabled, ierify we are on a platform that supports oprofile.
# If enabled, verify we are on a platform that supports oprofile.
if( LLVM_USE_OPROFILE )
if( NOT CMAKE_SYSTEM_NAME MATCHES "Linux" )
message(FATAL_ERROR "OProfile support is available on Linux only.")
endif( NOT CMAKE_SYSTEM_NAME MATCHES "Linux" )
endif( LLVM_USE_OPROFILE )
set(LLVM_USE_SANITIZER "" CACHE STRING
"Define the sanitizer used to build binaries and tests.")
# Define an option controlling whether we should build for 32-bit on 64-bit
# platforms, where supported.
if( CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT WIN32 )
@ -243,8 +246,7 @@ include(config-ix)
# invocation time.
set(LLVM_DEFAULT_TARGET_TRIPLE "${LLVM_HOST_TRIPLE}" CACHE STRING
"Default target for which LLVM will generate code." )
set(TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}" CACHE STRING
"Default target for which LLVM will generate code." )
set(TARGET_TRIPLE "${LLVM_DEFAULT_TARGET_TRIPLE}")
include(HandleLLVMOptions)
@ -377,10 +379,21 @@ set(CMAKE_INCLUDE_CURRENT_DIR ON)
include_directories( ${LLVM_BINARY_DIR}/include ${LLVM_MAIN_INCLUDE_DIR})
if( ${CMAKE_SYSTEM_NAME} MATCHES FreeBSD )
# On FreeBSD, /usr/local/* is not used by default. In order to build LLVM
# with libxml2, iconv.h, etc., we must add /usr/local paths.
include_directories("/usr/local/include")
link_directories("/usr/local/lib")
endif( ${CMAKE_SYSTEM_NAME} MATCHES FreeBSD )
if( ${CMAKE_SYSTEM_NAME} MATCHES SunOS )
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -include llvm/Support/Solaris.h")
endif( ${CMAKE_SYSTEM_NAME} MATCHES SunOS )
# Make sure we don't get -rdynamic in every binary. For those that need it,
# use set_target_properties(target PROPERTIES ENABLE_EXPORTS 1)
set(CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
include(AddLLVM)
include(TableGen)
@ -406,7 +419,6 @@ add_subdirectory(utils/count)
add_subdirectory(utils/not)
add_subdirectory(utils/llvm-lit)
add_subdirectory(utils/yaml-bench)
add_subdirectory(utils/obj2yaml)
add_subdirectory(utils/yaml2obj)
add_subdirectory(projects)
@ -428,7 +440,7 @@ if( LLVM_INCLUDE_TESTS )
add_subdirectory(utils/unittest)
add_subdirectory(unittests)
if (MSVC)
# This utility is used to prevent chrashing tests from calling Dr. Watson on
# This utility is used to prevent crashing tests from calling Dr. Watson on
# Windows.
add_subdirectory(utils/KillTheDoctor)
endif()


@ -8,9 +8,30 @@ beautification by scripts. The fields are: name (N), email (E), web-address
(W), PGP key ID and fingerprint (P), description (D), and snail-mail address
(S).
N: Joe Abbey
E: jabbey@arxan.com
D: LLVM Bitcode (lib/Bitcode/* include/llvm/Bitcode/*)
N: Owen Anderson
E: resistor@mac.com
D: SelectionDAG (lib/CodeGen/SelectionDAG/*)
N: Rafael Avila de Espindola
E: rafael.espindola@gmail.com
D: Gold plugin (tools/gold/*)
N: Chandler Carruth
E: chandlerc@gmail.com
E: chandlerc@google.com
D: Config, ADT, Support, inlining & related passes, SROA/mem2reg & related passes, CMake, library layering
N: Evan Cheng
E: evan.cheng@apple.com
D: Code generator and all targets
D: ARM target, parts of code generator not covered by someone else
N: Eric Christopher
E: echristo@gmail.com
D: Debug Information, autotools/configure/make build, inline assembly
N: Greg Clayton
D: LLDB
@ -18,34 +39,93 @@ D: LLDB
N: Peter Collingbourne
D: libclc
N: Doug Gregor
D: Clang Frontend Libraries
N: Anshuman Dasgupta
E: adasgupt@codeaurora.org
D: Hexagon Backend
N: Hal Finkel
E: hfinkel@anl.gov
D: BBVectorize and the PowerPC target
N: Venkatraman Govindaraju
E: venkatra@cs.wisc.edu
D: Sparc Backend (lib/Target/Sparc/*)
N: Tobias Grosser
D: Polly
N: James Grosbach
E: grosbach@apple.com
D: MC layer
N: Howard Hinnant
D: libc++
N: Anton Korobeynikov
E: asl@math.spbu.ru
D: Exception handling, debug information, and Windows codegen
N: Justin Holewinski
E: jholewinski@nvidia.com
D: NVPTX Target (lib/Target/NVPTX/*)
N: Ted Kremenek
D: Clang Static Analyzer
N: Andy Kaylor
E: andrew.kaylor@intel.com
D: MCJIT, RuntimeDyld and JIT event listeners
N: Galina Kistanova
E: gkistanova@gmail.com
D: LLVM Buildbot
N: Anton Korobeynikov
E: anton@korobeynikov.info
D: Exception handling, Windows codegen, ARM EABI
N: Benjamin Kramer
E: benny.kra@gmail.com
D: DWARF Parser
N: Sergei Larin
E: slarin@codeaurora.org
D: VLIW Instruction Scheduling, Packetization
N: Chris Lattner
E: sabre@nondot.org
W: http://nondot.org/~sabre/
D: Everything not covered by someone else
N: John McCall
E: rjmccall@apple.com
D: Clang LLVM IR generation
N: Tim Northover
E: Tim.Northover@arm.com
D: AArch64 backend
N: Jakob Olesen
D: Register allocators and TableGen
N: Richard Osborne
E: richard@xmos.com
D: XCore Backend
N: Chad Rosier
E: mcrosier@apple.com
D: Fast-Isel
N: Nadav Rotem
E: nrotem@apple.com
D: X86 Backend, Loop Vectorizer
N: Duncan Sands
E: baldrick@free.fr
D: DragonEgg
N: Michael Spencer
E: bigcheesegs@gmail.com
D: Windows parts of Support, Object, ar, nm, objdump, ranlib, size
N: Tom Stellard
E: thomas.stellard@amd.com
E: mesa-dev@lists.freedesktop.org
D: R600 Backend
N: Andrew Trick
E: atrick@apple.com
D: IndVar Simplify, Loop Strength Reduction, Instruction Scheduling
N: Bill Wendling
E: wendling@apple.com
D: libLTO & IR Linker


@ -60,9 +60,11 @@ D: Loop unrolling with run-time trip counts.
N: Chandler Carruth
E: chandlerc@gmail.com
E: chandlerc@google.com
D: Hashing algorithms and interfaces
D: Inline cost analysis
D: Machine block placement pass
D: SROA
N: Casey Carter
E: ccarter@uiuc.edu
@ -98,7 +100,7 @@ E: adasgupt@codeaurora.org
D: Deterministic finite automaton based infrastructure for VLIW packetization
N: Stefanus Du Toit
E: stefanus.dutoit@rapidmind.com
E: stefanus.du.toit@intel.com
D: Bug fixes and minor improvements
N: Rafael Avila de Espindola
@ -141,7 +143,7 @@ E: foldr@codedgers.com
D: Author of llvmc2
N: Dan Gohman
E: gohman@apple.com
E: dan433584@gmail.com
D: Miscellaneous bug fixes
N: David Goodwin
@ -361,8 +363,8 @@ D: ARM fast-isel improvements
D: Performance monitoring
N: Nadav Rotem
E: nadav.rotem@intel.com
D: Vector code generation improvements.
E: nrotem@apple.com
D: X86 code generation improvements, Loop Vectorizer.
N: Roman Samoilov
E: roman@codedgers.com
@ -402,6 +404,10 @@ E: rspencer@reidspencer.com
W: http://reidspencer.com/
D: Lots of stuff, see: http://wiki.llvm.org/index.php/User:Reid
N: Craig Topper
E: craig.topper@gmail.com
D: X86 codegen and disassembler improvements. AVX2 support.
N: Edwin Torok
E: edwintorok@gmail.com
D: Miscellaneous bug fixes
@ -417,7 +423,6 @@ D: Thread Local Storage implementation
N: Bill Wendling
E: wendling@apple.com
D: Exception handling
D: Bunches of stuff
N: Bob Wilson


@ -4,7 +4,7 @@ LLVM Release License
University of Illinois/NCSA
Open Source License
Copyright (c) 2003-2012 University of Illinois at Urbana-Champaign.
Copyright (c) 2003-2013 University of Illinois at Urbana-Champaign.
All rights reserved.
Developed by:
@ -64,7 +64,7 @@ Program Directory
Autoconf llvm/autoconf
llvm/projects/ModuleMaker/autoconf
llvm/projects/sample/autoconf
CellSPU backend llvm/lib/Target/CellSPU/README.txt
Google Test llvm/utils/unittest/googletest
OpenBSD regex llvm/lib/Support/{reg*, COPYRIGHT.regex}
pyyaml tests llvm/test/YAMLParser/{*.data, LICENSE.TXT}
ARM contributions llvm/lib/Target/ARM/LICENSE.TXT


@ -11,8 +11,8 @@ LEVEL := .
# Top-Level LLVM Build Stages:
# 1. Build lib/Support and lib/TableGen, which are used by utils (tblgen).
# 2. Build utils, which is used by VMCore.
# 3. Build VMCore, which builds the Intrinsics.inc file used by libs.
# 2. Build utils, which is used by IR.
# 3. Build IR, which builds the Intrinsics.inc file used by libs.
# 4. Build libs, which are needed by llvm-config.
# 5. Build llvm-config, which determines inter-lib dependencies for tools.
# 6. Build tools, runtime, docs.
@ -30,7 +30,7 @@ ifeq ($(BUILD_DIRS_ONLY),1)
DIRS := lib/Support lib/TableGen utils tools/llvm-config
OPTIONAL_DIRS := tools/clang/utils/TableGen
else
DIRS := lib/Support lib/TableGen utils lib/VMCore lib tools/llvm-shlib \
DIRS := lib/Support lib/TableGen utils lib/IR lib tools/llvm-shlib \
tools/llvm-config tools runtime docs unittests
OPTIONAL_DIRS := projects bindings
endif
@ -248,13 +248,26 @@ build-for-llvm-top:
SVN = svn
SVN-UPDATE-OPTIONS =
AWK = awk
SUB-SVN-DIRS = $(AWK) '/I|\? / {print $$2}' \
| LC_ALL=C xargs $(SVN) info 2>/dev/null \
| $(AWK) '/^Path:\ / {print $$2}'
# Multiline variable defining a recursive function for finding svn repos rooted at
# a given path. svnup() requires one argument: the root to search from.
define SUB_SVN_DIRS
svnup() {
dirs=`svn status --no-ignore $$1 | awk '/I|\? / {print $$2}' | LC_ALL=C xargs svn info 2>/dev/null | awk '/^Path:\ / {print $$2}'`;
if [ "$$dirs" = "" ]; then
return;
fi;
for f in $$dirs; do
echo $$f;
svnup $$f;
done
}
endef
export SUB_SVN_DIRS
update:
$(SVN) $(SVN-UPDATE-OPTIONS) update $(LLVM_SRC_ROOT)
@ $(SVN) status --no-ignore $(LLVM_SRC_ROOT) | $(SUB-SVN-DIRS) | xargs $(SVN) $(SVN-UPDATE-OPTIONS) update
@eval $$SUB_SVN_DIRS; $(SVN) status --no-ignore $(LLVM_SRC_ROOT) | svnup $(LLVM_SRC_ROOT) | xargs $(SVN) $(SVN-UPDATE-OPTIONS) update
happiness: update all check-all
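
For readers less familiar with the multiline-variable idiom above, the svnup() shell function exported through SUB_SVN_DIRS can be approximated by the following Python sketch (illustrative only, not part of the Makefile; it assumes the svn command-line client is on PATH):

import subprocess

def svnup(root):
    # List ignored ('I') and unversioned ('?') entries under root, keep only
    # those that are themselves svn working copies, and recurse into each one.
    status = subprocess.check_output(['svn', 'status', '--no-ignore', root])
    candidates = [line.split()[-1] for line in status.splitlines()
                  if line.startswith(('I', '?'))]
    for path in candidates:
        try:
            subprocess.check_output(['svn', 'info', path],
                                    stderr=subprocess.STDOUT)
        except (subprocess.CalledProcessError, OSError):
            continue
        yield path
        for sub in svnup(path):
            yield sub

The update target then feeds every path produced this way to svn update, so nested checkouts (such as tools/clang) are updated along with the top-level tree.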


@ -28,7 +28,7 @@
# built in any order. All DIRS are built in order before PARALLEL_DIRS are
# built, which are then built in any order.
#
# 4. Source - If specified, this sets the source code filenames. If this
# 4. SOURCES - If specified, this sets the source code filenames. If this
# is not set, it defaults to be all of the .cpp, .c, .y, and .l files
# in the current directory.
#


@ -134,6 +134,9 @@ BUILD_CXX=@BUILD_CXX@
# Triple for configuring build tools when cross-compiling
BUILD_TRIPLE=@build@
# Target triple (cpu-vendor-os) which LLVM is compiled for
HOST_TRIPLE=@host@
# Target triple (cpu-vendor-os) for which we should generate code
TARGET_TRIPLE=@target@
@ -153,8 +156,17 @@ CXX = @CXX@
# Path to the CC binary, which is used by testcases for native builds.
CC := @CC@
# C/C++ preprocessor flags.
CPPFLAGS += @CPPFLAGS@
# C compiler flags.
CFLAGS += @CFLAGS@
# C++ compiler flags.
CXXFLAGS += @CXXFLAGS@
# Linker flags.
LDFLAGS+=@LDFLAGS@
LDFLAGS += @LDFLAGS@
# Path to the library archiver program.
AR_PATH = @AR@
@ -176,6 +188,7 @@ RANLIB := @RANLIB@
RM := @RM@
SED := @SED@
TAR := @TAR@
PYTHON := @PYTHON@
# Paths to miscellaneous programs we hope are present but might not be
BZIP2 := @BZIP2@
@ -222,6 +235,15 @@ ENABLE_LIBCPP = @ENABLE_LIBCPP@
# When ENABLE_CXX11 is enabled, LLVM uses c++11 mode by default to build.
ENABLE_CXX11 = @ENABLE_CXX11@
# When ENABLE_CLANG_ARCMT is enabled, clang will have ARCMigrationTool.
ENABLE_CLANG_ARCMT = @ENABLE_CLANG_ARCMT@
# When ENABLE_CLANG_REWRITER is enabled, clang will have Rewriter.
ENABLE_CLANG_REWRITER = @ENABLE_CLANG_REWRITER@
# When ENABLE_CLANG_STATIC_ANALYZER is enabled, clang will have StaticAnalyzer.
ENABLE_CLANG_STATIC_ANALYZER = @ENABLE_CLANG_STATIC_ANALYZER@
# When ENABLE_WERROR is enabled, we'll pass -Werror on the command line
ENABLE_WERROR = @ENABLE_WERROR@
@ -278,7 +300,7 @@ ENABLE_DOCS = @ENABLE_DOCS@
ENABLE_DOXYGEN = @ENABLE_DOXYGEN@
# Do we want to enable threads?
ENABLE_THREADS := @ENABLE_THREADS@
ENABLE_THREADS := @LLVM_ENABLE_THREADS@
# Do we want to build with position independent code?
ENABLE_PIC := @ENABLE_PIC@
@ -349,6 +371,10 @@ NO_MISSING_FIELD_INITIALIZERS = @NO_MISSING_FIELD_INITIALIZERS@
NO_VARIADIC_MACROS = @NO_VARIADIC_MACROS@
# -Wcovered-switch-default
COVERED_SWITCH_DEFAULT = @COVERED_SWITCH_DEFAULT@
# -Wno-uninitialized
NO_UNINITIALIZED = @NO_UNINITIALIZED@
# -Wno-maybe-uninitialized
NO_MAYBE_UNINITIALIZED = @NO_MAYBE_UNINITIALIZED@
# Was polly found in tools/polly?
LLVM_HAS_POLLY = @LLVM_HAS_POLLY@


@ -97,7 +97,7 @@ endif
$(LLVMBuildMakeFrag): $(PROJ_SRC_ROOT)/Makefile.rules \
$(PROJ_OBJ_ROOT)/Makefile.config
$(Echo) Constructing LLVMBuild project information.
$(Verb) $(LLVMBuildTool) \
$(Verb)$(PYTHON) $(LLVMBuildTool) \
--native-target "$(TARGET_NATIVE_ARCH)" \
--enable-targets "$(TARGETS_TO_BUILD)" \
--enable-optional-components "$(OPTIONAL_COMPONENTS)" \
@ -280,12 +280,6 @@ ifeq ($(ENABLE_OPTIMIZED),1)
endif
endif
# Darwin requires -fstrict-aliasing to be explicitly enabled.
# Avoid -fstrict-aliasing on Darwin for now, there are unresolved issues
# with -fstrict-aliasing and ipa-type-escape radr://6756684
#ifeq ($(HOST_OS),Darwin)
# EXTRA_OPTIONS += -fstrict-aliasing -Wstrict-aliasing
#endif
CXX.Flags += $(OPTIMIZE_OPTION) $(OmitFramePointer)
C.Flags += $(OPTIMIZE_OPTION) $(OmitFramePointer)
LD.Flags += $(OPTIMIZE_OPTION)
@ -583,16 +577,24 @@ ifeq ($(HOST_OS),Darwin)
LoadableModuleOptions := -Wl,-flat_namespace -Wl,-undefined,suppress
SharedLinkOptions := -dynamiclib
ifneq ($(ARCH),ARM)
SharedLinkOptions += -mmacosx-version-min=$(DARWIN_VERSION)
ifdef DEPLOYMENT_TARGET
SharedLinkOptions += $(DEPLOYMENT_TARGET)
else
ifneq ($(ARCH),ARM)
SharedLinkOptions += -mmacosx-version-min=$(DARWIN_VERSION)
endif
endif
else
SharedLinkOptions=-shared
endif
ifeq ($(TARGET_OS),Darwin)
ifneq ($(ARCH),ARM)
TargetCommonOpts += -mmacosx-version-min=$(DARWIN_VERSION)
ifdef DEPLOYMENT_TARGET
TargetCommonOpts += $(DEPLOYMENT_TARGET)
else
ifneq ($(ARCH),ARM)
TargetCommonOpts += -mmacosx-version-min=$(DARWIN_VERSION)
endif
endif
endif
@ -648,7 +650,7 @@ else
ifneq ($(DARWIN_MAJVERS),4)
LD.Flags += $(RPATH) -Wl,@executable_path/../lib
endif
ifeq ($(RC_BUILDIT),YES)
ifeq ($(RC_XBS),YES)
TempFile := $(shell mkdir -p ${OBJROOT}/dSYMs ; mktemp ${OBJROOT}/dSYMs/llvm-lto.XXXXXX)
LD.Flags += -Wl,-object_path_lto -Wl,$(TempFile)
endif
@ -668,7 +670,9 @@ ifndef NO_PEDANTIC
CompileCommonOpts += -pedantic -Wno-long-long
endif
CompileCommonOpts += -Wall -W -Wno-unused-parameter -Wwrite-strings \
$(EXTRA_OPTIONS) $(COVERED_SWITCH_DEFAULT)
$(EXTRA_OPTIONS) $(COVERED_SWITCH_DEFAULT) \
$(NO_UNINITIALIZED) $(NO_MAYBE_UNINITIALIZED) \
$(NO_MISSING_FIELD_INITIALIZERS)
# Enable cast-qual for C++; the workaround is to use const_cast.
CXX.Flags += -Wcast-qual
@ -824,7 +828,7 @@ ObjectsBC := $(BaseNameSources:%=$(ObjDir)/%.bc)
#----------------------------------------------------------
ifeq (-mingw32,$(findstring -mingw32,$(BUILD_TRIPLE)))
ECHOPATH := $(Verb)python -u -c "import sys;print ' '.join(sys.argv[1:])"
ECHOPATH := $(Verb)$(PYTHON) -u -c "import sys;print ' '.join(sys.argv[1:])"
else
ECHOPATH := $(Verb)$(ECHO)
endif
@ -1814,7 +1818,7 @@ TDFiles := $(strip $(wildcard $(PROJ_SRC_DIR)/*.td) \
$(LLVM_SRC_ROOT)/include/llvm/Target/TargetSchedule.td \
$(LLVM_SRC_ROOT)/include/llvm/Target/TargetSelectionDAG.td \
$(LLVM_SRC_ROOT)/include/llvm/CodeGen/ValueTypes.td) \
$(wildcard $(LLVM_SRC_ROOT)/include/llvm/Intrinsics*.td)
$(wildcard $(LLVM_SRC_ROOT)/include/llvm/IR/Intrinsics*.td)
# All .inc.tmp files depend on the .td files.
$(INCTMPFiles) : $(TDFiles)
@ -1869,11 +1873,6 @@ $(ObjDir)/%GenDisassemblerTables.inc.tmp : %.td $(ObjDir)/.dir $(LLVM_TBLGEN)
$(Echo) "Building $(<F) disassembly tables with tblgen"
$(Verb) $(LLVMTableGen) -gen-disassembler -o $(call SYSPATH, $@) $<
$(TARGET:%=$(ObjDir)/%GenEDInfo.inc.tmp): \
$(ObjDir)/%GenEDInfo.inc.tmp : %.td $(ObjDir)/.dir $(LLVM_TBLGEN)
$(Echo) "Building $(<F) enhanced disassembly information with tblgen"
$(Verb) $(LLVMTableGen) -gen-enhanced-disassembly-info -o $(call SYSPATH, $@) $<
$(TARGET:%=$(ObjDir)/%GenFastISel.inc.tmp): \
$(ObjDir)/%GenFastISel.inc.tmp : %.td $(ObjDir)/.dir $(LLVM_TBLGEN)
$(Echo) "Building $(<F) \"fast\" instruction selector implementation with tblgen"


@ -8,10 +8,10 @@ optimizers, and runtime environments.
LLVM is open source software. You may freely distribute it under the terms of
the license agreement found in LICENSE.txt.
Please see the HTML documentation provided in docs/index.html for further
assistance with LLVM.
Please see the documentation provided in docs/ for further
assistance with LLVM, and in particular docs/GettingStarted.rst for getting
started with LLVM and docs/README.txt for an overview of LLVM's
documentation setup.
If you're writing a package for LLVM, see docs/Packaging.html for our
If you're writing a package for LLVM, see docs/Packaging.rst for our
suggestions.


@ -13,7 +13,7 @@ clean() {
### These variables specify the tool versions we want to use.
### Periods should be escaped with backslash for use by grep.
###
### If you update these, please also update docs/GettingStarted.html
### If you update these, please also update docs/GettingStarted.rst
want_autoconf_version='2\.60'
want_autoheader_version=$want_autoconf_version
want_aclocal_version='1\.9\.6'

autoconf/config.sub vendored

@ -251,7 +251,8 @@ case $basic_machine in
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| am33_2.0 \
| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
| be32 | be64 \
| aarch64 \
| be32 | be64 \
| bfin \
| c4x | clipper \
| d10v | d30v | dlx | dsp16xx \
@ -359,6 +360,7 @@ case $basic_machine in
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
| arm-* | armbe-* | armle-* | armeb-* | armv*-* \
| aarch64-* \
| avr-* | avr32-* \
| be32-* | be64-* \
| bfin-* | bs2000-* \


@ -31,21 +31,21 @@ dnl===
dnl===-----------------------------------------------------------------------===
dnl Initialize autoconf and define the package name, version number and
dnl address for reporting bugs.
AC_INIT([LLVM],[3.2svn],[http://llvm.org/bugs/])
AC_INIT([LLVM],[3.3svn],[http://llvm.org/bugs/])
AC_DEFINE([LLVM_VERSION_MAJOR], [3], [Major version of the LLVM API])
AC_DEFINE([LLVM_VERSION_MINOR], [2], [Minor version of the LLVM API])
AC_DEFINE([LLVM_VERSION_MINOR], [3], [Minor version of the LLVM API])
dnl Provide a copyright substitution and ensure the copyright notice is included
dnl in the output of --version option of the generated configure script.
AC_SUBST(LLVM_COPYRIGHT,["Copyright (c) 2003-2012 University of Illinois at Urbana-Champaign."])
AC_COPYRIGHT([Copyright (c) 2003-2012 University of Illinois at Urbana-Champaign.])
AC_SUBST(LLVM_COPYRIGHT,["Copyright (c) 2003-2013 University of Illinois at Urbana-Champaign."])
AC_COPYRIGHT([Copyright (c) 2003-2013 University of Illinois at Urbana-Champaign.])
dnl Indicate that we require autoconf 2.60 or later.
AC_PREREQ(2.60)
dnl Verify that the source directory is valid. This makes sure that we are
dnl configuring LLVM and not some other package (it validates --srcdir argument)
AC_CONFIG_SRCDIR([lib/VMCore/Module.cpp])
AC_CONFIG_SRCDIR([lib/IR/Module.cpp])
dnl Place all of the extra autoconf files into the config subdirectory. Tell
dnl various tools where the m4 autoconf macros are.
@ -59,12 +59,43 @@ if test ${srcdir} != "." ; then
fi
fi
dnl Default to empty (i.e. assigning the null string to) CFLAGS and CXXFLAGS,
dnl instead of the autoconf default (for example, '-g -O2' for CC=gcc).
${CFLAGS=}
${CXXFLAGS=}
dnl We need to check for the compiler up here to avoid anything else
dnl starting with a different one.
AC_PROG_CC(clang llvm-gcc gcc)
AC_PROG_CXX(clang++ llvm-g++ g++)
AC_PROG_CPP
dnl If CXX is Clang, check that it can find and parse C++ standard library
dnl headers.
if test "$CXX" = "clang++" ; then
AC_MSG_CHECKING([whether clang works])
AC_LANG_PUSH([C++])
dnl Note that space between 'include' and '(' is required. There's a broken
dnl regex in aclocal that otherwise will think that we call m4's include
dnl builtin.
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <limits>
#if __has_include (<cxxabi.h>)
#include <cxxabi.h>
#endif
#if __has_include (<unwind.h>)
#include <unwind.h>
#endif
]])],
[
AC_MSG_RESULT([yes])
],
[
AC_MSG_RESULT([no])
AC_MSG_ERROR([Selected compiler could not find or parse C++ standard library headers. Rerun with CC=c-compiler CXX=c++-compiler ./configure ...])
])
AC_LANG_POP([C++])
fi
dnl Configure all of the projects present in our source tree. While we could
dnl just AC_CONFIG_SUBDIRS on the set of directories in projects that have a
dnl configure script, that usage of the AC_CONFIG_SUBDIRS macro is deprecated.
@ -363,6 +394,7 @@ AC_CACHE_CHECK([target architecture],[llvm_cv_target_arch],
sparc*-*) llvm_cv_target_arch="Sparc" ;;
powerpc*-*) llvm_cv_target_arch="PowerPC" ;;
arm*-*) llvm_cv_target_arch="ARM" ;;
aarch64*-*) llvm_cv_target_arch="AArch64" ;;
mips-* | mips64-*) llvm_cv_target_arch="Mips" ;;
mipsel-* | mips64el-*) llvm_cv_target_arch="Mips" ;;
xcore-*) llvm_cv_target_arch="XCore" ;;
@ -396,6 +428,7 @@ case $host in
sparc*-*) host_arch="Sparc" ;;
powerpc*-*) host_arch="PowerPC" ;;
arm*-*) host_arch="ARM" ;;
aarch64*-*) host_arch="AArch64" ;;
mips-* | mips64-*) host_arch="Mips" ;;
mipsel-* | mips64el-*) host_arch="Mips" ;;
xcore-*) host_arch="XCore" ;;
@ -475,6 +508,54 @@ case "$enableval" in
*) AC_MSG_ERROR([Invalid setting for --enable-cxx11. Use "yes" or "no"]) ;;
esac
dnl --enable-clang-arcmt: check whether to enable clang arcmt
clang_arcmt="yes"
AC_ARG_ENABLE(clang-arcmt,
AS_HELP_STRING([--enable-clang-arcmt],
[Enable building of clang ARCMT (default is YES)]),
clang_arcmt="$enableval",
enableval="yes")
case "$enableval" in
yes) AC_SUBST(ENABLE_CLANG_ARCMT,[1]) ;;
no) AC_SUBST(ENABLE_CLANG_ARCMT,[0]) ;;
default) AC_SUBST(ENABLE_CLANG_ARCMT,[1]);;
*) AC_MSG_ERROR([Invalid setting for --enable-clang-arcmt. Use "yes" or "no"]) ;;
esac
dnl --enable-clang-static-analyzer: check whether to enable static-analyzer
clang_static_analyzer="yes"
AC_ARG_ENABLE(clang-static-analyzer,
AS_HELP_STRING([--enable-clang-static-analyzer],
[Enable building of clang Static Analyzer (default is YES)]),
clang_static_analyzer="$enableval",
enableval="yes")
case "$enableval" in
yes) AC_SUBST(ENABLE_CLANG_STATIC_ANALYZER,[1]) ;;
no) AC_SUBST(ENABLE_CLANG_STATIC_ANALYZER,[0]) ;;
default) AC_SUBST(ENABLE_CLANG_STATIC_ANALYZER,[1]);;
*) AC_MSG_ERROR([Invalid setting for --enable-clang-static-analyzer. Use "yes" or "no"]) ;;
esac
dnl --enable-clang-rewriter: check whether to enable clang rewriter
AC_ARG_ENABLE(clang-rewriter,
AS_HELP_STRING([--enable-clang-rewriter],
[Enable building of clang rewriter (default is YES)]),,
enableval="yes")
case "$enableval" in
yes) AC_SUBST(ENABLE_CLANG_REWRITER,[1]) ;;
no)
if test ${clang_arcmt} != "no" ; then
AC_MSG_ERROR([Cannot enable clang ARC Migration Tool while disabling rewriter.])
fi
if test ${clang_static_analyzer} != "no" ; then
AC_MSG_ERROR([Cannot enable clang static analyzer while disabling rewriter.])
fi
AC_SUBST(ENABLE_CLANG_REWRITER,[0])
;;
default) AC_SUBST(ENABLE_CLANG_REWRITER,[1]);;
*) AC_MSG_ERROR([Invalid setting for --enable-clang-rewriter. Use "yes" or "no"]) ;;
esac
dnl --enable-optimized : check whether they want to do an optimized build:
AC_ARG_ENABLE(optimized, AS_HELP_STRING(
--enable-optimized,[Compile with optimizations enabled (default is NO)]),,enableval=$optimize)
@ -566,6 +647,7 @@ else
PowerPC) AC_SUBST(TARGET_HAS_JIT,1) ;;
x86_64) AC_SUBST(TARGET_HAS_JIT,1) ;;
ARM) AC_SUBST(TARGET_HAS_JIT,1) ;;
AArch64) AC_SUBST(TARGET_HAS_JIT,0) ;;
Mips) AC_SUBST(TARGET_HAS_JIT,1) ;;
XCore) AC_SUBST(TARGET_HAS_JIT,0) ;;
MSP430) AC_SUBST(TARGET_HAS_JIT,0) ;;
@ -697,26 +779,26 @@ dnl Allow specific targets to be specified for building (or not)
TARGETS_TO_BUILD=""
AC_ARG_ENABLE([targets],AS_HELP_STRING([--enable-targets],
[Build specific host targets: all or target1,target2,... Valid targets are:
host, x86, x86_64, sparc, powerpc, arm, mips, spu, hexagon,
host, x86, x86_64, sparc, powerpc, arm, aarch64, mips, hexagon,
xcore, msp430, nvptx, and cpp (default=all)]),,
enableval=all)
if test "$enableval" = host-only ; then
enableval=host
fi
case "$enableval" in
all) TARGETS_TO_BUILD="X86 Sparc PowerPC ARM Mips CellSPU XCore MSP430 CppBackend MBlaze NVPTX Hexagon" ;;
all) TARGETS_TO_BUILD="X86 Sparc PowerPC AArch64 ARM Mips XCore MSP430 CppBackend MBlaze NVPTX Hexagon" ;;
*)for a_target in `echo $enableval|sed -e 's/,/ /g' ` ; do
case "$a_target" in
x86) TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
x86_64) TARGETS_TO_BUILD="X86 $TARGETS_TO_BUILD" ;;
sparc) TARGETS_TO_BUILD="Sparc $TARGETS_TO_BUILD" ;;
powerpc) TARGETS_TO_BUILD="PowerPC $TARGETS_TO_BUILD" ;;
aarch64) TARGETS_TO_BUILD="AArch64 $TARGETS_TO_BUILD" ;;
arm) TARGETS_TO_BUILD="ARM $TARGETS_TO_BUILD" ;;
mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;;
mipsel) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;;
mips64) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;;
mips64el) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;;
spu) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;;
xcore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;;
msp430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;;
cpp) TARGETS_TO_BUILD="CppBackend $TARGETS_TO_BUILD" ;;
@ -731,7 +813,6 @@ case "$enableval" in
ARM) TARGETS_TO_BUILD="ARM $TARGETS_TO_BUILD" ;;
Mips) TARGETS_TO_BUILD="Mips $TARGETS_TO_BUILD" ;;
MBlaze) TARGETS_TO_BUILD="MBlaze $TARGETS_TO_BUILD" ;;
CellSPU|SPU) TARGETS_TO_BUILD="CellSPU $TARGETS_TO_BUILD" ;;
XCore) TARGETS_TO_BUILD="XCore $TARGETS_TO_BUILD" ;;
MSP430) TARGETS_TO_BUILD="MSP430 $TARGETS_TO_BUILD" ;;
Hexagon) TARGETS_TO_BUILD="Hexagon $TARGETS_TO_BUILD" ;;
@ -1165,10 +1246,15 @@ fi
dnl Verify that GCC is version 3.0 or higher
if test "$GCC" = "yes"
then
AC_COMPILE_IFELSE([[#if !defined(__GNUC__) || __GNUC__ < 3
#error Unsupported GCC version
#endif
]], [], [AC_MSG_ERROR([gcc 3.x required, but you have a lower version])])
AC_COMPILE_IFELSE(
[
AC_LANG_SOURCE([[
#if !defined(__GNUC__) || __GNUC__ < 3
#error Unsupported GCC version
#endif
]])
],
[], [AC_MSG_ERROR([gcc 3.x required, but you have a lower version])])
fi
dnl Check for GNU Make. We use its extensions, so don't build without it
@ -1185,7 +1271,53 @@ AC_MSG_CHECKING([optional compiler flags])
CXX_FLAG_CHECK(NO_VARIADIC_MACROS, [-Wno-variadic-macros])
CXX_FLAG_CHECK(NO_MISSING_FIELD_INITIALIZERS, [-Wno-missing-field-initializers])
CXX_FLAG_CHECK(COVERED_SWITCH_DEFAULT, [-Wcovered-switch-default])
AC_MSG_RESULT([$NO_VARIADIC_MACROS $NO_MISSING_FIELD_INITIALIZERS $COVERED_SWITCH_DEFAULT])
dnl GCC's potential uninitialized use analysis is weak and presents lots of
dnl false positives, so disable it.
NO_UNINITIALIZED=
NO_MAYBE_UNINITIALIZED=
if test "$GXX" = "yes"
then
CXX_FLAG_CHECK(NO_MAYBE_UNINITIALIZED, [-Wno-maybe-uninitialized])
dnl gcc 4.7 introduced -Wmaybe-uninitialized to distinguish cases which are
dnl known to be uninitialized from cases which might be uninitialized. We
dnl still want to catch the first kind of errors.
if test -z "$NO_MAYBE_UNINITIALIZED"
then
CXX_FLAG_CHECK(NO_UNINITIALIZED, [-Wno-uninitialized])
fi
fi
AC_MSG_RESULT([$NO_VARIADIC_MACROS $NO_MISSING_FIELD_INITIALIZERS $COVERED_SWITCH_DEFAULT $NO_UNINITIALIZED $NO_MAYBE_UNINITIALIZED])
AC_ARG_WITH([python],
[AS_HELP_STRING([--with-python], [path to python])],
[PYTHON="$withval"])
if test -n "$PYTHON" && test -x "$PYTHON" ; then
AC_MSG_CHECKING([for python])
AC_MSG_RESULT([user defined: $with_python])
else
if test -n "$PYTHON" ; then
AC_MSG_WARN([specified python ($PYTHON) is not usable, searching path])
fi
AC_PATH_PROG([PYTHON], [python python2 python26],
[AC_MSG_RESULT([not found])
AC_MSG_ERROR([could not find python 2.5 or higher])])
fi
AC_MSG_CHECKING([for python >= 2.5])
ac_python_version=`$PYTHON -c 'import sys; print sys.version.split()[[0]]'`
ac_python_version_major=`echo $ac_python_version | cut -d'.' -f1`
ac_python_version_minor=`echo $ac_python_version | cut -d'.' -f2`
ac_python_version_patch=`echo $ac_python_version | cut -d'.' -f3`
if test "$ac_python_version_major" -eq "2" \
&& test "$ac_python_version_minor" -ge "5" ; then
AC_MSG_RESULT([$PYTHON ($ac_python_version)])
else
AC_MSG_RESULT([not found])
AC_MSG_FAILURE([found python $ac_python_version ($PYTHON); required >= 2.5])
fi
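
In other words, the gate above accepts any Python 2.x with x >= 5 and rejects everything else (including Python 3). Expressed directly in Python rather than shell, the same logic is roughly the following sketch (not part of configure.ac):

import sys

major, minor = sys.version_info[0], sys.version_info[1]
if major != 2 or minor < 5:
    raise SystemExit('found python %d.%d; required >= 2.5' % (major, minor))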
dnl===-----------------------------------------------------------------------===
dnl===
@ -1204,6 +1336,11 @@ AC_SEARCH_LIBS(dlopen,dl,AC_DEFINE([HAVE_DLOPEN],[1],
[Define if dlopen() is available on this platform.]),
AC_MSG_WARN([dlopen() not found - disabling plugin support]))
dnl Search for the clock_gettime() function. Note that we rely on the POSIX
dnl macros to detect whether clock_gettime is available, this just finds the
dnl right libraries to link with.
AC_SEARCH_LIBS(clock_gettime,rt)
dnl libffi is optional; used to call external functions from the interpreter
if test "$llvm_cv_enable_libffi" = "yes" ; then
AC_SEARCH_LIBS(ffi_call,ffi,AC_DEFINE([HAVE_FFI_CALL],[1],
@ -1356,6 +1493,7 @@ AC_CHECK_HEADERS([sys/mman.h sys/param.h sys/resource.h sys/time.h sys/uio.h])
AC_CHECK_HEADERS([sys/types.h sys/ioctl.h malloc/malloc.h mach/mach.h])
AC_CHECK_HEADERS([valgrind/valgrind.h])
AC_CHECK_HEADERS([fenv.h])
AC_CHECK_DECLS([FE_ALL_EXCEPT, FE_INEXACT], [], [], [[#include <fenv.h>]])
if test "$LLVM_ENABLE_THREADS" -eq 1 && test "$ENABLE_PTHREADS" -eq 1 ; then
AC_CHECK_HEADERS(pthread.h,
AC_SUBST(HAVE_PTHREAD, 1),
@ -1375,18 +1513,23 @@ AC_CHECK_HEADERS([CrashReporterClient.h])
dnl Try to find Darwin specific crash reporting global.
AC_MSG_CHECKING([__crashreporter_info__])
AC_LINK_IFELSE(
AC_LANG_SOURCE(
[[extern const char *__crashreporter_info__;
int main() {
__crashreporter_info__ = "test";
return 0;
}
]]),
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_CRASHREPORTER_INFO, 1, Can use __crashreporter_info__),
AC_MSG_RESULT(no)
AC_DEFINE(HAVE_CRASHREPORTER_INFO, 0,
Define if __crashreporter_info__ exists.))
[
AC_LANG_SOURCE([[
extern const char *__crashreporter_info__;
int main() {
__crashreporter_info__ = "test";
return 0;
}
]])
],
[
AC_MSG_RESULT([yes])
AC_DEFINE([HAVE_CRASHREPORTER_INFO], [1], [can use __crashreporter_info__])
],
[
AC_MSG_RESULT([no])
AC_DEFINE([HAVE_CRASHREPORTER_INFO], [0], [can use __crashreporter_info__])
])
dnl===-----------------------------------------------------------------------===
dnl===
@ -1412,6 +1555,7 @@ dnl===-----------------------------------------------------------------------===
AC_CHECK_FUNCS([backtrace ceilf floorf roundf rintf nearbyintf getcwd ])
AC_CHECK_FUNCS([powf fmodf strtof round ])
AC_CHECK_FUNCS([log log2 log10 exp exp2])
AC_CHECK_FUNCS([getpagesize getrusage getrlimit setrlimit gettimeofday ])
AC_CHECK_FUNCS([isatty mkdtemp mkstemp ])
AC_CHECK_FUNCS([mktemp posix_spawn pread realpath sbrk setrlimit strdup ])
@ -1449,10 +1593,15 @@ fi
dnl Check Win32 API EnumerateLoadedModules.
if test "$llvm_cv_os_type" = "MingW" ; then
AC_MSG_CHECKING([whether EnumerateLoadedModules() accepts new decl])
AC_COMPILE_IFELSE([[#include <windows.h>
#include <imagehlp.h>
extern void foo(PENUMLOADED_MODULES_CALLBACK);
extern void foo(BOOL(CALLBACK*)(PCSTR,ULONG_PTR,ULONG,PVOID));]],
AC_COMPILE_IFELSE(
[
AC_LANG_SOURCE([[
#include <windows.h>
#include <imagehlp.h>
extern void foo(PENUMLOADED_MODULES_CALLBACK);
extern void foo(BOOL(CALLBACK*)(PCSTR,ULONG_PTR,ULONG,PVOID));
]])
],
[
AC_MSG_RESULT([yes])
llvm_cv_win32_elmcb_pcstr="PCSTR"
@ -1493,22 +1642,28 @@ dnl Since we'll be using these atomic builtins in C++ files we should test
dnl the C++ compiler.
AC_LANG_PUSH([C++])
AC_LINK_IFELSE(
AC_LANG_SOURCE(
[[int main() {
volatile unsigned long val = 1;
__sync_synchronize();
__sync_val_compare_and_swap(&val, 1, 0);
__sync_add_and_fetch(&val, 1);
__sync_sub_and_fetch(&val, 1);
return 0;
}
]]),
AC_LANG_POP([C++])
AC_MSG_RESULT(yes)
AC_DEFINE(LLVM_HAS_ATOMICS, 1, Has gcc/MSVC atomic intrinsics),
AC_MSG_RESULT(no)
AC_DEFINE(LLVM_HAS_ATOMICS, 0, Has gcc/MSVC atomic intrinsics)
AC_MSG_WARN([LLVM will be built thread-unsafe because atomic builtins are missing]))
[
AC_LANG_SOURCE([[
int main() {
volatile unsigned long val = 1;
__sync_synchronize();
__sync_val_compare_and_swap(&val, 1, 0);
__sync_add_and_fetch(&val, 1);
__sync_sub_and_fetch(&val, 1);
return 0;
}
]])
],
[
AC_MSG_RESULT([yes])
AC_DEFINE([LLVM_HAS_ATOMICS], [1], [Has gcc/MSVC atomic intrinsics])
],
[
AC_MSG_RESULT([no])
AC_DEFINE([LLVM_HAS_ATOMICS], [0], [Has gcc/MSVC atomic intrinsics])
AC_MSG_WARN([LLVM will be built thread-unsafe because atomic builtins are missing])
])
AC_LANG_POP([C++])
dnl===-----------------------------------------------------------------------===
dnl===


@ -1,2 +1,2 @@
AC_DEFUN([CXX_FLAG_CHECK],
[AC_SUBST($1, `$CXX -Werror $2 -fsyntax-only -xc /dev/null 2>/dev/null && echo $2`)])
[AC_SUBST($1, `$CXX -Werror patsubst($2, [^-Wno-], [-W]) -fsyntax-only -xc /dev/null 2>/dev/null && echo $2`)])


@ -1,34 +1,40 @@
#
# This function determines if the isinf function is available on this
# platform.
#
dnl
dnl This function determines if the isinf function is available on this
dnl platform.
dnl
AC_DEFUN([AC_FUNC_ISINF],[
AC_SINGLE_CXX_CHECK([ac_cv_func_isinf_in_math_h],
[isinf], [<math.h>],
[float f; isinf(f);])
if test "$ac_cv_func_isinf_in_math_h" = "yes" ; then
AC_DEFINE([HAVE_ISINF_IN_MATH_H],1,[Set to 1 if the isinf function is found in <math.h>])
AC_DEFINE([HAVE_ISINF_IN_MATH_H], [1],
[Set to 1 if the isinf function is found in <math.h>])
fi
AC_SINGLE_CXX_CHECK([ac_cv_func_isinf_in_cmath],
[isinf], [<cmath>],
[float f; isinf(f);])
if test "$ac_cv_func_isinf_in_cmath" = "yes" ; then
AC_DEFINE([HAVE_ISINF_IN_CMATH],1,[Set to 1 if the isinf function is found in <cmath>])
AC_DEFINE([HAVE_ISINF_IN_CMATH], [1],
[Set to 1 if the isinf function is found in <cmath>])
fi
AC_SINGLE_CXX_CHECK([ac_cv_func_std_isinf_in_cmath],
[std::isinf], [<cmath>],
[float f; std::isinf(f);])
if test "$ac_cv_func_std_isinf_in_cmath" = "yes" ; then
AC_DEFINE([HAVE_STD_ISINF_IN_CMATH],1,[Set to 1 if the std::isinf function is found in <cmath>])
AC_DEFINE([HAVE_STD_ISINF_IN_CMATH], [1],
[Set to 1 if the std::isinf function is found in <cmath>])
fi
AC_SINGLE_CXX_CHECK([ac_cv_func_finite_in_ieeefp_h],
[finite], [<ieeefp.h>],
[float f; finite(f);])
if test "$ac_cv_func_finite_in_ieeefp_h" = "yes" ; then
AC_DEFINE([HAVE_FINITE_IN_IEEEFP_H],1,[Set to 1 if the finite function is found in <ieeefp.h>])
AC_DEFINE([HAVE_FINITE_IN_IEEEFP_H], [1],
[Set to 1 if the finite function is found in <ieeefp.h>])
fi
])


@ -7,12 +7,10 @@ AC_DEFUN([AC_HUGE_VAL_CHECK],[
AC_LANG_PUSH([C++])
ac_save_CXXFLAGS=$CXXFLAGS
CXXFLAGS="$CXXFLAGS -pedantic"
AC_RUN_IFELSE(
AC_LANG_PROGRAM(
[#include <math.h>],
[double x = HUGE_VAL; return x != x; ]),
[ac_cv_huge_val_sanity=yes],[ac_cv_huge_val_sanity=no],
[ac_cv_huge_val_sanity=yes])
AC_RUN_IFELSE([AC_LANG_PROGRAM([[#include <math.h>]],
[[double x = HUGE_VAL; return x != x;]])],
[ac_cv_huge_val_sanity=yes],[ac_cv_huge_val_sanity=no],
[ac_cv_huge_val_sanity=yes])
CXXFLAGS=$ac_save_CXXFLAGS
AC_LANG_POP([C++])
])


@ -1,10 +1,16 @@
dnl
dnl AC_SINGLE_CXX_CHECK(CACHEVAR, FUNCTION, HEADER, PROGRAM)
dnl $1, $2, $3, $4,
dnl
AC_DEFUN([AC_SINGLE_CXX_CHECK],
[AC_CACHE_CHECK([for $2 in $3], [$1],
[AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE(AC_LANG_PROGRAM([#include $3],[$4]),[$1=yes],[$1=no])
AC_LANG_POP([C++])])
])
dnl $1, $2, $3, $4,
AC_DEFUN([AC_SINGLE_CXX_CHECK],
[
AC_CACHE_CHECK([for $2 in $3], [$1],
[
AC_LANG_PUSH([C++])
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include ]][$3], [$4])],
[$1][[=yes]],
[$1][[=no]])
AC_LANG_POP([C++])
])
])


@ -12,10 +12,14 @@
from ctypes import cdll
import ctypes.util
import platform
# LLVM_VERSION: sync with PACKAGE_VERSION in autoconf/configure.ac and CMakeLists.txt
# but leave out the 'svn' suffix.
LLVM_VERSION = '3.3'
__all__ = [
'c_object_p',
'find_library',
'get_library',
]
@ -87,20 +91,36 @@ def __get__(self, instance, instance_type=None):
return value
def find_library():
# FIXME should probably have build system define absolute path of shared
# library at install time.
for lib in ['LLVM-3.1svn', 'libLLVM-3.1svn', 'LLVM', 'libLLVM']:
result = ctypes.util.find_library(lib)
if result:
return result
return None
def get_library():
"""Obtain a reference to the llvm library."""
lib = find_library()
if not lib:
raise Exception('LLVM shared library not found!')
return cdll.LoadLibrary(lib)
# On Linux, ctypes.cdll.LoadLibrary() respects LD_LIBRARY_PATH
# while ctypes.util.find_library() doesn't.
# See http://docs.python.org/2/library/ctypes.html#finding-shared-libraries
#
# To make it possible to run the unit tests without installing the LLVM shared
# library into a default linker search path, always try ctypes.cdll.LoadLibrary()
# with all possible library names first, then try ctypes.util.find_library().
names = ['LLVM-' + LLVM_VERSION, 'LLVM-' + LLVM_VERSION + 'svn']
t = platform.system()
if t == 'Darwin':
pfx, ext = 'lib', '.dylib'
elif t == 'Windows':
pfx, ext = '', '.dll'
else:
pfx, ext = 'lib', '.so'
for i in names:
try:
lib = cdll.LoadLibrary(pfx + i + ext)
except OSError:
pass
else:
return lib
for i in names:
t = ctypes.util.find_library(i)
if t:
return cdll.LoadLibrary(t)
raise Exception('LLVM shared library not found!')
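
A minimal usage sketch of the updated loader (assuming the bindings package is importable as llvm, as in the surrounding tree; not part of the change itself):

from llvm.common import get_library

# Tries libLLVM-3.3 / libLLVM-3.3svn (with the platform prefix and extension)
# via cdll.LoadLibrary first, then falls back to ctypes.util.find_library,
# and raises if no shared library can be located.
lib = get_library()
print lib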


@ -31,6 +31,9 @@
lib = get_library()
callbacks = {}
# Constants for set_options
Option_UseMarkup = 1
class Disassembler(LLVMObject):
"""Represents a disassembler instance.
@ -113,6 +116,10 @@ def get_instructions(self, source, pc=0):
address += result
offset += result
def set_options(self, options):
if not lib.LLVMSetDisasmOptions(self, options):
raise Exception('Unable to set all disassembler options in %i' % options)
def register_library(library):
library.LLVMCreateDisasm.argtypes = [c_char_p, c_void_p, c_int,
@ -125,6 +132,10 @@ def register_library(library):
c_uint64, c_uint64, c_char_p, c_size_t]
library.LLVMDisasmInstruction.restype = c_size_t
library.LLVMSetDisasmOptions.argtypes = [Disassembler, c_uint64]
library.LLVMSetDisasmOptions.restype = c_int
callbacks['op_info'] = CFUNCTYPE(c_int, c_void_p, c_uint64, c_uint64, c_uint64,
c_int, c_void_p)
callbacks['symbol_lookup'] = CFUNCTYPE(c_char_p, c_void_p, c_uint64,


@ -1,6 +1,6 @@
from .base import TestBase
from ..disassembler import Disassembler
from ..disassembler import Disassembler, Option_UseMarkup
class TestDisassembler(TestBase):
def test_instantiate(self):
@ -26,3 +26,14 @@ def test_get_instructions(self):
self.assertEqual(instructions[0], (0, 3, '\tjcxz\t-127'))
self.assertEqual(instructions[1], (3, 2, '\taddl\t%eax, %edi'))
def test_set_options(self):
sequence = '\x10\x40\x2d\xe9'
triple = 'arm-linux-android'
disassembler = Disassembler(triple)
disassembler.set_options(Option_UseMarkup)
count, s = disassembler.get_instruction(sequence)
print s
self.assertEqual(count, 4)
self.assertEqual(s, '\tpush\t{<reg:r4>, <reg:lr>}')
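
Outside the unittest harness, the new markup option can be exercised with a short standalone sketch like the following (hypothetical script reusing the byte sequence and triple from the test above; it requires an LLVM shared library that the bindings can load):

from llvm.disassembler import Disassembler, Option_UseMarkup

seq = '\x10\x40\x2d\xe9'           # ARM encoding of 'push {r4, lr}'
dis = Disassembler('arm-linux-android')
dis.set_options(Option_UseMarkup)  # emit operands wrapped in markup tags
count, text = dis.get_instruction(seq)
print count, text                  # expected: 4 '\tpush\t{<reg:r4>, <reg:lr>}'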


@ -54,6 +54,7 @@ check_include_file(ndir.h HAVE_NDIR_H)
if( NOT PURE_WINDOWS )
check_include_file(pthread.h HAVE_PTHREAD_H)
endif()
check_include_file(sanitizer/msan_interface.h HAVE_SANITIZER_MSAN_INTERFACE_H)
check_include_file(setjmp.h HAVE_SETJMP_H)
check_include_file(signal.h HAVE_SIGNAL_H)
check_include_file(stdint.h HAVE_STDINT_H)
@ -79,6 +80,9 @@ check_include_file(utime.h HAVE_UTIME_H)
check_include_file(valgrind/valgrind.h HAVE_VALGRIND_VALGRIND_H)
check_include_file(windows.h HAVE_WINDOWS_H)
check_include_file(fenv.h HAVE_FENV_H)
check_symbol_exists(FE_ALL_EXCEPT "fenv.h" HAVE_DECL_FE_ALL_EXCEPT)
check_symbol_exists(FE_INEXACT "fenv.h" HAVE_DECL_FE_INEXACT)
check_include_file(mach/mach.h HAVE_MACH_MACH_H)
check_include_file(mach-o/dyld.h HAVE_MACH_O_DYLD_H)
@ -99,6 +103,7 @@ if( NOT PURE_WINDOWS )
endif()
endif()
check_library_exists(dl dlopen "" HAVE_LIBDL)
check_library_exists(rt clock_gettime "" HAVE_LIBRT)
endif()
# function checks
@ -117,6 +122,12 @@ check_symbol_exists(isnan math.h HAVE_ISNAN_IN_MATH_H)
check_symbol_exists(ceilf math.h HAVE_CEILF)
check_symbol_exists(floorf math.h HAVE_FLOORF)
check_symbol_exists(fmodf math.h HAVE_FMODF)
check_symbol_exists(log math.h HAVE_LOG)
check_symbol_exists(log2 math.h HAVE_LOG2)
check_symbol_exists(log10 math.h HAVE_LOG10)
check_symbol_exists(exp math.h HAVE_EXP)
check_symbol_exists(exp2 math.h HAVE_EXP2)
check_symbol_exists(exp10 math.h HAVE_EXP10)
if( HAVE_SETJMP_H )
check_symbol_exists(longjmp setjmp.h HAVE_LONGJMP)
check_symbol_exists(setjmp setjmp.h HAVE_SETJMP)
@ -294,10 +305,33 @@ else()
set(ENABLE_PIC 0)
endif()
find_package(LibXml2)
if (LIBXML2_FOUND)
set(CLANG_HAVE_LIBXML 1)
endif ()
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("-Wno-variadic-macros" SUPPORTS_NO_VARIADIC_MACROS_FLAG)
set(USE_NO_MAYBE_UNINITIALIZED 0)
set(USE_NO_UNINITIALIZED 0)
# Disable gcc's potentially uninitialized use analysis as it presents lots of
# false positives.
if (CMAKE_COMPILER_IS_GNUCXX)
check_cxx_compiler_flag("-Wmaybe-uninitialized" HAS_MAYBE_UNINITIALIZED)
if (HAS_MAYBE_UNINITIALIZED)
set(USE_NO_MAYBE_UNINITIALIZED 1)
else()
# Only recent versions of gcc make the distinction between -Wuninitialized
# and -Wmaybe-uninitialized. If -Wmaybe-uninitialized isn't supported, just
# turn off all uninitialized use warnings.
check_cxx_compiler_flag("-Wuninitialized" HAS_UNINITIALIZED)
set(USE_NO_UNINITIALIZED ${HAS_UNINITIALIZED})
endif()
endif()
include(GetHostTriple)
get_host_triple(LLVM_HOST_TRIPLE)


@ -157,12 +157,7 @@ endmacro(add_llvm_external_project)
# Generic support for adding a unittest.
function(add_unittest test_suite test_name)
if (CMAKE_BUILD_TYPE)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY
${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_BUILD_TYPE})
else()
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
endif()
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
if( NOT LLVM_BUILD_TESTS )
set(EXCLUDE_FROM_ALL ON)
endif()
@ -239,8 +234,8 @@ function(configure_lit_site_cfg input output)
set(LLVM_SOURCE_DIR ${LLVM_MAIN_SRC_DIR})
set(LLVM_BINARY_DIR ${LLVM_BINARY_DIR})
set(LLVM_TOOLS_DIR "${LLVM_TOOLS_BINARY_DIR}/%(build_config)s")
set(LLVM_LIBS_DIR "${LLVM_BINARY_DIR}/lib/%(build_config)s")
set(LLVM_TOOLS_DIR "${LLVM_TOOLS_BINARY_DIR}/%(build_mode)s")
set(LLVM_LIBS_DIR "${LLVM_BINARY_DIR}/lib/%(build_mode)s")
set(PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE})
set(ENABLE_SHARED ${LLVM_SHARED_LIBS_ENABLED})
set(SHLIBPATH_VAR ${SHLIBPATH_VAR})
@ -251,8 +246,8 @@ function(configure_lit_site_cfg input output)
set(ENABLE_ASSERTIONS "0")
endif()
set(HOST_OS ${CMAKE_HOST_SYSTEM_NAME})
set(HOST_ARCH ${CMAKE_HOST_SYSTEM_PROCESSOR})
set(HOST_OS ${CMAKE_SYSTEM_NAME})
set(HOST_ARCH ${CMAKE_SYSTEM_PROCESSOR})
configure_file(${input} ${output} @ONLY)
endfunction()
@ -266,18 +261,23 @@ function(add_lit_target target comment)
set(LIT_COMMAND
${PYTHON_EXECUTABLE}
${LLVM_MAIN_SRC_DIR}/utils/lit/lit.py
--param build_config=${CMAKE_CFG_INTDIR}
--param build_mode=${RUNTIME_BUILD_MODE}
--param build_mode=${CMAKE_CFG_INTDIR}
${LIT_ARGS}
)
foreach(param ${ARG_PARAMS})
list(APPEND LIT_COMMAND --param ${param})
endforeach()
add_custom_target(${target}
COMMAND ${LIT_COMMAND} ${ARG_DEFAULT_ARGS}
COMMENT "${comment}"
)
add_dependencies(${target} ${ARG_DEPENDS})
if( ARG_DEPENDS )
add_custom_target(${target}
COMMAND ${LIT_COMMAND} ${ARG_DEFAULT_ARGS}
COMMENT "${comment}"
)
add_dependencies(${target} ${ARG_DEPENDS})
else()
add_custom_target(${target}
COMMAND cmake -E echo "${target} does nothing, no tools built.")
message(STATUS "${target} does nothing.")
endif()
endfunction()
# A function to add a set of lit test suites to be driven through 'check-*' targets.


@ -0,0 +1,25 @@
# CMake project that writes Subversion revision information to a header.
#
# Input variables:
# FIRST_SOURCE_DIR - First source directory
# FIRST_REPOSITORY - The macro to define to the first revision number.
# SECOND_SOURCE_DIR - Second source directory
# SECOND_REPOSITORY - The macro to define to the second revision number.
# HEADER_FILE - The header file to write
include(FindSubversion)
if (Subversion_FOUND AND EXISTS "${FIRST_SOURCE_DIR}/.svn")
# Repository information for the first repository.
Subversion_WC_INFO(${FIRST_SOURCE_DIR} MY)
file(WRITE ${HEADER_FILE}.txt "#define ${FIRST_REPOSITORY} \"${MY_WC_REVISION}\"\n")
# Repository information for the second repository.
if (EXISTS "${SECOND_SOURCE_DIR}/.svn")
Subversion_WC_INFO(${SECOND_SOURCE_DIR} MY)
file(APPEND ${HEADER_FILE}.txt
"#define ${SECOND_REPOSITORY} \"${MY_WC_REVISION}\"\n")
endif ()
# Copy the file only if it has changed.
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_if_different
${HEADER_FILE}.txt ${HEADER_FILE})
endif()


@ -3,6 +3,8 @@
# selections.
include(AddLLVMDefinitions)
include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
if( CMAKE_COMPILER_IS_GNUCXX )
set(LLVM_COMPILER_IS_GCC_COMPATIBLE ON)
@ -10,20 +12,6 @@ elseif( "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" )
set(LLVM_COMPILER_IS_GCC_COMPATIBLE ON)
endif()
# Run-time build mode; It is used for unittests.
if(MSVC_IDE)
# Expect "$(Configuration)", "$(OutDir)", etc.
# It is expanded by msbuild or similar.
set(RUNTIME_BUILD_MODE "${CMAKE_CFG_INTDIR}")
elseif(NOT CMAKE_BUILD_TYPE STREQUAL "")
# Expect "Release" "Debug", etc.
# Or unittests could not run.
set(RUNTIME_BUILD_MODE ${CMAKE_BUILD_TYPE})
else()
# It might be "."
set(RUNTIME_BUILD_MODE "${CMAKE_CFG_INTDIR}")
endif()
if( LLVM_ENABLE_ASSERTIONS )
# MSVC doesn't like _DEBUG on release builds. See PR 4379.
if( NOT MSVC )
@ -71,6 +59,39 @@ else(WIN32)
endif(UNIX)
endif(WIN32)
function(add_flag_or_print_warning flag)
check_c_compiler_flag(${flag} C_SUPPORTS_FLAG)
check_cxx_compiler_flag(${flag} CXX_SUPPORTS_FLAG)
if (C_SUPPORTS_FLAG AND CXX_SUPPORTS_FLAG)
message(STATUS "Building with ${flag}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}" PARENT_SCOPE)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}" PARENT_SCOPE)
else()
message(WARNING "${flag} is not supported.")
endif()
endfunction()
function(append value)
foreach(variable ${ARGN})
set(${variable} "${${variable}} ${value}" PARENT_SCOPE)
endforeach(variable)
endfunction()
function(append_if condition value)
if (${condition})
foreach(variable ${ARGN})
set(${variable} "${${variable}} ${value}" PARENT_SCOPE)
endforeach(variable)
endif()
endfunction()
macro(add_flag_if_supported flag)
check_c_compiler_flag(${flag} C_SUPPORTS_FLAG)
append_if(C_SUPPORTS_FLAG "${flag}" CMAKE_C_FLAGS)
check_cxx_compiler_flag(${flag} CXX_SUPPORTS_FLAG)
append_if(CXX_SUPPORTS_FLAG "${flag}" CMAKE_CXX_FLAGS)
endmacro()
if( LLVM_ENABLE_PIC )
if( XCODE )
# Xcode has -mdynamic-no-pic on by default, which overrides -fPIC. I don't
@ -79,24 +100,14 @@ if( LLVM_ENABLE_PIC )
elseif( WIN32 OR CYGWIN)
# On Windows all code is PIC. MinGW warns if -fPIC is used.
else()
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("-fPIC" SUPPORTS_FPIC_FLAG)
if( SUPPORTS_FPIC_FLAG )
message(STATUS "Building with -fPIC")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fPIC")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fPIC")
else( SUPPORTS_FPIC_FLAG )
message(WARNING "-fPIC not supported.")
endif()
add_flag_or_print_warning("-fPIC")
if( WIN32 OR CYGWIN)
# MinGW warns if -fvisibility-inlines-hidden is used.
else()
check_cxx_compiler_flag("-fvisibility-inlines-hidden" SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG)
if( SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fvisibility-inlines-hidden")
endif()
endif()
append_if(SUPPORTS_FVISIBILITY_INLINES_HIDDEN_FLAG "-fvisibility-inlines-hidden" CMAKE_CXX_FLAGS)
endif()
endif()
endif()
@ -168,6 +179,7 @@ if( MSVC )
-wd4551 # Suppress 'function call missing argument list'
-wd4624 # Suppress ''derived class' : destructor could not be generated because a base class destructor is inaccessible'
-wd4715 # Suppress ''function' : not all control paths return a value'
-wd4722 # Suppress ''function' : destructor never returns, potential memory leak'
-wd4800 # Suppress ''type' : forcing value to bool 'true' or 'false' (performance warning)'
# Promoted warnings.
@ -175,7 +187,6 @@ if( MSVC )
# Promoted warnings to errors.
-we4238 # Promote 'nonstandard extension used : class rvalue used as lvalue' to error.
-we4239 # Promote 'nonstandard extension used : 'token' : conversion from 'type' to 'type'' to error.
)
# Enable warnings
@ -190,20 +201,67 @@ if( MSVC )
endif (LLVM_ENABLE_WERROR)
elseif( LLVM_COMPILER_IS_GCC_COMPATIBLE )
if (LLVM_ENABLE_WARNINGS)
add_llvm_definitions( -Wall -W -Wno-unused-parameter -Wwrite-strings )
if (LLVM_ENABLE_PEDANTIC)
add_llvm_definitions( -pedantic -Wno-long-long )
endif (LLVM_ENABLE_PEDANTIC)
check_cxx_compiler_flag("-Werror -Wcovered-switch-default" SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG)
if( SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG )
add_llvm_definitions( -Wcovered-switch-default )
append("-Wall -W -Wno-unused-parameter -Wwrite-strings" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
# Turn off missing field initializer warnings for gcc to avoid noise from
# false positives with empty {}. Turn them on otherwise (they're off by
# default for clang).
check_cxx_compiler_flag("-Wmissing-field-initializers" CXX_SUPPORTS_MISSING_FIELD_INITIALIZERS_FLAG)
if (CXX_SUPPORTS_MISSING_FIELD_INITIALIZERS_FLAG)
if (CMAKE_COMPILER_IS_GNUCXX)
append("-Wno-missing-field-initializers" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
else()
append("-Wmissing-field-initializers" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
endif()
endif()
append_if(LLVM_ENABLE_PEDANTIC "-pedantic -Wno-long-long" CMAKE_C_FLAGS CMAKE_CXX_FLAGS)
check_cxx_compiler_flag("-Werror -Wcovered-switch-default" CXX_SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG)
append_if(CXX_SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG "-Wcovered-switch-default" CMAKE_CXX_FLAGS)
check_c_compiler_flag("-Werror -Wcovered-switch-default" C_SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG)
append_if(C_SUPPORTS_COVERED_SWITCH_DEFAULT_FLAG "-Wcovered-switch-default" CMAKE_C_FLAGS)
append_if(USE_NO_UNINITIALIZED "-Wno-uninitialized" CMAKE_CXX_FLAGS)
append_if(USE_NO_MAYBE_UNINITIALIZED "-Wno-maybe-uninitialized" CMAKE_CXX_FLAGS)
check_cxx_compiler_flag("-Werror -Wnon-virtual-dtor" CXX_SUPPORTS_NON_VIRTUAL_DTOR_FLAG)
append_if(CXX_SUPPORTS_NON_VIRTUAL_DTOR_FLAG "-Wnon-virtual-dtor" CMAKE_CXX_FLAGS)
endif (LLVM_ENABLE_WARNINGS)
if (LLVM_ENABLE_WERROR)
add_llvm_definitions( -Werror )
endif (LLVM_ENABLE_WERROR)
endif( MSVC )
macro(append_common_sanitizer_flags)
# Append -fno-omit-frame-pointer and turn on debug info to get better
# stack traces.
add_flag_if_supported("-fno-omit-frame-pointer")
if (NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" AND
NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "RELWITHDEBINFO")
add_flag_if_supported("-gline-tables-only")
endif()
endmacro()
# Turn on sanitizers if necessary.
if(LLVM_USE_SANITIZER)
if (LLVM_ON_UNIX)
if (LLVM_USE_SANITIZER STREQUAL "Address")
append_common_sanitizer_flags()
add_flag_or_print_warning("-fsanitize=address")
elseif (LLVM_USE_SANITIZER MATCHES "Memory(WithOrigins)?")
append_common_sanitizer_flags()
add_flag_or_print_warning("-fsanitize=memory")
# -pie is required for MSan.
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -pie")
if(LLVM_USE_SANITIZER STREQUAL "MemoryWithOrigins")
add_flag_or_print_warning("-fsanitize-memory-track-origins")
endif()
else()
message(WARNING "Unsupported value of LLVM_USE_SANITIZER: ${LLVM_USE_SANITIZER}")
endif()
else()
message(WARNING "LLVM_USE_SANITIZER is not supported on this platform.")
endif()
endif()
add_llvm_definitions( -D__STDC_CONSTANT_MACROS )
add_llvm_definitions( -D__STDC_FORMAT_MACROS )
add_llvm_definitions( -D__STDC_LIMIT_MACROS )


@ -4,11 +4,14 @@ function(get_system_libs return_var)
if( MINGW )
set(system_libs ${system_libs} imagehlp psapi)
elseif( CMAKE_HOST_UNIX )
if( HAVE_LIBRT )
set(system_libs ${system_libs} rt)
endif()
if( HAVE_LIBDL )
set(system_libs ${system_libs} ${CMAKE_DL_LIBS})
set(system_libs ${system_libs} ${CMAKE_DL_LIBS})
endif()
if( LLVM_ENABLE_THREADS AND HAVE_LIBPTHREAD )
set(system_libs ${system_libs} pthread)
set(system_libs ${system_libs} pthread)
endif()
endif( MINGW )
endif( NOT MSVC )


@ -20,49 +20,51 @@ function(add_version_info_from_vcs VERS)
elseif( EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/.git )
set(result "${result}git")
# Try to get a ref-id
find_program(git_executable NAMES git git.exe git.cmd)
if( git_executable )
set(is_git_svn_rev_exact false)
execute_process(COMMAND ${git_executable} svn log --limit=1 --oneline
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
TIMEOUT 5
RESULT_VARIABLE git_result
OUTPUT_VARIABLE git_output)
if( git_result EQUAL 0 )
string(REGEX MATCH r[0-9]+ git_svn_rev ${git_output})
string(LENGTH "${git_svn_rev}" rev_length)
math(EXPR rev_length "${rev_length}-1")
string(SUBSTRING "${git_svn_rev}" 1 ${rev_length} git_svn_rev_number)
set(SVN_REVISION ${git_svn_rev_number} PARENT_SCOPE)
set(git_svn_rev "-svn-${git_svn_rev}")
# Determine if the HEAD points directly at a subversion revision.
execute_process(COMMAND ${git_executable} svn find-rev HEAD
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
TIMEOUT 5
RESULT_VARIABLE git_result
OUTPUT_VARIABLE git_output)
if( EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/.git/svn )
find_program(git_executable NAMES git git.exe git.cmd)
if( git_executable )
set(is_git_svn_rev_exact false)
execute_process(COMMAND ${git_executable} svn log --limit=1 --oneline
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
TIMEOUT 5
RESULT_VARIABLE git_result
OUTPUT_VARIABLE git_output)
if( git_result EQUAL 0 )
string(STRIP "${git_output}" git_head_svn_rev_number)
if( git_head_svn_rev_number EQUAL git_svn_rev_number )
set(is_git_svn_rev_exact true)
string(REGEX MATCH r[0-9]+ git_svn_rev ${git_output})
string(LENGTH "${git_svn_rev}" rev_length)
math(EXPR rev_length "${rev_length}-1")
string(SUBSTRING "${git_svn_rev}" 1 ${rev_length} git_svn_rev_number)
set(SVN_REVISION ${git_svn_rev_number} PARENT_SCOPE)
set(git_svn_rev "-svn-${git_svn_rev}")
# Determine if the HEAD points directly at a subversion revision.
execute_process(COMMAND ${git_executable} svn find-rev HEAD
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
TIMEOUT 5
RESULT_VARIABLE git_result
OUTPUT_VARIABLE git_output)
if( git_result EQUAL 0 )
string(STRIP "${git_output}" git_head_svn_rev_number)
if( git_head_svn_rev_number EQUAL git_svn_rev_number )
set(is_git_svn_rev_exact true)
endif()
endif()
else()
set(git_svn_rev "")
endif()
execute_process(COMMAND
${git_executable} rev-parse --short HEAD
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
TIMEOUT 5
RESULT_VARIABLE git_result
OUTPUT_VARIABLE git_output)
if( git_result EQUAL 0 AND NOT is_git_svn_rev_exact )
string(STRIP "${git_output}" git_ref_id)
set(GIT_COMMIT ${git_ref_id} PARENT_SCOPE)
set(result "${result}${git_svn_rev}-${git_ref_id}")
else()
set(result "${result}${git_svn_rev}")
endif()
else()
set(git_svn_rev "")
endif()
execute_process(COMMAND
${git_executable} rev-parse --short HEAD
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
TIMEOUT 5
RESULT_VARIABLE git_result
OUTPUT_VARIABLE git_output)
if( git_result EQUAL 0 AND NOT is_git_svn_rev_exact )
string(STRIP "${git_output}" git_ref_id)
set(GIT_COMMIT ${git_ref_id} PARENT_SCOPE)
set(result "${result}${git_svn_rev}-${git_ref_id}")
else()
set(result "${result}${git_svn_rev}")
endif()
endif()
endif()

configure vendored

File diff suppressed because it is too large


@ -1,5 +1,3 @@
.. _alias_analysis:
==================================
LLVM Alias Analysis Infrastructure
==================================
@ -205,7 +203,7 @@ look at the `various alias analysis implementations`_ included with LLVM.
Different Pass styles
---------------------
The first step to determining what type of `LLVM pass <WritingAnLLVMPass.html>`_
The first step to determining what type of :doc:`LLVM pass <WritingAnLLVMPass>`
you need to use for your Alias Analysis. As is the case with most other
analyses and transformations, the answer should be fairly obvious from what type
of problem you are trying to solve:
@ -253,25 +251,24 @@ Interfaces which may be specified
All of the `AliasAnalysis
<http://llvm.org/doxygen/classllvm_1_1AliasAnalysis.html>`__ virtual methods
default to providing `chaining`_ to another alias analysis implementation, which
ends up returning conservatively correct information (returning "May" Alias and
"Mod/Ref" for alias and mod/ref queries respectively). Depending on the
capabilities of the analysis you are implementing, you just override the
interfaces you can improve.
default to providing :ref:`chaining <aliasanalysis-chaining>` to another alias
analysis implementation, which ends up returning conservatively correct
information (returning "May" Alias and "Mod/Ref" for alias and mod/ref queries
respectively). Depending on the capabilities of the analysis you are
implementing, you just override the interfaces you can improve.
.. _chaining:
.. _chain:
.. _aliasanalysis-chaining:
``AliasAnalysis`` chaining behavior
-----------------------------------
With only one special exception (the `no-aa`_ pass) every alias analysis pass
chains to another alias analysis implementation (for example, the user can
specify "``-basicaa -ds-aa -licm``" to get the maximum benefit from both alias
analyses). The alias analysis class automatically takes care of most of this
for methods that you don't override. For methods that you do override, in code
paths that return a conservative MayAlias or Mod/Ref result, simply return
whatever the superclass computes. For example:
With only one special exception (the :ref:`-no-aa <aliasanalysis-no-aa>` pass)
every alias analysis pass chains to another alias analysis implementation (for
example, the user can specify "``-basicaa -ds-aa -licm``" to get the maximum
benefit from both alias analyses). The alias analysis class automatically
takes care of most of this for methods that you don't override. For methods
that you do override, in code paths that return a conservative MayAlias or
Mod/Ref result, simply return whatever the superclass computes. For example:
.. code-block:: c++
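// A hedged sketch (the pass name MyAA and the helper below are invented, not
// part of LLVM): override only the queries you can improve, and fall back to
// the chained implementation for the conservative cases.
AliasAnalysis::AliasResult MyAA::alias(const Location &LocA,
                                       const Location &LocB) {
  if (isProvablyDistinct(LocA, LocB)) // hypothetical helper of this pass
    return NoAlias;
  // Conservative path: defer to the next alias analysis in the chain.
  return AliasAnalysis::alias(LocA, LocB);
}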
@ -504,11 +501,11 @@ Available ``AliasAnalysis`` implementations
-------------------------------------------
This section lists the various implementations of the ``AliasAnalysis``
interface. With the exception of the `-no-aa`_ implementation, all of these
`chain`_ to other alias analysis implementations.
interface. With the exception of the :ref:`-no-aa <aliasanalysis-no-aa>`
implementation, all of these :ref:`chain <aliasanalysis-chaining>` to other
alias analysis implementations.
.. _no-aa:
.. _-no-aa:
.. _aliasanalysis-no-aa:
The ``-no-aa`` pass
^^^^^^^^^^^^^^^^^^^

View File

@ -1,5 +1,3 @@
.. _atomics:
==============================================
LLVM Atomic Instructions and Concurrency Guide
==============================================

View File

@ -1,5 +1,3 @@
.. _bitcode_format:
.. role:: raw-html(raw)
:format: html
@ -54,8 +52,8 @@ structure. This structure consists of the following concepts:
* Abbreviations, which specify compression optimizations for the file.
Note that the `llvm-bcanalyzer <CommandGuide/html/llvm-bcanalyzer.html>`_ tool
can be used to dump and inspect arbitrary bitstreams, which is very useful for
Note that the :doc:`llvm-bcanalyzer <CommandGuide/llvm-bcanalyzer>` tool can be
used to dump and inspect arbitrary bitstreams, which is very useful for
understanding the encoding.
.. _magic number:

View File

@ -1,5 +1,3 @@
.. _branch_weight:
===========================
LLVM Branch Weight Metadata
===========================
@ -27,8 +25,8 @@ Supported Instructions
``BranchInst``
^^^^^^^^^^^^^^
Metadata is only assign to the conditional branches. There are two extra
operarands, for the true and the false branch.
Metadata is only assigned to conditional branches. There are two extra
operands, one for the true branch and one for the false branch.
.. code-block:: llvm
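; A hedged sketch (the weights are invented): the first weight belongs to the
; true branch, the second to the false branch.
br i1 %cond, label %if.then, label %if.else, !prof !0

!0 = metadata !{metadata !"branch_weights", i32 64, i32 4}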
@ -41,8 +39,8 @@ operarands, for the true and the false branch.
``SwitchInst``
^^^^^^^^^^^^^^
Branch weights are assign to every case (including ``default`` case which is
always case #0).
Branch weights are assigned to every case (including the ``default`` case which
is always case #0).
.. code-block:: llvm
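; A hedged sketch (weights and labels invented): the first weight belongs to
; the default destination (case #0), the rest to the cases in order.
switch i32 %val, label %default [ i32 1, label %case1
                                  i32 2, label %case2 ], !prof !0

!0 = metadata !{metadata !"branch_weights", i32 4, i32 64, i32 32}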
@ -55,7 +53,7 @@ always case #0).
``IndirectBrInst``
^^^^^^^^^^^^^^^^^^
Branch weights are assign to every destination.
Branch weights are assigned to every destination.
.. code-block:: llvm

View File

@ -1,5 +1,3 @@
.. _bugpoint:
====================================
LLVM bugpoint tool: design and usage
====================================
@ -136,9 +134,9 @@ non-obvious ways. Here are some hints and tips:
It is often useful to capture the output of the program to file. For example,
in the C shell, you can run:
.. code-block:: bash
.. code-block:: console
bugpoint ... |& tee bugpoint.log
$ bugpoint ... |& tee bugpoint.log
to get a copy of ``bugpoint``'s output in the file ``bugpoint.log``, as well
as on your terminal.

View File

@ -1,5 +1,3 @@
.. _building-with-cmake:
========================
Building LLVM with CMake
========================
@ -36,7 +34,7 @@ We use here the command-line, non-interactive CMake interface.
#. Create a directory to contain the build. Building LLVM in the source
directory is not supported. cd to this directory:
.. code-block:: bash
.. code-block:: console
$ mkdir mybuilddir
$ cd mybuilddir
@ -44,7 +42,7 @@ We use here the command-line, non-interactive CMake interface.
#. Execute this command on the shell replacing `path/to/llvm/source/root` with
the path to the root of your LLVM source tree:
.. code-block:: bash
.. code-block:: console
$ cmake path/to/llvm/source/root
@ -80,14 +78,14 @@ the corresponding *Generator* for creating files for your build tool. You can
explicitly specify the generator with the command line option ``-G "Name of the
generator"``. For knowing the available generators on your platform, execute
.. code-block:: bash
.. code-block:: console
$ cmake --help
This will list the generator names at the end of the help text. Generator
names are case-sensitive. Example:
.. code-block:: bash
.. code-block:: console
$ cmake -G "Visual Studio 9 2008" path/to/llvm/source/root
@ -110,14 +108,14 @@ Variables customize how the build will be generated. Options are boolean
variables, with possible values ON/OFF. Options and variables are defined on the
CMake command line like this:
.. code-block:: bash
.. code-block:: console
$ cmake -DVARIABLE=value path/to/llvm/source
You can set a variable after the initial CMake invocation for changing its
value. You can also undefine a variable:
.. code-block:: bash
.. code-block:: console
$ cmake -UVARIABLE path/to/llvm/source
@ -127,7 +125,7 @@ on the root of the build directory. Do not hand-edit it.
Variables are listed here with their type appended after a colon. It is
correct to write the variable and the type on the CMake command line:
.. code-block:: bash
.. code-block:: console
$ cmake -DVARIABLE:TYPE=value path/to/llvm/source
@ -206,7 +204,7 @@ LLVM-specific variables
tests.
**LLVM_APPEND_VC_REV**:BOOL
Append version control revision info (svn revision number or git revision id)
Append version control revision info (svn revision number or Git revision id)
to LLVM version string (stored in the PACKAGE_VERSION macro). For this to work
cmake must be invoked before the build. Defaults to OFF.
@ -280,7 +278,7 @@ Testing is performed when the *check* target is built. For instance, if you are
using makefiles, execute this command while on the top level of your build
directory:
.. code-block:: bash
.. code-block:: console
$ make check
@ -355,13 +353,15 @@ an equivalent variant of snippet shown above:
target_link_libraries(mycompiler ${REQ_LLVM_LIBRARIES})
.. _cmake-out-of-source-pass:
Developing LLVM pass out of source
----------------------------------
It is possible to develop LLVM passes against an installed LLVM. An example
project layout is provided below:
.. code-block:: bash
.. code-block:: none
<project dir>/
|

View File

@ -1,5 +1,3 @@
.. _code_generator:
==========================================
The LLVM Target-Independent Code Generator
==========================================
@ -17,6 +15,8 @@ The LLVM Target-Independent Code Generator
.partial { background-color: #F88017 }
.yes { background-color: #0F0; }
.yes:before { content: "Y" }
.na { background-color: #6666FF; }
.na:before { content: "N/A" }
</style>
.. contents::
@ -172,7 +172,7 @@ architecture. These target descriptions often have a large amount of common
information (e.g., an ``add`` instruction is almost identical to a ``sub``
instruction). In order to allow the maximum amount of commonality to be
factored out, the LLVM code generator uses the
`TableGen <TableGenFundamentals.html>`_ tool to describe big chunks of the
:doc:`TableGen <TableGenFundamentals>` tool to describe big chunks of the
target machine, which allows the use of domain-specific and target-specific
abstractions to reduce the amount of repetition.
@ -230,7 +230,7 @@ for structures, the alignment requirements for various data types, the size of
pointers in the target, and whether the target is little-endian or
big-endian.
.. _targetlowering:
.. _TargetLowering:
The ``TargetLowering`` class
----------------------------
@ -250,6 +250,8 @@ operations. Among other things, this class indicates:
* various high-level characteristics, like whether it is profitable to turn
division by a constant into a multiplication sequence.
.. _TargetRegisterInfo:
The ``TargetRegisterInfo`` class
--------------------------------
@ -283,12 +285,10 @@ The ``TargetInstrInfo`` class
-----------------------------
The ``TargetInstrInfo`` class is used to describe the machine instructions
supported by the target. It is essentially an array of ``TargetInstrDescriptor``
objects, each of which describes one instruction the target
supports. Descriptors define things like the mnemonic for the opcode, the number
of operands, the list of implicit register uses and defs, whether the
instruction has certain target-independent properties (accesses memory, is
commutable, etc), and holds any target-specific flags.
supported by the target. Descriptions define things like the mnemonic for
the opcode, the number of operands, the list of implicit register uses and defs,
whether the instruction has certain target-independent properties (accesses
memory, is commutable, etc.), and hold any target-specific flags.
The ``TargetFrameInfo`` class
-----------------------------
@ -771,6 +771,8 @@ value of type i1, i8, i16, or i64 would be illegal, as would a DAG that uses a
SREM or UREM operation. The `legalize types`_ and `legalize operations`_ phases
are responsible for turning an illegal DAG into a legal DAG.
.. _SelectionDAG-Process:
SelectionDAG Instruction Selection Process
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@ -874,7 +876,7 @@ found, the elements are converted to scalars ("scalarizing").
A target implementation tells the legalizer which types are supported (and which
register class to use for them) by calling the ``addRegisterClass`` method in
its TargetLowering constructor.
its ``TargetLowering`` constructor.
.. _legalize operations:
.. _Legalizer:
@ -968,7 +970,8 @@ The ``FADDS`` instruction is a simple binary single-precision add instruction.
To perform this pattern match, the PowerPC backend includes the following
instruction definitions:
::
.. code-block:: text
:emphasize-lines: 4-5,9
def FMADDS : AForm_1<59, 29,
(ops F4RC:$FRT, F4RC:$FRA, F4RC:$FRC, F4RC:$FRB),
@ -980,10 +983,10 @@ instruction definitions:
"fadds $FRT, $FRA, $FRB",
[(set F4RC:$FRT, (fadd F4RC:$FRA, F4RC:$FRB))]>;
The portion of the instruction definition in bold indicates the pattern used to
match the instruction. The DAG operators (like ``fmul``/``fadd``) are defined
in the ``include/llvm/Target/TargetSelectionDAG.td`` file. " ``F4RC``" is the
register class of the input and result values.
The highlighted portion of the instruction definitions indicates the pattern
used to match the instructions. The DAG operators (like ``fmul``/``fadd``)
are defined in the ``include/llvm/Target/TargetSelectionDAG.td`` file.
"``F4RC``" is the register class of the input and result values.
The TableGen DAG instruction selector generator reads the instruction patterns
in the ``.td`` file and automatically builds parts of the pattern matching code
@ -1035,6 +1038,24 @@ for your target. It has the following strengths:
are used to manipulate the input immediate (in this case, take the high or low
16-bits of the immediate).
* When using the 'Pat' class to map a pattern to an instruction that has one
or more complex operands (like e.g. `X86 addressing mode`_), the pattern may
either specify the operand as a whole using a ``ComplexPattern``, or else it
may specify the components of the complex operand separately. The latter is
done e.g. for pre-increment instructions by the PowerPC back end:
::
def STWU : DForm_1<37, (outs ptr_rc:$ea_res), (ins GPRC:$rS, memri:$dst),
"stwu $rS, $dst", LdStStoreUpd, []>,
RegConstraint<"$dst.reg = $ea_res">, NoEncode<"$ea_res">;
def : Pat<(pre_store GPRC:$rS, ptr_rc:$ptrreg, iaddroff:$ptroff),
(STWU GPRC:$rS, iaddroff:$ptroff, ptr_rc:$ptrreg)>;
Here, the pair of ``ptroff`` and ``ptrreg`` operands is matched onto the
complex operand ``dst`` of class ``memri`` in the ``STWU`` instruction.
* While the system does automate a lot, it still allows you to write custom C++
code to match special cases if there is something that is hard to
express.
@ -1727,6 +1748,8 @@ This section of the document explains features or design decisions that are
specific to the code generator for a particular target. First we start with a
table that summarizes what features are supported by each target.
.. _target-feature-matrix:
Target Feature Matrix
---------------------
@ -1741,12 +1764,14 @@ the key:
:raw-html:`<table border="1" cellspacing="0">`
:raw-html:`<tr>`
:raw-html:`<th>Unknown</th>`
:raw-html:`<th>Not Applicable</th>`
:raw-html:`<th>No support</th>`
:raw-html:`<th>Partial Support</th>`
:raw-html:`<th>Complete Support</th>`
:raw-html:`</tr>`
:raw-html:`<tr>`
:raw-html:`<td class="unknown"></td>`
:raw-html:`<td class="na"></td>`
:raw-html:`<td class="no"></td>`
:raw-html:`<td class="partial"></td>`
:raw-html:`<td class="yes"></td>`
@ -1762,12 +1787,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<th>Feature</th>`
:raw-html:`<th>ARM</th>`
:raw-html:`<th>CellSPU</th>`
:raw-html:`<th>Hexagon</th>`
:raw-html:`<th>MBlaze</th>`
:raw-html:`<th>MSP430</th>`
:raw-html:`<th>Mips</th>`
:raw-html:`<th>PTX</th>`
:raw-html:`<th>NVPTX</th>`
:raw-html:`<th>PowerPC</th>`
:raw-html:`<th>Sparc</th>`
:raw-html:`<th>X86</th>`
@ -1777,12 +1801,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_reliable">is generally reliable</a></td>`
:raw-html:`<td class="yes"></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="yes"></td> <!-- Hexagon -->`
:raw-html:`<td class="no"></td> <!-- MBlaze -->`
:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
:raw-html:`<td class="yes"></td> <!-- Mips -->`
:raw-html:`<td class="no"></td> <!-- PTX -->`
:raw-html:`<td class="yes"></td> <!-- NVPTX -->`
:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
:raw-html:`<td class="yes"></td> <!-- Sparc -->`
:raw-html:`<td class="yes"></td> <!-- X86 -->`
@ -1792,12 +1815,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_asmparser">assembly parser</a></td>`
:raw-html:`<td class="no"></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="no"></td> <!-- Hexagon -->`
:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
:raw-html:`<td class="no"></td> <!-- MSP430 -->`
:raw-html:`<td class="no"></td> <!-- Mips -->`
:raw-html:`<td class="no"></td> <!-- PTX -->`
:raw-html:`<td class="no"></td> <!-- NVPTX -->`
:raw-html:`<td class="no"></td> <!-- PowerPC -->`
:raw-html:`<td class="no"></td> <!-- Sparc -->`
:raw-html:`<td class="yes"></td> <!-- X86 -->`
@ -1807,12 +1829,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_disassembler">disassembler</a></td>`
:raw-html:`<td class="yes"></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="no"></td> <!-- Hexagon -->`
:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
:raw-html:`<td class="no"></td> <!-- MSP430 -->`
:raw-html:`<td class="no"></td> <!-- Mips -->`
:raw-html:`<td class="no"></td> <!-- PTX -->`
:raw-html:`<td class="na"></td> <!-- NVPTX -->`
:raw-html:`<td class="no"></td> <!-- PowerPC -->`
:raw-html:`<td class="no"></td> <!-- Sparc -->`
:raw-html:`<td class="yes"></td> <!-- X86 -->`
@ -1822,12 +1843,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_inlineasm">inline asm</a></td>`
:raw-html:`<td class="yes"></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="yes"></td> <!-- Hexagon -->`
:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
:raw-html:`<td class="no"></td> <!-- Mips -->`
:raw-html:`<td class="unknown"></td> <!-- PTX -->`
:raw-html:`<td class="yes"></td> <!-- NVPTX -->`
:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
:raw-html:`<td class="unknown"></td> <!-- Sparc -->`
:raw-html:`<td class="yes"></td> <!-- X86 -->`
@ -1837,12 +1857,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_jit">jit</a></td>`
:raw-html:`<td class="partial"><a href="#feat_jit_arm">*</a></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="no"></td> <!-- Hexagon -->`
:raw-html:`<td class="no"></td> <!-- MBlaze -->`
:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
:raw-html:`<td class="yes"></td> <!-- Mips -->`
:raw-html:`<td class="unknown"></td> <!-- PTX -->`
:raw-html:`<td class="na"></td> <!-- NVPTX -->`
:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
:raw-html:`<td class="unknown"></td> <!-- Sparc -->`
:raw-html:`<td class="yes"></td> <!-- X86 -->`
@ -1852,12 +1871,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_objectwrite">.o&nbsp;file writing</a></td>`
:raw-html:`<td class="no"></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="no"></td> <!-- Hexagon -->`
:raw-html:`<td class="yes"></td> <!-- MBlaze -->`
:raw-html:`<td class="no"></td> <!-- MSP430 -->`
:raw-html:`<td class="no"></td> <!-- Mips -->`
:raw-html:`<td class="no"></td> <!-- PTX -->`
:raw-html:`<td class="na"></td> <!-- NVPTX -->`
:raw-html:`<td class="no"></td> <!-- PowerPC -->`
:raw-html:`<td class="no"></td> <!-- Sparc -->`
:raw-html:`<td class="yes"></td> <!-- X86 -->`
@ -1867,12 +1885,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_tailcall">tail calls</a></td>`
:raw-html:`<td class="yes"></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="yes"></td> <!-- Hexagon -->`
:raw-html:`<td class="no"></td> <!-- MBlaze -->`
:raw-html:`<td class="unknown"></td> <!-- MSP430 -->`
:raw-html:`<td class="no"></td> <!-- Mips -->`
:raw-html:`<td class="unknown"></td> <!-- PTX -->`
:raw-html:`<td class="no"></td> <!-- NVPTX -->`
:raw-html:`<td class="yes"></td> <!-- PowerPC -->`
:raw-html:`<td class="unknown"></td> <!-- Sparc -->`
:raw-html:`<td class="yes"></td> <!-- X86 -->`
@ -1882,12 +1899,11 @@ Here is the table:
:raw-html:`<tr>`
:raw-html:`<td><a href="#feat_segstacks">segmented stacks</a></td>`
:raw-html:`<td class="no"></td> <!-- ARM -->`
:raw-html:`<td class="no"></td> <!-- CellSPU -->`
:raw-html:`<td class="no"></td> <!-- Hexagon -->`
:raw-html:`<td class="no"></td> <!-- MBlaze -->`
:raw-html:`<td class="no"></td> <!-- MSP430 -->`
:raw-html:`<td class="no"></td> <!-- Mips -->`
:raw-html:`<td class="no"></td> <!-- PTX -->`
:raw-html:`<td class="no"></td> <!-- NVPTX -->`
:raw-html:`<td class="no"></td> <!-- PowerPC -->`
:raw-html:`<td class="no"></td> <!-- Sparc -->`
:raw-html:`<td class="partial"><a href="#feat_segstacks_x86">*</a></td> <!-- X86 -->`
@ -1991,8 +2007,8 @@ Tail call optimization
Tail call optimization, where the callee reuses the stack of the caller, is
currently supported on x86/x86-64 and PowerPC. It is performed if:
* Caller and callee have the calling convention ``fastcc`` or ``cc 10`` (GHC
call convention).
* Caller and callee have the calling convention ``fastcc``, ``cc 10`` (GHC
calling convention) or ``cc 11`` (HiPE calling convention).
* The call is a tail call - in tail position (ret immediately follows call and
ret uses value of call or is void).
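For illustration, here is a hedged sketch (function names and values are
invented) of IR that satisfies these conditions: caller and callee both use
``fastcc``, the call is marked ``tail``, and the ``ret`` immediately returns
the call's value.

.. code-block:: llvm

define fastcc i32 @callee(i32 %x) {
  %r = add i32 %x, 1
  ret i32 %r
}

define fastcc i32 @caller(i32 %x) {
  ; Tail position: the result of the call feeds the ret directly.
  %r = tail call fastcc i32 @callee(i32 %x)
  ret i32 %r
}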
@ -2369,17 +2385,17 @@ Dynamic Allocation
TODO - More to come.
The PTX backend
---------------
The NVPTX backend
-----------------
The PTX code generator lives in the lib/Target/PTX directory. It is currently a
work-in-progress, but already supports most of the code generation functionality
needed to generate correct PTX kernels for CUDA devices.
The NVPTX code generator under lib/Target/NVPTX is an open-source version of
the NVIDIA NVPTX code generator for LLVM. It is contributed by NVIDIA and is
a port of the code generator used in the CUDA compiler (nvcc). It targets the
PTX 3.0/3.1 ISA and can target any compute capability greater than or equal to
2.0 (Fermi).
The code generator can target PTX 2.0+, and shader model 1.0+. The PTX ISA
Reference Manual is used as the primary source of ISA information, though an
effort is made to make the output of the code generator match the output of the
NVidia nvcc compiler, whenever possible.
This target is of production quality and should be completely compatible with
the official NVIDIA toolchain.
Code Generator Options:
@ -2389,39 +2405,28 @@ Code Generator Options:
:raw-html:`<th>Description</th>`
:raw-html:`</tr>`
:raw-html:`<tr>`
:raw-html:`<td>``double``</td>`
:raw-html:`<td align="left">If enabled, the map_f64_to_f32 directive is disabled in the PTX output, allowing native double-precision arithmetic</td>`
:raw-html:`<td>sm_20</td>`
:raw-html:`<td align="left">Set shader model/compute capability to 2.0</td>`
:raw-html:`</tr>`
:raw-html:`<tr>`
:raw-html:`<td>``no-fma``</td>`
:raw-html:`<td align="left">Disable generation of Fused-Multiply Add instructions, which may be beneficial for some devices</td>`
:raw-html:`<td>sm_21</td>`
:raw-html:`<td align="left">Set shader model/compute capability to 2.1</td>`
:raw-html:`</tr>`
:raw-html:`<tr>`
:raw-html:`<td>``smxy / computexy``</td>`
:raw-html:`<td align="left">Set shader model/compute capability to x.y, e.g. sm20 or compute13</td>`
:raw-html:`<td>sm_30</td>`
:raw-html:`<td align="left">Set shader model/compute capability to 3.0</td>`
:raw-html:`</tr>`
:raw-html:`<tr>`
:raw-html:`<td>sm_35</td>`
:raw-html:`<td align="left">Set shader model/compute capability to 3.5</td>`
:raw-html:`</tr>`
:raw-html:`<tr>`
:raw-html:`<td>ptx30</td>`
:raw-html:`<td align="left">Target PTX 3.0</td>`
:raw-html:`</tr>`
:raw-html:`<tr>`
:raw-html:`<td>ptx31</td>`
:raw-html:`<td align="left">Target PTX 3.1</td>`
:raw-html:`</tr>`
:raw-html:`</table>`
Working:
* Arithmetic instruction selection (including combo FMA)
* Bitwise instruction selection
* Control-flow instruction selection
* Function calls (only on SM 2.0+ and no return arguments)
* Addresses spaces (0 = global, 1 = constant, 2 = local, 4 = shared)
* Thread synchronization (bar.sync)
* Special register reads ([N]TID, [N]CTAID, PMx, CLOCK, etc.)
In Progress:
* Robust call instruction selection
* Stack frame allocation
* Device-specific instruction scheduling optimizations

View File

@ -1,5 +1,3 @@
.. _coding_standards:
=====================
LLVM Coding Standards
=====================
@ -284,17 +282,10 @@ listed. We prefer these ``#include``\s to be listed in this order:
#. Main Module Header
#. Local/Private Headers
#. ``llvm/*``
#. ``llvm/Analysis/*``
#. ``llvm/Assembly/*``
#. ``llvm/Bitcode/*``
#. ``llvm/CodeGen/*``
#. ...
#. ``llvm/Support/*``
#. ``llvm/Config/*``
#. ``llvm/...``
#. System ``#include``\s
and each category should be sorted by name.
and each category should be sorted lexicographically by the full path.
The `Main Module Header`_ file applies to ``.cpp`` files which implement an
interface defined by a ``.h`` file. This ``#include`` should always be included
@ -409,7 +400,8 @@ code.
That said, LLVM does make extensive use of a hand-rolled form of RTTI that uses
templates like `isa<>, cast<>, and dyn_cast<> <ProgrammersManual.html#isa>`_.
This form of RTTI is opt-in and can be added to any class. It is also
This form of RTTI is opt-in and can be
:doc:`added to any class <HowToSetUpLLVMStyleRTTI>`. It is also
substantially more efficient than ``dynamic_cast<>``.
.. _static constructor:
@ -713,8 +705,8 @@ sort of thing is:
.. code-block:: c++
bool FoundFoo = false;
for (unsigned i = 0, e = BarList.size(); i != e; ++i)
if (BarList[i]->isFoo()) {
for (unsigned I = 0, E = BarList.size(); I != E; ++I)
if (BarList[I]->isFoo()) {
FoundFoo = true;
break;
}
@ -732,8 +724,8 @@ code to be structured like this:
/// \returns true if the specified list has an element that is a foo.
static bool containsFoo(const std::vector<Bar*> &List) {
for (unsigned i = 0, e = List.size(); i != e; ++i)
if (List[i]->isFoo())
for (unsigned I = 0, E = List.size(); I != E; ++I)
if (List[I]->isFoo())
return true;
return false;
}
@ -820,8 +812,8 @@ Here are some examples of good and bad names:
Vehicle MakeVehicle(VehicleType Type) {
VehicleMaker M; // Might be OK if having a short life-span.
Tire tmp1 = M.makeTire(); // Bad -- 'tmp1' provides no information.
Light headlight = M.makeLight("head"); // Good -- descriptive.
Tire Tmp1 = M.makeTire(); // Bad -- 'Tmp1' provides no information.
Light Headlight = M.makeLight("head"); // Good -- descriptive.
...
}
@ -841,9 +833,9 @@ enforced, and hopefully what to do about it. Here is one complete example:
.. code-block:: c++
inline Value *getOperand(unsigned i) {
assert(i < Operands.size() && "getOperand() out of range!");
return Operands[i];
inline Value *getOperand(unsigned I) {
assert(I < Operands.size() && "getOperand() out of range!");
return Operands[I];
}
Here are more examples:
@ -1035,7 +1027,7 @@ form has two problems. First it may be less efficient than evaluating it at the
start of the loop. In this case, the cost is probably minor --- a few extra
loads every time through the loop. However, if the base expression is more
complex, then the cost can rise quickly. I've seen loops where the end
expression was actually something like: "``SomeMap[x]->end()``" and map lookups
expression was actually something like: "``SomeMap[X]->end()``" and map lookups
really aren't cheap. By writing it in the second form consistently, you
eliminate the issue entirely and don't even have to think about it.
@ -1096,6 +1088,34 @@ flushes the output stream. In other words, these are equivalent:
Most of the time, you probably have no reason to flush the output stream, so
it's better to use a literal ``'\n'``.
Don't use ``inline`` when defining a function in a class definition
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A member function defined in a class definition is implicitly inline, so don't
put the ``inline`` keyword in this case.
Don't:
.. code-block:: c++
class Foo {
public:
inline void bar() {
// ...
}
};
Do:
.. code-block:: c++
class Foo {
public:
void bar() {
// ...
}
};
Microscopic Details
-------------------
@ -1111,27 +1131,27 @@ macros. For example, this is good:
.. code-block:: c++
if (x) ...
for (i = 0; i != 100; ++i) ...
while (llvm_rocks) ...
if (X) ...
for (I = 0; I != 100; ++I) ...
while (LLVMRocks) ...
somefunc(42);
assert(3 != 4 && "laws of math are failing me");
a = foo(42, 92) + bar(x);
A = foo(42, 92) + bar(X);
and this is bad:
.. code-block:: c++
if(x) ...
for(i = 0; i != 100; ++i) ...
while(llvm_rocks) ...
if(X) ...
for(I = 0; I != 100; ++I) ...
while(LLVMRocks) ...
somefunc (42);
assert (3 != 4 && "laws of math are failing me");
a = foo (42, 92) + bar (x);
A = foo (42, 92) + bar (X);
The reason for doing this is not completely arbitrary. This style makes control
flow operators stand out more, and makes expressions flow better. The function
@ -1139,11 +1159,11 @@ call operator binds very tightly as a postfix operator. Putting a space after a
function name (as in the last example) makes it appear that the code might bind
the arguments of the left-hand-side of a binary operator with the argument list
of a function and the name of the right side. More specifically, it is easy to
misread the "``a``" example as:
misread the "``A``" example as:
.. code-block:: c++
a = foo ((42, 92) + bar) (x);
A = foo ((42, 92) + bar) (X);
when skimming through the code. By avoiding a space in a function, we avoid
this misinterpretation.
@ -1310,7 +1330,7 @@ namespace just because it was declared there.
See Also
========
A lot of these comments and recommendations have been culled for other sources.
A lot of these comments and recommendations have been culled from other sources.
Two particularly important books for our work are:
#. `Effective C++

View File

@ -1,99 +1,79 @@
FileCheck - Flexible pattern matching file verifier
===================================================
SYNOPSIS
--------
**FileCheck** *match-filename* [*--check-prefix=XXX*] [*--strict-whitespace*]
:program:`FileCheck` *match-filename* [*--check-prefix=XXX*] [*--strict-whitespace*]
DESCRIPTION
-----------
:program:`FileCheck` reads two files (one from standard input, and one
specified on the command line) and uses one to verify the other. This
behavior is particularly useful for the testsuite, which wants to verify that
the output of some tool (e.g. :program:`llc`) contains the expected information
(for example, a movsd from esp or whatever is interesting). This is similar to
using :program:`grep`, but it is optimized for matching multiple different
inputs in one file in a specific order.
**FileCheck** reads two files (one from standard input, and one specified on the
command line) and uses one to verify the other. This behavior is particularly
useful for the testsuite, which wants to verify that the output of some tool
(e.g. llc) contains the expected information (for example, a movsd from esp or
whatever is interesting). This is similar to using grep, but it is optimized
for matching multiple different inputs in one file in a specific order.
The *match-filename* file specifies the file that contains the patterns to
The ``match-filename`` file specifies the file that contains the patterns to
match. The file to verify is always read from standard input.
OPTIONS
-------
**-help**
.. option:: -help
Print a summary of command line options.
.. option:: --check-prefix prefix
FileCheck searches the contents of ``match-filename`` for patterns to match.
By default, these patterns are prefixed with "``CHECK:``". If you'd like to
use a different prefix (e.g. because the same input file is checking multiple
different tools or options), the :option:`--check-prefix` argument allows you
to specify a specific prefix to match.
**--check-prefix** *prefix*
FileCheck searches the contents of *match-filename* for patterns to match. By
default, these patterns are prefixed with "CHECK:". If you'd like to use a
different prefix (e.g. because the same input file is checking multiple
different tool or options), the **--check-prefix** argument allows you to specify
a specific prefix to match.
**--input-file** *filename*
.. option:: --input-file filename
File to check (defaults to stdin).
**--strict-whitespace**
.. option:: --strict-whitespace
By default, FileCheck canonicalizes input horizontal whitespace (spaces and
tabs) which causes it to ignore these differences (a space will match a tab).
The --strict-whitespace argument disables this behavior.
The :option:`--strict-whitespace` argument disables this behavior. End-of-line
sequences are canonicalized to UNIX-style '\n' in all modes.
**-version**
.. option:: -version
Show the version number of this program.
EXIT STATUS
-----------
If **FileCheck** verifies that the file matches the expected contents, it exits
with 0. Otherwise, if not, or if an error occurs, it will exit with a non-zero
value.
If :program:`FileCheck` verifies that the file matches the expected contents,
it exits with 0. Otherwise, if not, or if an error occurs, it will exit with a
non-zero value.
TUTORIAL
--------
FileCheck is typically used from LLVM regression tests, being invoked on the RUN
line of the test. A simple example of using FileCheck from a RUN line looks
like this:
.. code-block:: llvm
; RUN: llvm-as < %s | llc -march=x86-64 | FileCheck %s
This syntax says to pipe the current file ("%s") into llvm-as, pipe that into
llc, then pipe the output of llc into FileCheck. This means that FileCheck will
be verifying its standard input (the llc output) against the filename argument
specified (the original .ll file specified by "%s"). To see how this works,
let's look at the rest of the .ll file (after the RUN line):
This syntax says to pipe the current file ("``%s``") into ``llvm-as``, pipe
that into ``llc``, then pipe the output of ``llc`` into ``FileCheck``. This
means that FileCheck will be verifying its standard input (the llc output)
against the filename argument specified (the original ``.ll`` file specified by
"``%s``"). To see how this works, let's look at the rest of the ``.ll`` file
(after the RUN line):
.. code-block:: llvm
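; A hedged sketch, not the verbatim original test: function bodies consistent
; with the "sub1:" and "inc4:" labels referenced below.
define void @sub1(i32* %p, i32 %v) {
entry:
; CHECK: sub1:
; CHECK: subl
  %0 = atomicrmw sub i32* %p, i32 %v monotonic
  ret void
}

define void @inc4(i64* %p) {
entry:
; CHECK: inc4:
; CHECK: incq
  %0 = atomicrmw add i64* %p, i64 1 monotonic
  ret void
}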
@ -113,32 +93,30 @@ let's look at the rest of the .ll file (after the RUN line):
ret void
}
Here you can see some "``CHECK:``" lines specified in comments. Now you can
see how the file is piped into ``llvm-as``, then ``llc``, and the machine code
output is what we are verifying. FileCheck checks the machine code output to
verify that it matches what the "``CHECK:``" lines specify.
Here you can see some "CHECK:" lines specified in comments. Now you can see
how the file is piped into llvm-as, then llc, and the machine code output is
what we are verifying. FileCheck checks the machine code output to verify that
it matches what the "CHECK:" lines specify.
The syntax of the CHECK: lines is very simple: they are fixed strings that
The syntax of the "``CHECK:``" lines is very simple: they are fixed strings that
must occur in order. FileCheck defaults to ignoring horizontal whitespace
differences (e.g. a space is allowed to match a tab) but otherwise, the contents
of the CHECK: line is required to match some thing in the test file exactly.
of the "``CHECK:``" line is required to match some thing in the test file exactly.
One nice thing about FileCheck (compared to grep) is that it allows merging
test cases together into logical groups. For example, because the test above
is checking for the "sub1:" and "inc4:" labels, it will not match unless there
is a "subl" in between those labels. If it existed somewhere else in the file,
that would not count: "grep subl" matches if subl exists anywhere in the
file.
is checking for the "``sub1:``" and "``inc4:``" labels, it will not match
unless there is a "``subl``" in between those labels. If it existed somewhere
else in the file, that would not count: "``grep subl``" matches if "``subl``"
exists anywhere in the file.
The FileCheck -check-prefix option
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The FileCheck -check-prefix option allows multiple test configurations to be
driven from one .ll file. This is useful in many circumstances, for example,
testing different architectural variants with llc. Here's a simple example:
The FileCheck :option:`-check-prefix` option allows multiple test
configurations to be driven from one `.ll` file. This is useful in many
circumstances, for example, testing different architectural variants with
:program:`llc`. Here's a simple example:
.. code-block:: llvm
@ -157,21 +135,17 @@ testing different architectural variants with llc. Here's a simple example:
; X64: pinsrd $1, %edi, %xmm0
}
In this case, we're testing that we get the expected code generation with
both 32-bit and 64-bit code generation.
The "CHECK-NEXT:" directive
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes you want to match lines and would like to verify that matches
happen on exactly consecutive lines with no other lines in between them. In
this case, you can use CHECK: and CHECK-NEXT: directives to specify this. If
you specified a custom check prefix, just use "<PREFIX>-NEXT:". For
example, something like this works as you'd expect:
this case, you can use "``CHECK:``" and "``CHECK-NEXT:``" directives to specify
this. If you specified a custom check prefix, just use "``<PREFIX>-NEXT:``".
For example, something like this works as you'd expect:
.. code-block:: llvm
@ -193,22 +167,18 @@ example, something like this works as you'd expect:
; CHECK-NEXT: ret
}
CHECK-NEXT: directives reject the input unless there is exactly one newline
between it an the previous directive. A CHECK-NEXT cannot be the first
directive in a file.
"``CHECK-NEXT:``" directives reject the input unless there is exactly one
newline between it and the previous directive. A "``CHECK-NEXT:``" cannot be
the first directive in a file.
The "CHECK-NOT:" directive
~~~~~~~~~~~~~~~~~~~~~~~~~~
The CHECK-NOT: directive is used to verify that a string doesn't occur
The "``CHECK-NOT:``" directive is used to verify that a string doesn't occur
between two matches (or before the first match, or after the last match). For
example, to verify that a load is removed by a transformation, a test like this
can be used:
.. code-block:: llvm
define i8 @coerce_offset0(i32 %V, i32* %P) {
@ -224,27 +194,22 @@ can be used:
; CHECK: ret i8
}
FileCheck Pattern Matching Syntax
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The CHECK: and CHECK-NOT: directives both take a pattern to match. For most
uses of FileCheck, fixed string matching is perfectly sufficient. For some
things, a more flexible form of matching is desired. To support this, FileCheck
allows you to specify regular expressions in matching strings, surrounded by
double braces: **{{yourregex}}**. Because we want to use fixed string
matching for a majority of what we do, FileCheck has been designed to support
mixing and matching fixed string matching with regular expressions. This allows
you to write things like this:
The "``CHECK:``" and "``CHECK-NOT:``" directives both take a pattern to match.
For most uses of FileCheck, fixed string matching is perfectly sufficient. For
some things, a more flexible form of matching is desired. To support this,
FileCheck allows you to specify regular expressions in matching strings,
surrounded by double braces: ``{{yourregex}}``. Because we want to use fixed
string matching for a majority of what we do, FileCheck has been designed to
support mixing and matching fixed string matching with regular expressions.
This allows you to write things like this:
.. code-block:: llvm
; CHECK: movhpd {{[0-9]+}}(%esp), {{%xmm[0-7]}}
In this case, any offset from the ESP register will be allowed, and any xmm
register will be allowed.
@ -252,19 +217,16 @@ Because regular expressions are enclosed with double braces, they are
visually distinct, and you don't need to use escape characters within the double
braces like you would in C. In the rare case that you want to match double
braces explicitly from the input, you can use something ugly like
**{{[{][{]}}** as your pattern.
``{{[{][{]}}`` as your pattern.
FileCheck Variables
~~~~~~~~~~~~~~~~~~~
It is often useful to match a pattern and then verify that it occurs again
later in the file. For codegen tests, this can be useful to allow any register,
but verify that that register is used consistently later. To do this, FileCheck
allows named variables to be defined and substituted into patterns. Here is a
simple example:
but verify that that register is used consistently later. To do this,
:program:`FileCheck` allows named variables to be defined and substituted into
patterns. Here is a simple example:
.. code-block:: llvm
@ -272,19 +234,46 @@ simple example:
; CHECK: notw [[REGISTER:%[a-z]+]]
; CHECK: andw {{.*}}[[REGISTER]]
The first check line matches a regex ``%[a-z]+`` and captures it into the
variable ``REGISTER``. The second line verifies that whatever is in
``REGISTER`` occurs later in the file after an "``andw``". :program:`FileCheck`
variable references are always contained in ``[[ ]]`` pairs, and their names can
be formed with the regex ``[a-zA-Z][a-zA-Z0-9]*``. If a colon follows the name,
then it is a definition of the variable; otherwise, it is a use.
The first check line matches a regex (**%[a-z]+**) and captures it into
the variable "REGISTER". The second line verifies that whatever is in REGISTER
occurs later in the file after an "andw". FileCheck variable references are
always contained in **[[ ]]** pairs, and their names can be formed with the
regex **[a-zA-Z][a-zA-Z0-9]***. If a colon follows the name, then it is a
definition of the variable; otherwise, it is a use.
:program:`FileCheck` variables can be defined multiple times, and uses always
get the latest value. Variables can also be used later on the same line they
were defined on. For example:
.. code-block:: llvm
; CHECK: op [[REG:r[0-9]+]], [[REG]]
Can be useful if you want the operands of ``op`` to be the same register,
and don't care exactly which register it is.
FileCheck Expressions
~~~~~~~~~~~~~~~~~~~~~
Sometimes there's a need to verify output that refers to line numbers of the
match file, e.g. when testing compiler diagnostics. This introduces a certain
fragility of the match file structure, as "``CHECK:``" lines contain absolute
line numbers in the same file, which have to be updated whenever line numbers
change due to text addition or deletion.
To support this case, FileCheck allows using ``[[@LINE]]``,
``[[@LINE+<offset>]]``, ``[[@LINE-<offset>]]`` expressions in patterns. These
expressions expand to the number of the line where the pattern is located
(with an optional integer offset).
This way match patterns can be put near the relevant test lines and include
relative line number references, for example:
.. code-block:: c++
// CHECK: test.cpp:[[@LINE+4]]:6: error: expected ';' after top level declarator
// CHECK-NEXT: {{^int a}}
// CHECK-NEXT: {{^ \^}}
// CHECK-NEXT: {{^ ;}}
int a
FileCheck variables can be defined multiple times, and uses always get the
latest value. Note that variables are all read at the start of a "CHECK" line
and are all defined at the end. This means that if you have something like
"**CHECK: [[XYZ:.\\*]]x[[XYZ]]**", the check line will read the previous
value of the XYZ variable and define a new one after the match is performed. If
you need to do something like this you can probably take advantage of the fact
that FileCheck is not actually line-oriented when it matches, this allows you to
define two separate CHECK lines that match on the same line.
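As a hedged illustration (the variable name and input are invented for this
sketch), the workaround splits the pattern across two "``CHECK:``" lines.
Given an input line such as ``12x12``, the first directive matches ``12x`` and
defines ``XYZ``; the second, because FileCheck is not line-oriented, then
matches the trailing ``12`` on the same input line:

.. code-block:: llvm

; CHECK: [[XYZ:[0-9]+]]x
; CHECK: [[XYZ]]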

View File

@ -1,19 +1,15 @@
bugpoint - automatic test case reduction tool
=============================================
SYNOPSIS
--------
**bugpoint** [*options*] [*input LLVM ll/bc files*] [*LLVM passes*] **--args**
*program arguments*
DESCRIPTION
-----------
**bugpoint** narrows down the source of problems in LLVM tools and passes. It
can be used to debug three types of failures: optimizer crashes, miscompilations
by optimizers, or bad native code generation (including problems in the static
@ -22,82 +18,61 @@ For more information on the design and inner workings of **bugpoint**, as well as
advice for using bugpoint, see *llvm/docs/Bugpoint.html* in the LLVM
distribution.
OPTIONS
-------
**--additional-so** *library*
Load the dynamic shared object *library* into the test program whenever it is
run. This is useful if you are debugging programs which depend on non-LLVM
libraries (such as the X or curses libraries) to run.
**--append-exit-code**\ =\ *{true,false}*
Append the test programs exit code to the output file so that a change in exit
code is considered a test failure. Defaults to false.
**--args** *program args*
Pass all arguments specified after -args to the test program whenever it runs.
Note that if any of the *program args* start with a '-', you should use:
Pass all arguments specified after **--args** to the test program whenever it runs.
Note that if any of the *program args* start with a "``-``", you should use:
.. code-block:: perl
.. code-block:: bash
bugpoint [bugpoint args] --args -- [program args]
The "--" right after the **--args** option tells **bugpoint** to consider any
options starting with ``-`` to be part of the **--args** option, not as options to
**bugpoint** itself.
The "``--``" right after the **--args** option tells **bugpoint** to consider
any options starting with "``-``" to be part of the **--args** option, not as
options to **bugpoint** itself.
**--tool-args** *tool args*
Pass all arguments specified after --tool-args to the LLVM tool under test
Pass all arguments specified after **--tool-args** to the LLVM tool under test
(**llc**, **lli**, etc.) whenever it runs. You should use this option in the
following way:
.. code-block:: perl
.. code-block:: bash
bugpoint [bugpoint args] --tool-args -- [tool args]
The "--" right after the **--tool-args** option tells **bugpoint** to consider any
options starting with ``-`` to be part of the **--tool-args** option, not as
options to **bugpoint** itself. (See **--args**, above.)
The "``--``" right after the **--tool-args** option tells **bugpoint** to
consider any options starting with "``-``" to be part of the **--tool-args**
option, not as options to **bugpoint** itself. (See **--args**, above.)
**--safe-tool-args** *tool args*
Pass all arguments specified after **--safe-tool-args** to the "safe" execution
tool.
**--gcc-tool-args** *gcc tool args*
Pass all arguments specified after **--gcc-tool-args** to the invocation of
**gcc**.
**--opt-args** *opt args*
Pass all arguments specified after **--opt-args** to the invocation of **opt**.
**--disable-{dce,simplifycfg}**
Do not run the specified passes to clean up and reduce the size of the test
@ -105,36 +80,26 @@ OPTIONS
reduce test programs. If you're trying to find a bug in one of these passes,
**bugpoint** may crash.
**--enable-valgrind**
Use valgrind to find faults in the optimization phase. This will allow
bugpoint to find otherwise asymptomatic problems caused by memory
mis-management.
**-find-bugs**
Continually randomize the specified passes and run them on the test program
until a bug is found or the user kills **bugpoint**.
**-help**
Print a summary of command line options.
**--input** *filename*
Open *filename* and redirect the standard input of the test program, whenever
it runs, to come from that file.
**--load** *plugin*
Load the dynamic object *plugin* into **bugpoint** itself. This object should
@ -143,20 +108,15 @@ OPTIONS
optimizations, use the **-help** and **--load** options together; for example:
.. code-block:: perl
.. code-block:: bash
bugpoint --load myNewPass.so -help
**--mlimit** *megabytes*
Specifies an upper limit on memory usage of the optimization and codegen. Set
to zero to disable the limit.
**--output** *filename*
Whenever the test program produces output on its standard output stream, it
@ -164,14 +124,10 @@ OPTIONS
do not use this option, **bugpoint** will attempt to generate a reference output
by compiling the program with the "safe" backend and running it.
**--profile-info-file** *filename*
Profile file loaded by **--profile-loader**.
**--run-{int,jit,llc,custom}**
Whenever the test program is compiled, **bugpoint** should generate code for it
@ -179,8 +135,6 @@ OPTIONS
interpreter, the JIT compiler, the static native code compiler, or a
custom command (see **--exec-command**) respectively.
**--safe-{llc,custom}**
When debugging a code generator, **bugpoint** should use the specified code
@ -192,16 +146,12 @@ OPTIONS
respectively. The interpreter and the JIT backends cannot currently
be used as the "safe" backends.
**--exec-command** *command*
This option defines the command to use with the **--run-custom** and
**--safe-custom** options to execute the bitcode testcase. This can
be useful for cross-compilation.
**--compile-command** *command*
This option defines the command to use with the **--compile-custom**
@ -210,38 +160,28 @@ OPTIONS
generate a reduced unit test, you may add CHECK directives to the
testcase and pass the name of an executable compile-command script in this form:
.. code-block:: sh
#!/bin/sh
llc "$@"
not FileCheck [bugpoint input file].ll < bugpoint-test-program.s
This script will "fail" as long as FileCheck passes. So the result
will be the minimum bitcode that passes FileCheck.
**--safe-path** *path*
This option defines the path to the command to execute with the
**--safe-{int,jit,llc,custom}**
option.
EXIT STATUS
-----------
If **bugpoint** succeeds in finding a problem, it will exit with 0. Otherwise,
if an error occurs, it will exit with a non-zero value.
SEE ALSO
--------
opt|opt

View File

@ -1,5 +1,3 @@
.. _commands:
LLVM Command Guide
------------------
@ -30,6 +28,7 @@ Basic Commands
llvm-diff
llvm-cov
llvm-stress
llvm-symbolizer
Debugging Tools
~~~~~~~~~~~~~~~

View File

@ -1,351 +1,278 @@
lit - LLVM Integrated Tester
============================
SYNOPSIS
--------
**lit** [*options*] [*tests*]
:program:`lit` [*options*] [*tests*]
DESCRIPTION
-----------
:program:`lit` is a portable tool for executing LLVM and Clang style test
suites, summarizing their results, and providing indication of failures.
:program:`lit` is designed to be a lightweight testing tool with as simple a
user interface as possible.
**lit** is a portable tool for executing LLVM and Clang style test suites,
summarizing their results, and providing indication of failures. **lit** is
designed to be a lightweight testing tool with as simple a user interface as
possible.
**lit** should be run with one or more *tests* to run specified on the command
line. Tests can be either individual test files or directories to search for
tests (see "TEST DISCOVERY").
:program:`lit` should be run with one or more *tests* to run specified on the
command line. Tests can be either individual test files or directories to
search for tests (see :ref:`test-discovery`).
Each specified test will be executed (potentially in parallel) and once all
tests have been run **lit** will print summary information on the number of tests
which passed or failed (see "TEST STATUS RESULTS"). The **lit** program will
execute with a non-zero exit code if any tests fail.
tests have been run :program:`lit` will print summary information on the number
of tests which passed or failed (see :ref:`test-status-results`). The
:program:`lit` program will execute with a non-zero exit code if any tests
fail.
By default **lit** will use a succinct progress display and will only print
summary information for test failures. See "OUTPUT OPTIONS" for options
controlling the **lit** progress display and output.
By default :program:`lit` will use a succinct progress display and will only
print summary information for test failures. See :ref:`output-options` for
options controlling the :program:`lit` progress display and output.
**lit** also includes a number of options for controlling how tests are executed
(specific features may depend on the particular test format). See "EXECUTION
OPTIONS" for more information.
:program:`lit` also includes a number of options for controlling how tests are
executed (specific features may depend on the particular test format). See
:ref:`execution-options` for more information.
Finally, **lit** also supports additional options for only running a subset of
the options specified on the command line, see "SELECTION OPTIONS" for
more information.
Users interested in the **lit** architecture or designing a **lit** testing
implementation should see "LIT INFRASTRUCTURE"
Finally, :program:`lit` also supports additional options for only running a
subset of the tests specified on the command line, see
:ref:`selection-options` for more information.
Users interested in the :program:`lit` architecture or designing a
:program:`lit` testing implementation should see :ref:`lit-infrastructure`.
GENERAL OPTIONS
---------------
.. option:: -h, --help
Show the :program:`lit` help message.
**-h**, **--help**
.. option:: -j N, --threads=N
Show the **lit** help message.
Run ``N`` tests in parallel. By default, this is automatically chosen to
match the number of detected available CPUs.
.. option:: --config-prefix=NAME
Search for :file:`{NAME}.cfg` and :file:`{NAME}.site.cfg` when searching for
test suites, instead of :file:`lit.cfg` and :file:`lit.site.cfg`.
**-j** *N*, **--threads**\ =\ *N*
.. option:: --param NAME, --param NAME=VALUE
Run *N* tests in parallel. By default, this is automatically chosen to match
the number of detected available CPUs.
**--config-prefix**\ =\ *NAME*
Search for *NAME.cfg* and *NAME.site.cfg* when searching for test suites,
instead of *lit.cfg* and *lit.site.cfg*.
**--param** *NAME*, **--param** *NAME*\ =\ *VALUE*
Add a user defined parameter *NAME* with the given *VALUE* (or the empty
string if not given). The meaning and use of these parameters is test suite
Add a user defined parameter ``NAME`` with the given ``VALUE`` (or the empty
string if not given). The meaning and use of these parameters is test suite
dependent.
.. _output-options:
OUTPUT OPTIONS
--------------
**-q**, **--quiet**
.. option:: -q, --quiet
Suppress any output except for test failures.
**-s**, **--succinct**
.. option:: -s, --succinct
Show less output, for example don't show information on tests that pass.
**-v**, **--verbose**
.. option:: -v, --verbose
Show more information on test failures, for example the entire test output
instead of just the test result.
**--no-progress-bar**
.. option:: --no-progress-bar
Do not use curses based progress bar.
.. _execution-options:
EXECUTION OPTIONS
-----------------
.. option:: --path=PATH
Specify an additional ``PATH`` to use when searching for executables in tests.
**--path**\ =\ *PATH*
.. option:: --vg
Specify an addition *PATH* to use when searching for executables in tests.
Run individual tests under valgrind (using the memcheck tool). The
``--error-exitcode`` argument for valgrind is used so that valgrind failures
will cause the program to exit with a non-zero status.
When this option is enabled, :program:`lit` will also automatically provide a
"``valgrind``" feature that can be used to conditionally disable (or expect
failure in) certain tests.
.. option:: --vg-arg=ARG
**--vg**
When :option:`--vg` is used, specify an additional argument to pass to
:program:`valgrind` itself.
Run individual tests under valgrind (using the memcheck tool). The
*--error-exitcode* argument for valgrind is used so that valgrind failures will
cause the program to exit with a non-zero status.
.. option:: --vg-leak
When this option is enabled, **lit** will also automatically provide a
"valgrind" feature that can be used to conditionally disable (or expect failure
in) certain tests.
**--vg-arg**\ =\ *ARG*
When *--vg* is used, specify an additional argument to pass to valgrind itself.
**--vg-leak**
When *--vg* is used, enable memory leak checks. When this option is enabled,
**lit** will also automatically provide a "vg_leak" feature that can be
used to conditionally disable (or expect failure in) certain tests.
**--time-tests**
Track the wall time individual tests take to execute and includes the results in
the summary output. This is useful for determining which tests in a test suite
take the most time to execute. Note that this option is most useful with *-j
1*.
When :option:`--vg` is used, enable memory leak checks. When this option is
enabled, :program:`lit` will also automatically provide a "``vg_leak``"
feature that can be used to conditionally disable (or expect failure in)
certain tests.
.. option:: --time-tests
Track the wall time individual tests take to execute and include the results
in the summary output. This is useful for determining which tests in a test
suite take the most time to execute. Note that this option is most useful
with ``-j 1``.
.. _selection-options:
SELECTION OPTIONS
-----------------
.. option:: --max-tests=N
Run at most ``N`` tests and then terminate.
**--max-tests**\ =\ *N*
.. option:: --max-time=N
Run at most *N* tests and then terminate.
Spend at most ``N`` seconds (approximately) running tests and then terminate.
**--max-time**\ =\ *N*
Spend at most *N* seconds (approximately) running tests and then terminate.
**--shuffle**
.. option:: --shuffle
Run the tests in a random order.
ADDITIONAL OPTIONS
------------------
.. option:: --debug
Run :program:`lit` in debug mode, for debugging configuration issues and
:program:`lit` itself.
**--debug**
Run **lit** in debug mode, for debugging configuration issues and **lit** itself.
**--show-suites**
.. option:: --show-suites
List the discovered test suites as part of the standard output.
.. option:: --repeat=N
**--no-tcl-as-sh**
Run Tcl scripts internally (instead of converting to shell scripts).
**--repeat**\ =\ *N*
Run each test *N* times. Currently this is primarily useful for timing tests,
other results are not collated in any reasonable fashion.
Run each test ``N`` times. Currently this is primarily useful for timing
tests, other results are not collated in any reasonable fashion.
EXIT STATUS
-----------
**lit** will exit with an exit code of 1 if there are any FAIL or XPASS
results. Otherwise, it will exit with the status 0. Other exit codes are used
:program:`lit` will exit with an exit code of 1 if there are any FAIL or XPASS
results. Otherwise, it will exit with the status 0. Other exit codes are used
for non-test related failures (for example a user error or an internal program
error).
.. _test-discovery:
TEST DISCOVERY
--------------
The inputs passed to :program:`lit` can be either individual tests, or entire
directories or hierarchies of tests to run. When :program:`lit` starts up, the
first thing it does is convert the inputs into a complete list of tests to run
as part of *test discovery*.
In the :program:`lit` model, every test must exist inside some *test suite*.
:program:`lit` resolves the inputs specified on the command line to test suites
by searching upwards from the input path until it finds a :file:`lit.cfg` or
:file:`lit.site.cfg` file. These files serve as both a marker of test suites
and as configuration files which :program:`lit` loads in order to understand
how to find and run the tests inside the test suite.

Once :program:`lit` has mapped the inputs into test suites it traverses the
list of inputs adding tests for individual files and recursively searching for
tests in directories.
This behavior makes it easy to specify a subset of tests to run, while still
allowing the test suite configuration to control exactly how tests are
interpreted. In addition, **lit** always identifies tests by the test suite they
are in, and their relative path inside the test suite. For appropriately
configured projects, this allows **lit** to provide convenient and flexible
support for out-of-tree builds.
interpreted. In addition, :program:`lit` always identifies tests by the test
suite they are in, and their relative path inside the test suite. For
appropriately configured projects, this allows :program:`lit` to provide
convenient and flexible support for out-of-tree builds.
.. _test-status-results:
TEST STATUS RESULTS
-------------------
Each test ultimately produces one of the following six results:
**PASS**
The test succeeded.
**XFAIL**
The test failed, but that is expected. This is used for test formats which allow
specifying that a test does not currently work, but wish to leave it in the test
suite.
**XPASS**
The test succeeded, but it was expected to fail. This is used for tests which
were specified as expected to fail, but are now succeeding (generally because
the feature they test was broken and has been fixed).
**FAIL**
The test failed.
**UNRESOLVED**
The test result could not be determined. For example, this occurs when the test
could not be run, the test itself is invalid, or the test was interrupted.
**UNSUPPORTED**
The test is not supported in this environment. This is used by test formats
which can report unsupported tests.
Depending on the test format tests may produce additional information about
their status (generally only for failures). See the :ref:`output-options`
section for more information.
.. _lit-infrastructure:
LIT INFRASTRUCTURE
------------------
This section describes the :program:`lit` testing architecture for users interested in
creating a new :program:`lit` testing implementation, or extending an existing one.
:program:`lit` proper is primarily an infrastructure for discovering and running
arbitrary tests, and for exposing a single convenient interface to these
tests. :program:`lit` itself doesn't know how to run tests; rather, this logic is
defined by *test suites*.
TEST SUITES
~~~~~~~~~~~
As described in :ref:`test-discovery`, tests are always located inside a *test
suite*. Test suites serve to define the format of the tests they contain, the
logic for finding those tests, and any additional information to run the tests.
:program:`lit` identifies test suites as directories containing ``lit.cfg`` or
``lit.site.cfg`` files (see also :option:`--config-prefix`). Test suites are
initially discovered by recursively searching up the directory hierarchy for
all the input files passed on the command line. You can use
:option:`--show-suites` to display the discovered test suites at startup.
Once a test suite is discovered, its config file is loaded. Config files
themselves are Python modules which will be executed. When the config file is
executed, two important global variables are predefined:
**lit**
The global **lit** configuration object (a *LitConfig* instance), which defines
the builtin test formats, global configuration parameters, and other helper
routines for implementing test configurations.
**config**
This is the config object (a *TestingConfig* instance) for the test suite,
which the config file is expected to populate. The following variables are also
available on the *config* object, some of which must be set by the config and
others are optional or predefined:
@ -353,135 +280,132 @@ executed, two important global variables are predefined:
diagnostics.
**test_format** *[required]* The test format object which will be used to
discover and run tests in the test suite. Generally this will be a builtin test
format available from the *lit.formats* module.
**test_src_root** The filesystem path to the test suite root. For out-of-dir
builds this is the directory that will be scanned for tests.
**test_exec_root** For out-of-dir builds, the path to the test suite root inside
the object directory. This is where tests will be run and temporary output files
placed.
**environment** A dictionary representing the environment to use when executing
tests in the suite.
**suffixes** For **lit** test formats which scan directories for tests, this
variable is a list of suffixes to identify test files. Used by: *ShTest*.
**substitutions** For **lit** test formats which substitute variables into a test
script, the list of substitutions to perform. Used by: *ShTest*.
**unsupported** Mark an unsupported directory; all tests within it will be
reported as unsupported. Used by: *ShTest*.
**parent** The parent configuration, this is the config object for the directory
containing the test suite, or None.
**root** The root configuration. This is the top-most :program:`lit` configuration in
the project.
**on_clone** The config is actually cloned for every subdirectory inside a test
suite, to allow local configuration on a per-directory basis. The *on_clone*
variable can be set to a Python function which will be called whenever a
configuration is cloned (for a subdirectory). The function should take three
arguments: (1) the parent configuration, (2) the new configuration (which the
*on_clone* function will generally modify), and (3) the test path to the new
directory being scanned.
TEST DISCOVERY
~~~~~~~~~~~~~~
Once test suites are located, :program:`lit` recursively traverses the source
directory (following *test_src_root*) looking for tests. When :program:`lit`
enters a sub-directory, it first checks to see if a nested test suite is
defined in that directory. If so, it loads that test suite recursively,
otherwise it instantiates a local test config for the directory (see
:ref:`local-configuration-files`).
Tests are identified by the test suite they are contained within, and the
relative path inside that suite. Note that the relative path may not refer to
an actual file on disk; some test formats (such as *GoogleTest*) define
"virtual tests" which have a path that contains both the path to the actual
test file and a subpath to identify the virtual test.
.. _local-configuration-files:
LOCAL CONFIGURATION FILES
~~~~~~~~~~~~~~~~~~~~~~~~~
When :program:`lit` loads a subdirectory in a test suite, it instantiates a
local test configuration by cloning the configuration for the parent directory
--- the root of this configuration chain will always be a test suite. Once the
test configuration is cloned :program:`lit` checks for a *lit.local.cfg* file
in the subdirectory. If present, this file will be loaded and can be used to
specialize the configuration for each individual directory. This facility can
be used to define subdirectories of optional tests, or to change other
configuration parameters --- for example, to change the test format, or the
suffixes which identify test files.
TEST RUN OUTPUT FORMAT
~~~~~~~~~~~~~~~~~~~~~~
The :program:`lit` output for a test run conforms to the following schema, in
both short and verbose modes (although in short mode no PASS lines will be
shown). This schema has been chosen to be relatively easy to reliably parse by
a machine (for example in buildbot log scraping), and for other tools to
generate.
Each test result is expected to appear on a line that matches:
.. code-block:: none

  <result code>: <test name> (<progress info>)

where ``<result-code>`` is a standard test result such as PASS, FAIL, XFAIL,
XPASS, UNRESOLVED, or UNSUPPORTED. The performance result codes of IMPROVED and
REGRESSED are also allowed.
The ``<test name>`` field can consist of an arbitrary string containing no
newline.

The ``<progress info>`` field can be used to report progress information such
as (1/300) or can be empty, but even when empty the parentheses are required.
Each test result may include additional (multiline) log information in the
following format:

.. code-block:: none

  <log delineator> TEST '(<test name>)' <trailing delineator>
  ... log message ...
  <log delineator>
where ``<test name>`` should be the name of a preceding reported test, ``<log
delineator>`` is a string of "*" characters *at least* four characters long
(the recommended length is 20), and ``<trailing delineator>`` is an arbitrary
(unparsed) string.
The following is an example of a test run output which consists of four tests A,
B, C, and D, and a log message for the failing test C:

.. code-block:: none

  PASS: A (1 of 4)
  PASS: B (2 of 4)
  FAIL: C (3 of 4)
  ******************** TEST 'C' FAILED ********************
  Test 'C' failed as a result of exit code 1.
  ********************
  PASS: D (4 of 4)
LIT EXAMPLE TESTS
~~~~~~~~~~~~~~~~~
The :program:`lit` distribution contains several example implementations of
test suites in the *ExampleTests* directory.
SEE ALSO
--------
valgrind(1)
View File
@ -1,251 +1,187 @@
llc - LLVM static compiler
==========================
SYNOPSIS
--------
:program:`llc` [*options*] [*filename*]
DESCRIPTION
-----------
The :program:`llc` command compiles LLVM source inputs into assembly language
for a specified architecture. The assembly language output can then be passed
through a native assembler and linker to generate a native executable.
The choice of architecture for the output assembly code is automatically
determined from the input file, unless the :option:`-march` option is used to
override the default.
OPTIONS
-------
If ``filename`` is "``-``" or omitted, :program:`llc` reads from standard input.
Otherwise, it will read from ``filename``. Inputs can be in either the LLVM
assembly language format (``.ll``) or the LLVM bitcode format (``.bc``).
If the :option:`-o` option is omitted, then :program:`llc` will send its output
to standard output if the input is from standard input. If the :option:`-o`
option specifies "``-``", then the output will also be sent to standard output.
If no :option:`-o` option is specified and an input file other than "``-``" is
specified, then :program:`llc` creates the output filename by taking the input
filename, removing any existing ``.bc`` extension, and adding a ``.s`` suffix.
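For instance, under the rules above the first of the following hypothetical
commands reads ``foo.bc`` and writes ``foo.s``, while the second overrides the
architecture and sends the assembly to standard output (the file name and
target are examples only):

.. code-block:: console

  $ llc foo.bc
  $ llc -march=x86 -o - foo.bc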
Other :program:`llc` options are described below.
End-user Options
~~~~~~~~~~~~~~~~
.. option:: -help
Print a summary of command line options.
.. option:: -O=uint
Generate code at different optimization levels. These correspond to the
``-O0``, ``-O1``, ``-O2``, and ``-O3`` optimization levels used by
:program:`llvm-gcc` and :program:`clang`.
.. option:: -mtriple=<target triple>

Override the target triple specified in the input file with the specified
string.

.. option:: -march=<arch>

Specify the architecture for which to generate assembly, overriding the target
encoded in the input file. See the output of ``llc -help`` for a list of
valid architectures. By default this is inferred from the target triple or
autodetected to the current architecture.
.. option:: -mcpu=<cpuname>

Specify a specific chip in the current architecture to generate code for.
By default this is inferred from the target triple and autodetected to
the current architecture. For a list of available CPUs, use:

.. code-block:: none

  llvm-as < /dev/null | llc -march=xyz -mcpu=help

.. option:: -mattr=a1,+a2,-a3,...

Override or control specific attributes of the target, such as whether SIMD
operations are enabled or not. The default set of attributes is set by the
current CPU. For a list of available attributes, use:

.. code-block:: none

  llvm-as < /dev/null | llc -march=xyz -mattr=help

.. option:: --disable-fp-elim
Disable frame pointer elimination optimization.
.. option:: --disable-excess-fp-precision

Disable optimizations that may produce excess precision for floating point.
Note that this option can dramatically slow down code on some systems
(e.g. X86).

.. option:: --enable-no-infs-fp-math

Enable optimizations that assume no Inf values.

.. option:: --enable-no-nans-fp-math

Enable optimizations that assume no NAN values.

.. option:: --enable-unsafe-fp-math
Enable optimizations that make unsafe assumptions about IEEE math (e.g. that
addition is associative) or may not work for all input ranges. These
optimizations allow the code generator to make use of some instructions which
would otherwise not be usable (such as ``fsin`` on X86).
.. option:: --enable-correct-eh-support
Instruct the **lowerinvoke** pass to insert code for correct exception
handling support. This is expensive and is by default omitted for efficiency.
.. option:: --stats
Print statistics recorded by code-generation passes.
.. option:: --time-passes
Record the amount of time needed for each pass and print a report to standard
error.
.. option:: --load=<dso_path>
Dynamically load ``dso_path`` (a path to a dynamically shared object) that
implements an LLVM target. This will permit the target name to be used with
the :option:`-march` option so that code can be generated for that target.
Tuning/Configuration Options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. option:: --print-machineinstrs

Print generated machine code between compilation phases (useful for debugging).

.. option:: --regalloc=<allocator>

Specify the register allocator to use. The default ``allocator`` is *local*.
Valid register allocators are:
*simple*
Very simple "always spill" register allocator
*local*
Local register allocator
*linearscan*
Linear scan global register allocator
*iterativescan*
Iterative scan global register allocator
.. option:: --spiller=<spiller>

Specify the spiller to use for register allocators that support it. Currently
this option is used only by the linear scan register allocator. The default
``spiller`` is *local*. Valid spillers are:
*simple*
Simple spiller
*local*
Local spiller
Intel IA-32-specific Options
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. option:: --x86-asm-syntax=[att|intel]
Specify whether to emit assembly code in AT&T syntax (the default) or Intel
syntax.
EXIT STATUS
-----------
If :program:`llc` succeeds, it will exit with 0. Otherwise, if an error
occurs, it will exit with a non-zero value.
SEE ALSO
--------
lli
View File
@ -50,7 +50,7 @@ GENERAL OPTIONS
**-load**\ =\ *pluginfilename*
Causes **lli** to load the plugin (shared object) named *pluginfilename* and use
it for optimization.
View File
@ -1,424 +1,305 @@
llvm-bcanalyzer - LLVM bitcode analyzer
=======================================
SYNOPSIS
--------
:program:`llvm-bcanalyzer` [*options*] [*filename*]
DESCRIPTION
-----------
The :program:`llvm-bcanalyzer` command is a small utility for analyzing bitcode
files. The tool reads a bitcode file (such as generated with the
:program:`llvm-as` tool) and produces a statistical report on the contents of
the bitcode file. The tool can also dump a low level but human readable
version of the bitcode file. This tool is probably not of much interest or
utility except for those working directly with the bitcode file format. Most
LLVM users can just ignore this tool.
If *filename* is omitted or is ``-``, then :program:`llvm-bcanalyzer` reads its
input from standard input. This is useful for combining the tool into a
pipeline. Output is written to the standard output.
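As a brief sketch (the ``foo.bc`` and ``foo.ll`` files are hypothetical), the
tool can be run on a file directly or at the end of a pipeline:

.. code-block:: console

  $ llvm-bcanalyzer foo.bc
  $ llvm-as < foo.ll | llvm-bcanalyzer -dump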
OPTIONS
-------
.. program:: llvm-bcanalyzer
.. option:: -nodetails
Causes :program:`llvm-bcanalyzer` to abbreviate its output by writing out only
a module level summary. The details for individual functions are not
displayed.
.. option:: -dump
Causes :program:`llvm-bcanalyzer` to dump the bitcode in a human readable
format. This format is significantly different from LLVM assembly and
provides details about the encoding of the bitcode file.
.. option:: -verify
Causes :program:`llvm-bcanalyzer` to verify the module produced by reading the
bitcode. This ensures that the statistics generated are based on a consistent
module.
.. option:: -help
Print a summary of command line options.
EXIT STATUS
-----------
If :program:`llvm-bcanalyzer` succeeds, it will exit with 0. Otherwise, if an
error occurs, it will exit with a non-zero value, usually 1.
SUMMARY OUTPUT DEFINITIONS
--------------------------
The following items are always printed by llvm-bcanalyzer. They comprise the
summary output.
**Bitcode Analysis Of Module**
This just provides the name of the module for which bitcode analysis is being
generated.
**Bitcode Version Number**
The bitcode version (not LLVM version) of the file read by the analyzer.
**File Size**
The size, in bytes, of the entire bitcode file.
**Module Bytes**
The size, in bytes, of the module block. Percentage is relative to File Size.
**Function Bytes**
The size, in bytes, of all the function blocks. Percentage is relative to File
Size.
**Global Types Bytes**
The size, in bytes, of the Global Types Pool. Percentage is relative to File
Size. This is the size of the definitions of all types in the bitcode file.
**Constant Pool Bytes**
The size, in bytes, of the Constant Pool Blocks. Percentage is relative to File
Size.
**Module Globals Bytes**
The size, in bytes, of the Global Variable Definitions and their initializers.
Percentage is relative to File Size.
**Instruction List Bytes**
The size, in bytes, of all the instruction lists in all the functions.
Percentage is relative to File Size. Note that this value is also included in
the Function Bytes.
**Compaction Table Bytes**
The size, in bytes, of all the compaction tables in all the functions.
Percentage is relative to File Size. Note that this value is also included in
the Function Bytes.
**Symbol Table Bytes**
The size, in bytes, of all the symbol tables in all the functions. Percentage is
relative to File Size. Note that this value is also included in the Function
Bytes.
**Dependent Libraries Bytes**
The size, in bytes, of the list of dependent libraries in the module. Percentage
is relative to File Size. Note that this value is also included in the Module
Global Bytes.
**Number Of Bitcode Blocks**
The total number of blocks of any kind in the bitcode file.
**Number Of Functions**
The total number of function definitions in the bitcode file.
**Number Of Types**
The total number of types defined in the Global Types Pool.
**Number Of Constants**
The total number of constants (of any type) defined in the Constant Pool.
**Number Of Basic Blocks**
The total number of basic blocks defined in all functions in the bitcode file.
**Number Of Instructions**
The total number of instructions defined in all functions in the bitcode file.
**Number Of Long Instructions**
The total number of long instructions defined in all functions in the bitcode
file. Long instructions are those taking greater than 4 bytes. Typically long
instructions are GetElementPtr with several indices, PHI nodes, and calls to
functions with large numbers of arguments.
**Number Of Operands**
The total number of operands used in all instructions in the bitcode file.
**Number Of Compaction Tables**
The total number of compaction tables in all functions in the bitcode file.
**Number Of Symbol Tables**
The total number of symbol tables in all functions in the bitcode file.
**Number Of Dependent Libs**
The total number of dependent libraries found in the bitcode file.
**Total Instruction Size**
The total size of the instructions in all functions in the bitcode file.
**Average Instruction Size**
The average number of bytes per instruction across all functions in the bitcode
file. This value is computed by dividing Total Instruction Size by Number Of
Instructions.
**Maximum Type Slot Number**
The maximum value used for a type's slot number. Larger slot number values take
more bytes to encode.
**Maximum Value Slot Number**
The maximum value used for a value's slot number. Larger slot number values take
more bytes to encode.
**Bytes Per Value**
The average size of a Value definition (of any type). This is computed by
dividing File Size by the total number of values of any type.
**Bytes Per Global**
The average size of a global definition (constants and global variables).
**Bytes Per Function**
The average number of bytes per function definition. This is computed by
dividing Function Bytes by Number Of Functions.
**# of VBR 32-bit Integers**
The total number of 32-bit integers encoded using the Variable Bit Rate
encoding scheme.
**# of VBR 64-bit Integers**
The total number of 64-bit integers encoded using the Variable Bit Rate encoding
scheme.
**# of VBR Compressed Bytes**
The total number of bytes consumed by the 32-bit and 64-bit integers that use
the Variable Bit Rate encoding scheme.
**# of VBR Expanded Bytes**
The total number of bytes that would have been consumed by the 32-bit and 64-bit
integers had they not been compressed with the Variable Bit Rate encoding
scheme.
**Bytes Saved With VBR**
The total number of bytes saved by using the Variable Bit Rate encoding scheme.
The percentage is relative to # of VBR Expanded Bytes.
DETAILED OUTPUT DEFINITIONS
---------------------------
The following definitions occur only if the -nodetails option was not given.
The detailed output provides additional information on a per-function basis.
**Type**
The type signature of the function.
**Byte Size**
The total number of bytes in the function's block.
**Basic Blocks**
The number of basic blocks defined by the function.
**Instructions**
The number of instructions defined by the function.
**Long Instructions**
The number of instructions using the long instruction format in the function.
**Operands**
The number of operands used by all instructions in the function.
**Instruction Size**
The number of bytes consumed by instructions in the function.
**Average Instruction Size**
The average number of bytes consumed by the instructions in the function.
This value is computed by dividing Instruction Size by Instructions.
**Bytes Per Instruction**
The average number of bytes used by the function per instruction. This value
is computed by dividing Byte Size by Instructions. Note that this is not the
same as Average Instruction Size. It computes a number relative to the total
function size not just the size of the instruction list.
**Number of VBR 32-bit Integers**
The total number of 32-bit integers found in this function (for any use).
**Number of VBR 64-bit Integers**
The total number of 64-bit integers found in this function (for any use).
**Number of VBR Compressed Bytes**
The total number of bytes in this function consumed by the 32-bit and 64-bit
integers that use the Variable Bit Rate encoding scheme.
**Number of VBR Expanded Bytes**
The total number of bytes in this function that would have been consumed by
the 32-bit and 64-bit integers had they not been compressed with the Variable
Bit Rate encoding scheme.
**Bytes Saved With VBR**
The total number of bytes saved in this function by using the Variable Bit
Rate encoding scheme. The percentage is relative to # of VBR Expanded Bytes.
SEE ALSO
--------
:doc:`/CommandGuide/llvm-dis`, :doc:`/BitCodeFormat`
View File
@ -1,51 +1,39 @@
llvm-cov - emit coverage information
====================================
SYNOPSIS
--------
:program:`llvm-cov` [-gcno=filename] [-gcda=filename] [dump]
DESCRIPTION
-----------
The experimental :program:`llvm-cov` tool reads in a description file generated
by the compiler and a coverage data file generated by the instrumented program.
This program assumes that the description and data files use the same format as
gcov files.
OPTIONS
-------
.. option:: -gcno=filename
This option selects the input description file generated by the compiler while
instrumenting the program.

.. option:: -gcda=filename

This option selects the coverage data file generated by the instrumented
program.

.. option:: -dump

This option enables an output dump that is suitable for a developer to help
debug :program:`llvm-cov` itself.
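A minimal sketch of combining these options, assuming hypothetical
``foo.gcno`` and ``foo.gcda`` files produced by an instrumented build, would
be:

.. code-block:: console

  $ llvm-cov -gcno=foo.gcno -gcda=foo.gcda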
EXIT STATUS
-----------
:program:`llvm-cov` returns 1 if it cannot read input files. Otherwise, it
exits with zero.
View File
@ -1,96 +1,56 @@
llvm-link - LLVM bitcode linker
===============================
SYNOPSIS
--------
:program:`llvm-link` [*options*] *filename ...*
DESCRIPTION
-----------
:program:`llvm-link` takes several LLVM bitcode files and links them together
into a single LLVM bitcode file. It writes the output file to standard output,
unless the :option:`-o` option is used to specify a filename.
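For example, a sketch of linking two hypothetical bitcode files, once as
bitcode and once as LLVM assembly (see the :option:`-o` and :option:`-S`
options below):

.. code-block:: console

  $ llvm-link a.bc b.bc -o linked.bc
  $ llvm-link a.bc b.bc -S -o linked.ll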
OPTIONS
-------
.. option:: -f
Enable binary output on terminals. Normally, :program:`llvm-link` will refuse
to write raw bitcode output if the output stream is a terminal. With this
option, :program:`llvm-link` will write raw bitcode regardless of the output
device.
.. option:: -o filename

Specify the output file name. If ``filename`` is "``-``", then
:program:`llvm-link` will write its output to standard output.

.. option:: -S
Write output in LLVM intermediate language (instead of bitcode).
.. option:: -d
If specified, :program:`llvm-link` prints a human-readable version of the
output bitcode file to standard error.
.. option:: -help
Print a summary of command line options.
.. option:: -v
Verbose mode. Print information about what :program:`llvm-link` is doing.
This typically includes a message for each bitcode file linked in and for each
library found.
EXIT STATUS
-----------
If :program:`llvm-link` succeeds, it will exit with 0. Otherwise, if an error
occurs, it will exit with a non-zero value.
SEE ALSO
--------
gccld
View File
@ -1,48 +1,34 @@
llvm-stress - generate random .ll files
=======================================
SYNOPSIS
--------
:program:`llvm-stress` [-size=filesize] [-seed=initialseed] [-o=outfile]
DESCRIPTION
-----------
The :program:`llvm-stress` tool is used to generate random ``.ll`` files that
can be used to test different components of LLVM.
OPTIONS
-------
.. option:: -o filename
Specify the output filename.
.. option:: -size size
Specify the size of the generated ``.ll`` file.
.. option:: -seed seed
Specify the seed to be used for the randomly generated instructions.
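Putting these options together, a sketch of a reproducible invocation (with an
arbitrary output name) might be:

.. code-block:: console

  $ llvm-stress -size=200 -seed=42 -o=random.ll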
EXIT STATUS
-----------
:program:`llvm-stress` returns 0.
View File
@ -0,0 +1,65 @@
llvm-symbolizer - convert addresses into source code locations
==============================================================
SYNOPSIS
--------
:program:`llvm-symbolizer` [options]
DESCRIPTION
-----------
:program:`llvm-symbolizer` reads object file names and addresses from standard
input and prints corresponding source code locations to standard output. This
program uses debug info sections and symbol table in the object files.
EXAMPLE
--------
.. code-block:: console
$ cat addr.txt
a.out 0x4004f4
/tmp/b.out 0x400528
/tmp/c.so 0x710
$ llvm-symbolizer < addr.txt
main
/tmp/a.cc:4
f(int, int)
/tmp/b.cc:11
h_inlined_into_g
/tmp/header.h:2
g_inlined_into_f
/tmp/header.h:7
f_inlined_into_main
/tmp/source.cc:3
main
/tmp/source.cc:8
OPTIONS
-------
.. option:: -functions
Print function names as well as source file/line locations. Defaults to true.
.. option:: -use-symbol-table
Prefer function names stored in symbol table to function names
in debug info sections. Defaults to true.
.. option:: -demangle
Print demangled function names. Defaults to true.
.. option:: -inlining
If a source code location is in an inlined function, prints all the
inlined frames. Defaults to true.
EXIT STATUS
-----------
:program:`llvm-symbolizer` returns 0. Other exit codes imply internal program error.
View File
@ -1,183 +1,143 @@
opt - LLVM optimizer
====================
SYNOPSIS
--------
:program:`opt` [*options*] [*filename*]
DESCRIPTION
-----------
The :program:`opt` command is the modular LLVM optimizer and analyzer. It
takes LLVM source files as input, runs the specified optimizations or analyses
on it, and then outputs the optimized file or the analysis results. The
function of :program:`opt` depends on whether the :option:`-analyze` option is
given.
When :option:`-analyze` is specified, :program:`opt` performs various analyses
of the input source. It will usually print the results on standard output, but
in a few cases, it will print output to standard error or generate a file with
the analysis output, which is usually done when the output is meant for another
program.
While :option:`-analyze` is *not* given, :program:`opt` attempts to produce an
optimized output file. The optimizations available via :program:`opt` depend
upon what libraries were linked into it as well as any additional libraries
that have been loaded with the :option:`-load` option. Use the :option:`-help`
option to determine what optimizations you can use.
If ``filename`` is omitted from the command line or is "``-``", :program:`opt`
reads its input from standard input. Inputs can be in either the LLVM assembly
language format (``.ll``) or the LLVM bitcode format (``.bc``).
If an output filename is not specified with the :option:`-o` option,
:program:`opt` writes its output to the standard output.
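As a sketch of the two modes described above (the file names are hypothetical,
and ``-domtree`` is merely one example of a pass name accepted on the command
line), the first command optimizes a module while the second prints the result
of an analysis pass:

.. code-block:: console

  $ opt -std-compile-opts foo.bc -o foo-opt.bc
  $ opt -analyze -domtree foo.bc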
OPTIONS
-------
.. option:: -f
Enable binary output on terminals. Normally, :program:`opt` will refuse to
write raw bitcode output if the output stream is a terminal. With this option,
:program:`opt` will write raw bitcode regardless of the output device.
.. option:: -help
Print a summary of command line options.
.. option:: -o <filename>
Specify the output filename.
.. option:: -S
Write output in LLVM intermediate language (instead of bitcode).
.. option:: -{passname}
:program:`opt` provides the ability to run any of LLVM's optimization or
analysis passes in any order. The :option:`-help` option lists all the passes
available. The order in which the options occur on the command line are the
order in which they are executed (within pass constraints).
.. option:: -std-compile-opts
This is short hand for a standard list of *compile time optimization* passes.
This is typically used to optimize the output from the llvm-gcc front end. It
might be useful for other front end compilers as well. To discover the full
set of options available, use the following command:
.. code-block:: sh
llvm-as < /dev/null | opt -std-compile-opts -disable-output -debug-pass=Arguments
.. option:: -disable-inlining
This option is only meaningful when :option:`-std-compile-opts` is given. It
simply removes the inlining pass from the standard list.
.. option:: -disable-opt
This option is only meaningful when :option:`-std-compile-opts` is given. It
disables most, but not all, of the :option:`-std-compile-opts`. The ones that
remain are :option:`-verify`, :option:`-lower-setjmp`, and
:option:`-funcresolve`.
.. option:: -strip-debug
This option causes opt to strip debug information from the module before
applying other optimizations. It is essentially the same as :option:`-strip`
but it ensures that stripping of debug information is done first.
.. option:: -verify-each
This option causes opt to add a verify pass after every pass otherwise
specified on the command line (including :option:`-verify`). This is useful
for cases where it is suspected that a pass is creating an invalid module but
it is not clear which pass is doing it. The combination of
:option:`-std-compile-opts` and :option:`-verify-each` can quickly track down
this kind of problem.
.. option:: -profile-info-file <filename>

Specify the name of the file loaded by the ``-profile-loader`` option.
.. option:: -stats
Print statistics.
.. option:: -time-passes
Record the amount of time needed for each pass and print it to standard
error.
.. option:: -debug
If this is a debug build, this option will enable debug printouts from passes
which use the ``DEBUG()`` macro. See the `LLVM Programmer's Manual
<../ProgrammersManual.html>`_, section ``#DEBUG`` for more information.
.. option:: -load=<plugin>
Load the dynamic object ``plugin``. This object should register new
optimization or analysis passes. Once loaded, the object will add new command
line options to enable various optimizations or analyses. To see the new
complete list of optimizations, use the :option:`-help` and :option:`-load`
options together. For example:
.. code-block:: sh
opt -load=plugin.so -help
.. option:: -p
Print module after each transformation.
EXIT STATUS
-----------
If :program:`opt` succeeds, it will exit with 0. Otherwise, if an error
occurs, it will exit with a non-zero value.
View File
@ -1,186 +1,129 @@
tblgen - Target Description To C++ Code Generator
=================================================
SYNOPSIS
--------
:program:`tblgen` [*options*] [*filename*]
DESCRIPTION
-----------
:program:`tblgen` translates from target description (``.td``) files into C++
code that can be included in the definition of an LLVM target library. Most
users of LLVM will not need to use this program. It is only for assisting with
writing an LLVM target backend.
The input and output of :program:`tblgen` is beyond the scope of this short
introduction. Please see :doc:`../TableGenFundamentals`.
The *filename* argument specifies the name of a Target Description (``.td``)
file to read as input.
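For instance, a sketch of generating register information for a target
description might look like this (the include path and ``.td`` file are
examples; the individual options are described below):

.. code-block:: console

  $ tblgen -gen-register-info -I llvm/include llvm/lib/Target/X86/X86.td -o X86GenRegisterInfo.inc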
OPTIONS
-------
.. option:: -help
Print a summary of command line options.
.. option:: -o filename
Specify the output file name. If ``filename`` is ``-``, then
:program:`tblgen` sends its output to standard output.
.. option:: -I directory

Specify where to find other target description files for inclusion. The
``directory`` value should be a full or partial path to a directory that
contains target description files.

.. option:: -asmparsernum N

Make -gen-asm-parser emit assembly writer number ``N``.

.. option:: -asmwriternum N

Make -gen-asm-writer emit assembly writer number ``N``.

.. option:: -class className
Print the enumeration list for this class.
.. option:: -print-records

Print all records to standard output (default).

.. option:: -print-enums

Print enumeration values for a class.

.. option:: -print-sets

Print expanded sets for testing DAG exprs.

.. option:: -gen-emitter

Generate machine code emitter.

.. option:: -gen-register-info

Generate registers and register classes info.

.. option:: -gen-instr-info

Generate instruction descriptions.

.. option:: -gen-asm-writer

Generate the assembly writer.

.. option:: -gen-disassembler

Generate disassembler.

.. option:: -gen-pseudo-lowering

Generate pseudo instruction lowering.

.. option:: -gen-dag-isel

Generate a DAG (Directed Acyclic Graph) instruction selector.

.. option:: -gen-asm-matcher

Generate assembly instruction matcher.

.. option:: -gen-dfa-packetizer

Generate DFA Packetizer for VLIW targets.

.. option:: -gen-fast-isel

Generate a "fast" instruction selector.

.. option:: -gen-subtarget

Generate subtarget enumerations.

.. option:: -gen-intrinsic

Generate intrinsic information.

.. option:: -gen-tgt-intrinsic

Generate target intrinsic information.

.. option:: -gen-enhanced-disassembly-info

Generate enhanced disassembly info.

.. option:: -version
Show the version number of this program.
EXIT STATUS
-----------
If :program:`tblgen` succeeds, it will exit with 0. Otherwise, if an error
occurs, it will exit with a non-zero value.
View File
@ -1,5 +1,3 @@
==============================
CommandLine 2.0 Library Manual
==============================
@ -68,9 +66,7 @@ CommandLine library to have the following features:
This document will hopefully let you jump in and start using CommandLine in your
utility quickly and painlessly. Additionally it should be a simple reference
manual to figure out how stuff works.
Quick Start Guide
=================
View File
@ -1,5 +1,3 @@
========================================================
Architecture & Platform Information for Compiler Writers
========================================================
@ -12,8 +10,6 @@ Architecture & Platform Information for Compiler Writers
This document is a work-in-progress. Additions and clarifications are
welcome.
Hardware
========
@ -24,6 +20,11 @@ ARM
* `ABI <http://www.arm.com/products/DevTools/ABI.html>`_
AArch64
-------
* `ARMv8 Instruction Set Overview <http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.genc010197a/index.html>`_
Itanium (ia64)
--------------
@ -40,19 +41,15 @@ PowerPC
IBM - Official manuals and docs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `Power Instruction Set Architecture, Versions 2.03 through 2.06 (authentication required, free sign-up) <https://www.power.org/technology-introduction/standards-specifications>`_
* `PowerPC Compiler Writer's Guide <http://www.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF7785256996007558C6>`_
* `Intro to PowerPC Architecture <http://www.ibm.com/developerworks/linux/library/l-powarch/>`_
* `PowerPC Processor Manuals (embedded) <http://www.ibm.com/chips/techlib/techlib.nsf/products/PowerPC>`_
* `Various IBM specifications and white papers <https://www.power.org/documentation/?document_company=105&document_category=all&publish_year=all&grid_order=DESC&grid_sort=title>`_
* `IBM AIX/5L for POWER Assembly Reference <http://publibn.boulder.ibm.com/doc_link/en_US/a_doc_lib/aixassem/alangref/alangreftfrm.htm>`_
@ -81,13 +78,13 @@ AMD - Official manuals and docs
Intel - Official manuals and docs
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `IA-32 manuals <http://developer.intel.com/design/pentium4/manuals/index_new.htm>`_
* `Intel 64 and IA-32 manuals <http://www.intel.com/content/www/us/en/processors/architectures-software-developer-manuals.html>`_
* `Intel Itanium documentation <http://www.intel.com/design/itanium/documentation.htm?iid=ipp_srvr_proc_itanium2+techdocs>`_
Other x86-specific information
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
* `Calling conventions for different C++ compilers and operating systems <http://www.agner.org/assem/calling_conventions.pdf>`_
* `Calling conventions for different C++ compilers and operating systems <http://www.agner.org/optimize/calling_conventions.pdf>`_
Other relevant lists
--------------------
@ -101,6 +98,8 @@ Linux
-----
* `PowerPC 64-bit ELF ABI Supplement <http://www.linuxbase.org/spec/ELF/ppc64/>`_
* `Procedure Call Standard for the AArch64 Architecture <http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055a/IHI0055A_aapcs64.pdf>`_
* `ELF for the ARM 64-bit Architecture (AArch64) <http://infocenter.arm.com/help/topic/com.arm.doc.ihi0056a/IHI0056A_aaelf64.pdf>`_
OS X
----
@ -108,6 +107,12 @@ OS X
* `Mach-O Runtime Architecture <http://developer.apple.com/documentation/Darwin/RuntimeArchitecture-date.html>`_
* `Notes on Mach-O ABI <http://www.unsanity.org/archives/000044.php>`_
NVPTX
=====
* `CUDA Documentation <http://docs.nvidia.com/cuda/index.html>`_ includes the PTX
ISA and Driver API documentation
Miscellaneous Resources
=======================

View File

@ -1,11 +1,7 @@
.. _debugging-jited-code:
==============================
Debugging JIT-ed Code With GDB
==============================
.. sectionauthor:: Reid Kleckner and Eli Bendersky
Background
==========

View File

@ -1,5 +1,3 @@
.. _developer_policy:
=====================
LLVM Developer Policy
=====================
@ -26,8 +24,8 @@ This policy is also designed to accomplish the following objectives:
#. Keep the top of Subversion trees as stable as possible.
#. Establish awareness of the project's `copyright, license, and patent
policies`_ with contributors to the project.
#. Establish awareness of the project's :ref:`copyright, license, and patent
policies <copyright-license-patents>` with contributors to the project.
This policy is aimed at frequent contributors to LLVM. People interested in
contributing one-off patches can do so in an informal way by sending them to the
@ -180,8 +178,8 @@ Developers are required to create test cases for any bugs fixed and any new
features added. Some tips for getting your testcase approved:
* All feature and regression test cases are added to the ``llvm/test``
directory. The appropriate sub-directory should be selected (see the `Testing
Guide <TestingGuide.html>`_ for details).
directory. The appropriate sub-directory should be selected (see the
:doc:`Testing Guide <TestingGuide>` for details).
* Test cases should be written in `LLVM assembly language <LangRef.html>`_
unless the feature or regression being tested requires another language
@ -401,7 +399,7 @@ Hacker!" in the commit message.
Overall, please do not add contributor names to the source code.
.. _copyright, license, and patent policies:
.. _copyright-license-patents:
Copyright, License, and Patents
===============================

0
docs/Dummy.html Normal file
View File

View File

@ -1,5 +1,3 @@
.. _exception_handling:
==========================
Exception Handling in LLVM
==========================
@ -34,13 +32,13 @@ execution of an application.
A more complete description of the Itanium ABI exception handling runtime
support of can be found at `Itanium C++ ABI: Exception Handling
<http://www.codesourcery.com/cxx-abi/abi-eh.html>`_. A description of the
<http://mentorembedded.github.com/cxx-abi/abi-eh.html>`_. A description of the
exception frame format can be found at `Exception Frames
<http://refspecs.freestandards.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html>`_,
<http://refspecs.linuxfoundation.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/ehframechpt.html>`_,
with details of the DWARF 4 specification at `DWARF 4 Standard
<http://dwarfstd.org/Dwarf4Std.php>`_. A description for the C++ exception
table formats can be found at `Exception Handling Tables
<http://www.codesourcery.com/cxx-abi/exceptions.pdf>`_.
<http://mentorembedded.github.com/cxx-abi/exceptions.pdf>`_.
Setjmp/Longjmp Exception Handling
---------------------------------
@ -151,10 +149,10 @@ type info index are passed in as arguments. The landing pad saves the exception
structure reference and then proceeds to select the catch block that corresponds
to the type info of the exception object.
The LLVM `landingpad instruction <LangRef.html#i_landingpad>`_ is used to convey
information about the landing pad to the back end. For C++, the ``landingpad``
instruction returns a pointer and integer pair corresponding to the pointer to
the *exception structure* and the *selector value* respectively.
The LLVM :ref:`i_landingpad` is used to convey information about the landing
pad to the back end. For C++, the ``landingpad`` instruction returns a pointer
and integer pair corresponding to the pointer to the *exception structure* and
the *selector value* respectively.
The ``landingpad`` instruction takes a reference to the personality function to
be used for this ``try``/``catch`` sequence. The remainder of the instruction is
@ -203,10 +201,9 @@ A cleanup is extra code which needs to be run as part of unwinding a scope. C++
destructors are a typical example, but other languages and language extensions
provide a variety of different kinds of cleanups. In general, a landing pad may
need to run arbitrary amounts of cleanup code before actually entering a catch
block. To indicate the presence of cleanups, a `landingpad
instruction <LangRef.html#i_landingpad>`_ should have a *cleanup*
clause. Otherwise, the unwinder will not stop at the landing pad if there are no
catches or filters that require it to.
block. To indicate the presence of cleanups, a :ref:`i_landingpad` should have
a *cleanup* clause. Otherwise, the unwinder will not stop at the landing pad if
there are no catches or filters that require it to.
.. note::
@ -226,9 +223,9 @@ Throw Filters
C++ allows the specification of which exception types may be thrown from a
function. To represent this, a top level landing pad may exist to filter out
invalid types. To express this in LLVM code the `landingpad
instruction <LangRef.html#i_landingpad>`_ will have a filter clause. The clause
consists of an array of type infos. ``landingpad`` will return a negative value
invalid types. To express this in LLVM code the :ref:`i_landingpad` will have a
filter clause. The clause consists of an array of type infos.
``landingpad`` will return a negative value
if the exception does not match any of the type infos. If no match is found then
a call to ``__cxa_call_unexpected`` should be made, otherwise
``_Unwind_Resume``. Each of these functions requires a reference to the
@ -269,8 +266,8 @@ handling information at various points in generated code.
.. _llvm.eh.typeid.for:
llvm.eh.typeid.for
------------------
``llvm.eh.typeid.for``
----------------------
.. code-block:: llvm
@ -283,8 +280,8 @@ function. This value can be used to compare against the result of
.. _llvm.eh.sjlj.setjmp:
llvm.eh.sjlj.setjmp
-------------------
``llvm.eh.sjlj.setjmp``
-----------------------
.. code-block:: llvm
@ -305,8 +302,8 @@ available for use in a target-specific manner.
.. _llvm.eh.sjlj.longjmp:
llvm.eh.sjlj.longjmp
--------------------
``llvm.eh.sjlj.longjmp``
------------------------
.. code-block:: llvm
@ -318,8 +315,8 @@ a buffer populated by `llvm.eh.sjlj.setjmp`_. The frame pointer and stack
pointer are restored from the buffer, then control is transferred to the
destination address.
llvm.eh.sjlj.lsda
-----------------
``llvm.eh.sjlj.lsda``
---------------------
.. code-block:: llvm
@ -330,8 +327,8 @@ the address of the Language Specific Data Area (LSDA) for the current
function. The SJLJ front-end code stores this address in the exception handling
function context for use by the runtime.
llvm.eh.sjlj.callsite
---------------------
``llvm.eh.sjlj.callsite``
-------------------------
.. code-block:: llvm

View File

@ -1,5 +1,3 @@
.. _extending_llvm:
============================================================
Extending LLVM: Adding instructions, intrinsics, types, etc.
============================================================

View File

@ -1,5 +1,3 @@
.. _faq:
================================
Frequently Asked Questions (FAQ)
================================
@ -53,6 +51,29 @@ Some porting problems may exist in the following areas:
like the Bourne Shell and sed. Porting to systems without these tools
(MacOS 9, Plan 9) will require more effort.
What API do I use to store a value to one of the virtual registers in LLVM IR's SSA representation?
---------------------------------------------------------------------------------------------------
In short: you can't. It's actually kind of a silly question once you grok
what's going on. Basically, in code like:
.. code-block:: llvm
%result = add i32 %foo, %bar
, ``%result`` is just a name given to the ``Value`` of the ``add``
instruction. In other words, ``%result`` *is* the add instruction. The
"assignment" doesn't explicitly "store" anything to any "virtual register";
the "``=``" is more like the mathematical sense of equality.
Longer explanation: In order to generate a textual representation of the
IR, some kind of name has to be given to each instruction so that other
instructions can textually reference it. However, the isomorphic in-memory
representation that you manipulate from C++ has no such restriction since
instructions can simply keep pointers to any other ``Value``'s that they
reference. In fact, the names of dummy numbered temporaries like ``%1`` are
not explicitly represented in the in-memory representation at all (see
``Value::getName()``).
Build Problems
==============
@ -79,7 +100,7 @@ grabbing the wrong linker/assembler/etc, there are two ways to fix it:
#. Run ``configure`` with an alternative ``PATH`` that is correct. In a
Bourne compatible shell, the syntax would be:
.. code-block:: bash
.. code-block:: console
% PATH=[the path without the bad program] ./configure ...
@ -106,7 +127,7 @@ I've modified a Makefile in my source tree, but my build tree keeps using the ol
If the Makefile already exists in your object tree, you can just run the
following command in the top level directory of your object tree:
.. code-block:: bash
.. code-block:: console
% ./config.status <relative path to Makefile>;
@ -133,13 +154,13 @@ This is most likely occurring because you built a profile or release
For example, if you built LLVM with the command:
.. code-block:: bash
.. code-block:: console
% gmake ENABLE_PROFILING=1
...then you must run the tests with the following commands:
.. code-block:: bash
.. code-block:: console
% cd llvm/test
% gmake ENABLE_PROFILING=1
@ -175,17 +196,17 @@ After Subversion update, rebuilding gives the error "No rule to make target".
-----------------------------------------------------------------------------
If the error is of the form:
.. code-block:: bash
.. code-block:: console
gmake[2]: *** No rule to make target `/path/to/somefile',
needed by `/path/to/another/file.d'.
needed by `/path/to/another/file.d'.
Stop.
This may occur anytime files are moved within the Subversion repository or
removed entirely. In this case, the best solution is to erase all ``.d``
files, which list dependencies for source files, and rebuild:
.. code-block:: bash
.. code-block:: console
% cd $LLVM_OBJ_DIR
% rm -f `find . -name \*\.d`

View File

@ -1,279 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<link rel="stylesheet" href="_static/llvm.css" type="text/css" media="screen">
<title>Building the LLVM GCC Front-End</title>
</head>
<body>
<h1>
Building the LLVM GCC Front-End
</h1>
<ol>
<li><a href="#instructions">Building llvm-gcc from Source</a></li>
<li><a href="#ada">Building the Ada front-end</a></li>
<li><a href="#fortran">Building the Fortran front-end</a></li>
<li><a href="#license">License Information</a></li>
</ol>
<div class="doc_author">
<p>Written by the LLVM Team</p>
</div>
<!-- *********************************************************************** -->
<h2><a name="instructions">Building llvm-gcc from Source</a></h2>
<!-- *********************************************************************** -->
<div>
<p>This section describes how to acquire and build llvm-gcc 4.2, which is based
on the GCC 4.2.1 front-end. Supported languages are Ada, C, C++, Fortran,
Objective-C and Objective-C++. Note that the instructions for building these
front-ends are completely different (and much easier!) than those for building
llvm-gcc3 in the past.</p>
<ol>
<li><p>Retrieve the appropriate llvm-gcc-4.2-<i>version</i>.source.tar.gz
archive from the <a href="http://llvm.org/releases/">LLVM web
site</a>.</p>
<p>It is also possible to download the sources of the llvm-gcc front end
from a read-only mirror using subversion. To check out the 4.2 code
for first time use:</p>
<div class="doc_code">
<pre>
svn co http://llvm.org/svn/llvm-project/llvm-gcc-4.2/trunk <i>dst-directory</i>
</pre>
</div>
<p>After that, the code can be updated in the destination directory
using:</p>
<div class="doc_code">
<pre>svn update</pre>
</div>
<p>The mirror is brought up to date every evening.</p></li>
<li>Follow the directions in the top-level <tt>README.LLVM</tt> file for
up-to-date instructions on how to build llvm-gcc. See below for building
with support for Ada or Fortran.
</ol>
</div>
<!-- *********************************************************************** -->
<h2><a name="ada">Building the Ada front-end</a></h2>
<!-- *********************************************************************** -->
<div>
<p>Building with support for Ada amounts to following the directions in the
top-level <tt>README.LLVM</tt> file, adding ",ada" to EXTRALANGS, for example:
<tt>EXTRALANGS=,ada</tt></p>
<p>There are some complications however:</p>
<ol>
<li><p>The only platform for which the Ada front-end is known to build is
32 bit intel x86 running linux. It is unlikely to build for other
systems without some work.</p></li>
<li><p>The build requires having a compiler that supports Ada, C and C++.
The Ada front-end is written in Ada so an Ada compiler is needed to
build it. Compilers known to work with the
<a href="http://llvm.org/releases/download.html">LLVM 2.7 release</a>
are <a href="http://gcc.gnu.org/releases.html">gcc-4.2</a> and the
2005, 2006 and 2007 versions of the
<a href="http://libre.adacore.com/">GNAT GPL Edition</a>.
<b>GNAT GPL 2008, gcc-4.3 and later will not work</b>.
The LLVM parts of llvm-gcc are written in C++ so a C++ compiler is
needed to build them. The rest of gcc is written in C.
Some linux distributions provide a version of gcc that supports all
three languages (the Ada part often comes as an add-on package to
the rest of gcc). Otherwise it is possible to combine two versions
of gcc, one that supports Ada and C (such as the
<a href="http://libre.adacore.com/">2007 GNAT GPL Edition</a>)
and another which supports C++, see below.</p></li>
<li><p>Because the Ada front-end is experimental, it is wise to build the
compiler with checking enabled. This causes it to run much slower, but
helps catch mistakes in the compiler (please report any problems using
<a href="http://llvm.org/bugs/">LLVM bugzilla</a>).</p></li>
<li><p>The Ada front-end <a href="http://llvm.org/PR2007">fails to
bootstrap</a>, due to lack of LLVM support for
<tt>setjmp</tt>/<tt>longjmp</tt> style exception handling (used
internally by the compiler), so you must specify
<tt>--disable-bootstrap</tt>.</p></li>
</ol>
<p>Supposing appropriate compilers are available, llvm-gcc with Ada support can
be built on an x86-32 linux box using the following recipe:</p>
<ol>
<li><p>Download the <a href="http://llvm.org/releases/download.html">LLVM source</a>
and unpack it:</p>
<pre class="doc_code">
wget http://llvm.org/releases/2.7/llvm-2.7.tgz
tar xzf llvm-2.7.tgz
mv llvm-2.7 llvm
</pre>
<p>or <a href="GettingStarted.html#checkout">check out the
latest version from subversion</a>:</p>
<pre class="doc_code">svn co http://llvm.org/svn/llvm-project/llvm/trunk llvm</pre>
</li>
<li><p>Download the
<a href="http://llvm.org/releases/download.html">llvm-gcc-4.2 source</a>
and unpack it:</p>
<pre class="doc_code">
wget http://llvm.org/releases/2.7/llvm-gcc-4.2-2.7.source.tgz
tar xzf llvm-gcc-4.2-2.7.source.tgz
mv llvm-gcc-4.2-2.7.source llvm-gcc-4.2
</pre>
<p>or <a href="GettingStarted.html#checkout">check out the
latest version from subversion</a>:</p>
<pre class="doc_code">
svn co http://llvm.org/svn/llvm-project/llvm-gcc-4.2/trunk llvm-gcc-4.2
</pre>
</li>
<li><p>Make a build directory <tt>llvm-objects</tt> for llvm and make it the
current directory:</p>
<pre class="doc_code">
mkdir llvm-objects
cd llvm-objects
</pre>
</li>
<li><p>Configure LLVM (here it is configured to install into <tt>/usr/local</tt>):</p>
<pre class="doc_code">
../llvm/configure --prefix=<b>/usr/local</b> --enable-optimized --enable-assertions
</pre>
<p>If you have a multi-compiler setup and the C++ compiler is not the
default, then you can configure like this:</p>
<pre class="doc_code">
CXX=<b>PATH_TO_C++_COMPILER</b> ../llvm/configure --prefix=<b>/usr/local</b> --enable-optimized --enable-assertions
</pre>
<p>To compile without checking (not recommended), replace
<tt>--enable-assertions</tt> with <tt>--disable-assertions</tt>.</p>
</li>
<li><p>Build LLVM:</p>
<pre class="doc_code">
make
</pre>
</li>
<li><p>Install LLVM (optional):</p>
<pre class="doc_code">
make install
</pre>
</li>
<li><p>Make a build directory <tt>llvm-gcc-4.2-objects</tt> for llvm-gcc and make it the
current directory:</p>
<pre class="doc_code">
cd ..
mkdir llvm-gcc-4.2-objects
cd llvm-gcc-4.2-objects
</pre>
</li>
<li><p>Configure llvm-gcc (here it is configured to install into <tt>/usr/local</tt>).
The <tt>--enable-checking</tt> flag turns on sanity checks inside the compiler.
To turn off these checks (not recommended), replace <tt>--enable-checking</tt>
with <tt>--disable-checking</tt>.
Additional languages can be appended to the <tt>--enable-languages</tt> switch,
for example <tt>--enable-languages=ada,c,c++</tt>.</p>
<pre class="doc_code">
../llvm-gcc-4.2/configure --prefix=<b>/usr/local</b> --enable-languages=ada,c \
--enable-checking --enable-llvm=$PWD/../llvm-objects \
--disable-bootstrap --disable-multilib
</pre>
<p>If you have a multi-compiler setup, then you can configure like this:</p>
<pre class="doc_code">
export CC=<b>PATH_TO_C_AND_ADA_COMPILER</b>
export CXX=<b>PATH_TO_C++_COMPILER</b>
../llvm-gcc-4.2/configure --prefix=<b>/usr/local</b> --enable-languages=ada,c \
--enable-checking --enable-llvm=$PWD/../llvm-objects \
--disable-bootstrap --disable-multilib
</pre>
</li>
<li><p>Build and install the compiler:</p>
<pre class="doc_code">
make
make install
</pre>
</li>
</ol>
</div>
<!-- *********************************************************************** -->
<h2><a name="fortran">Building the Fortran front-end</a></h2>
<!-- *********************************************************************** -->
<div>
<p>To build with support for Fortran, follow the directions in the top-level
<tt>README.LLVM</tt> file, adding ",fortran" to EXTRALANGS, for example:</p>
<pre class="doc_code">
EXTRALANGS=,fortran
</pre>
</div>
<!-- *********************************************************************** -->
<h2><a name="license">License Information</a></h2>
<!-- *********************************************************************** -->
<div>
<p>
The LLVM GCC frontend is licensed to you under the GNU General Public License
and the GNU Lesser General Public License. Please see the files COPYING and
COPYING.LIB for more details.
</p>
<p>
More information is <a href="FAQ.html#license">available in the FAQ</a>.
</p>
</div>
<!-- *********************************************************************** -->
<hr>
<address>
<a href="http://jigsaw.w3.org/css-validator/check/referer"><img
src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
<a href="http://validator.w3.org/check/referer"><img
src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
<a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2012-04-19 22:20:34 +0200 (Thu, 19 Apr 2012) $
</address>
</body>
</html>

File diff suppressed because it is too large Load Diff

1029
docs/GarbageCollection.rst Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,3 @@
.. _gep:
=======================================
The Often Misunderstood GEP Instruction
=======================================
@ -22,7 +20,7 @@ Address Computation
When people are first confronted with the GEP instruction, they tend to relate
it to known concepts from other programming paradigms, most notably C array
indexing and field selection. GEP closely resembles C array indexing and field
selection, however it's is a little different and this leads to the following
selection, however it is a little different and this leads to the following
questions.
What is the first index of the GEP instruction?
@ -190,7 +188,7 @@ In this example, we have a global variable, ``%MyVar`` that is a pointer to a
structure containing a pointer to an array of 40 ints. The GEP instruction seems
to be accessing the 18th integer of the structure's array of ints. However, this
is actually an illegal GEP instruction. It won't compile. The reason is that the
pointer in the structure <i>must</i> be dereferenced in order to index into the
pointer in the structure *must* be dereferenced in order to index into the
array of 40 ints. Since the GEP instruction never accesses memory, it is
illegal.
@ -416,7 +414,7 @@ arithmetic, and inttoptr sequences.
Can I compute the distance between two objects, and add that value to one address to compute the other address?
---------------------------------------------------------------------------------------------------------------
As with arithmetic on null, You can use GEP to compute an address that way, but
As with arithmetic on null, you can use GEP to compute an address that way, but
you can't use that pointer to actually access the object if you do, unless the
object is managed outside of LLVM.

View File

@ -1,9 +1,10 @@
.. _getting_started:
====================================
Getting Started with the LLVM System
====================================
.. contents::
:local:
Overview
========
@ -68,33 +69,30 @@ Here's the short story for getting up and running quickly with LLVM:
* ``../llvm/configure [options]``
Some common options:
* ``--prefix=directory`` ---
* ``--prefix=directory`` --- Specify for *directory* the full pathname of
where you want the LLVM tools and libraries to be installed (default
``/usr/local``).
Specify for *directory* the full pathname of where you want the LLVM
tools and libraries to be installed (default ``/usr/local``).
* ``--enable-optimized`` --- Compile with optimizations enabled (default
is NO).
* ``--enable-optimized`` ---
Compile with optimizations enabled (default is NO).
* ``--enable-assertions`` ---
Compile with assertion checks enabled (default is YES).
* ``--enable-assertions`` --- Compile with assertion checks enabled
(default is YES).
* ``make [-j]`` --- The ``-j`` specifies the number of jobs (commands) to run
simultaneously. This builds both LLVM and Clang for Debug+Asserts mode.
The --enabled-optimized configure option is used to specify a Release
The ``--enable-optimized`` configure option is used to specify a Release
build.
* ``make check-all`` --- This runs the regression tests to ensure everything
is in working order.
* ``make update`` --- This command is used to update all the svn repositories
at once, rather than having to ``cd`` into each individual repository and
run ``svn update``.
* It is also possible to use CMake instead of the makefiles. With CMake it is
also possible to generate project files for several IDEs: Eclipse CDT4,
possible to generate project files for several IDEs: Xcode, Eclipse CDT4,
CodeBlocks, Qt-Creator (use the CodeBlocks generator), KDevelop3.
* If you get an "internal compiler error (ICE)" or test failures, see
@ -126,6 +124,8 @@ LLVM is known to work on the following platforms:
+-----------------+----------------------+-------------------------+
|Linux | amd64 | GCC |
+-----------------+----------------------+-------------------------+
|Linux | ARM\ :sup:`13` | GCC |
+-----------------+----------------------+-------------------------+
|Solaris | V9 (Ultrasparc) | GCC |
+-----------------+----------------------+-------------------------+
|FreeBSD | x86\ :sup:`1` | GCC |
@ -161,8 +161,6 @@ LLVM has partial support for the following platforms:
.. note::
Code generation supported for Pentium processors and up
#. Code generation supported for Pentium processors and up
#. Code generation supported for 32-bit ABI only
#. No native code generation
@ -182,9 +180,9 @@ LLVM has partial support for the following platforms:
Windows-specifics that will cause the build to fail.
#. To use LLVM modules on Win32-based system, you may configure LLVM
with ``--enable-shared``.
#. To compile SPU backend, you need to add ``LDFLAGS=-Wl,--stack,16777216`` to
configure.
#. MCJIT does not work well before ARMv7; the old JIT engine is no longer supported.
Note that you will need about 1-3 GB of space for a full LLVM build in Debug
mode, depending on the system (it is so large because of all the debugging
@ -219,11 +217,7 @@ uses the package and provides other details.
+--------------------------------------------------------------+-----------------+---------------------------------------------+
| `SVN <http://subversion.tigris.org/project_packages.html>`_ | >=1.3 | Subversion access to LLVM\ :sup:`2` |
+--------------------------------------------------------------+-----------------+---------------------------------------------+
| `DejaGnu <http://savannah.gnu.org/projects/dejagnu>`_ | 1.4.2 | Automated test suite\ :sup:`3` |
+--------------------------------------------------------------+-----------------+---------------------------------------------+
| `tcl <http://www.tcl.tk/software/tcltk/>`_ | 8.3, 8.4 | Automated test suite\ :sup:`3` |
+--------------------------------------------------------------+-----------------+---------------------------------------------+
| `expect <http://expect.nist.gov/>`_ | 5.38.0 | Automated test suite\ :sup:`3` |
| `python <http://www.python.org/>`_ | >=2.4 | Automated test suite\ :sup:`3` |
+--------------------------------------------------------------+-----------------+---------------------------------------------+
| `perl <http://www.perl.com/download.csp>`_ | >=5.6.0 | Utilities |
+--------------------------------------------------------------+-----------------+---------------------------------------------+
@ -368,6 +362,9 @@ optimizations are turned on. The symptom is an infinite loop in
``-O0``. A test failure in ``test/Assembler/alignstack.ll`` is one symptom of
the problem.
**GCC 4.6.3 on ARM**: Miscompiles ``llvm-readobj`` at ``-O3``. A test failure
in ``test/Object/readobj-shared-object.test`` is one symptom of the problem.
**GNU ld 2.16.X**. Some 2.16.X versions of the ld linker will produce very long
warning messages complaining that some "``.gnu.linkonce.t.*``" symbol was
defined in a discarded section. You can safely ignore these messages as they are
@ -384,6 +381,14 @@ intermittent failures when building LLVM with position independent code. The
symptom is an error about cyclic dependencies. We recommend upgrading to a
newer version of Gold.
**Clang 3.0 with libstdc++ 4.7.x**: a few Linux distributions (Ubuntu 12.10,
Fedora 17) have both Clang 3.0 and libstdc++ 4.7 in their repositories. Clang
3.0 does not implement a few builtins that are used in this library. We
recommend using the system GCC to compile LLVM and Clang in this case.
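A minimal sketch of doing so (the compiler paths are assumptions; substitute whatever your distribution provides):
.. code-block:: console
% CC=/usr/bin/gcc CXX=/usr/bin/g++ ../llvm/configure --enable-optimized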
**Clang 3.0 on Mageia 2**. There's a packaging issue: Clang cannot find at
least some libstdc++ headers (for example, ``cxxabi.h``).
.. _Getting Started with LLVM:
Getting Started with LLVM
@ -459,6 +464,8 @@ The files are as follows, with *x.y* marking the version number:
Binary release of the llvm-gcc-4.2 front end for a specific platform.
.. _checkout:
Checkout LLVM from Subversion
-----------------------------
@ -505,7 +512,7 @@ directory:
If you would like to get the LLVM test suite (a separate package as of 1.4), you
get it from the Subversion repository:
.. code-block:: bash
.. code-block:: console
% cd llvm/projects
% svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite
@ -514,33 +521,46 @@ By placing it in the ``llvm/projects``, it will be automatically configured by
the LLVM configure script as well as automatically updated when you run ``svn
update``.
GIT mirror
Git Mirror
----------
GIT mirrors are available for a number of LLVM subprojects. These mirrors sync
Git mirrors are available for a number of LLVM subprojects. These mirrors sync
automatically with each Subversion commit and contain all necessary git-svn
marks (so, you can recreate git-svn metadata locally). Note that right now
mirrors reflect only ``trunk`` for each project. You can do the read-only GIT
mirrors reflect only ``trunk`` for each project. You can do the read-only Git
clone of LLVM via:
.. code-block:: bash
.. code-block:: console
% git clone http://llvm.org/git/llvm.git
If you want to check out clang too, run:
.. code-block:: bash
.. code-block:: console
% git clone http://llvm.org/git/llvm.git
% cd llvm/tools
% git clone http://llvm.org/git/clang.git
If you want to check out compiler-rt too, run:
.. code-block:: console
% cd llvm/projects
% git clone http://llvm.org/git/compiler-rt.git
If you want to check out the Test Suite Source Code (optional), run:
.. code-block:: console
% cd llvm/projects
% git clone http://llvm.org/git/test-suite.git
Since the upstream repository is in Subversion, you should use ``git
pull --rebase`` instead of ``git pull`` to avoid generating a non-linear history
in your clone. To configure ``git pull`` to pass ``--rebase`` by default on the
master branch, run the following command:
.. code-block:: bash
.. code-block:: console
% git config branch.master.rebase true
@ -553,13 +573,13 @@ Assume ``master`` points the upstream and ``mybranch`` points your working
branch, and ``mybranch`` is rebased onto ``master``. First, you may check for
whitespace errors:
.. code-block:: bash
.. code-block:: console
% git diff --check master..mybranch
The easiest way to generate a patch is as below:
.. code-block:: bash
.. code-block:: console
% git diff master..mybranch > /path/to/mybranch.diff
@ -570,20 +590,20 @@ could be accepted with ``patch -p1 -N``.
You may also generate a patchset with ``git format-patch``, which produces one
patch file per commit. To generate patch files to attach to your article:
.. code-block:: bash
.. code-block:: console
% git format-patch --no-attach master..mybranch -o /path/to/your/patchset
If you would like to send patches directly, you may use git-send-email or
git-imap-send. Here is an example of generating the patchset in Gmail's [Drafts].
.. code-block:: bash
.. code-block:: console
% git format-patch --attach master..mybranch --stdout | git imap-send
Then, your .git/config should have [imap] sections.
.. code-block:: bash
.. code-block:: ini
[imap]
host = imaps://imap.gmail.com
@ -603,7 +623,7 @@ For developers to work with git-svn
To set up clone from which you can submit code using ``git-svn``, run:
.. code-block:: bash
.. code-block:: console
% git clone http://llvm.org/git/llvm.git
% cd llvm
@ -619,10 +639,12 @@ To set up clone from which you can submit code using ``git-svn``, run:
% git config svn-remote.svn.fetch :refs/remotes/origin/master
% git svn rebase -l
To update this clone without generating git-svn tags that conflict with the
upstream git repo, run:
Likewise for compiler-rt and test-suite.
.. code-block:: bash
To update this clone without generating git-svn tags that conflict with the
upstream Git repo, run:
.. code-block:: console
% git fetch && (cd tools/clang && git fetch) # Get matching revisions of both trees.
% git checkout master
@ -631,20 +653,65 @@ upstream git repo, run:
git checkout master &&
git svn rebase -l)
Likewise for compiler-rt and test-suite.
This leaves your working directories on their master branches, so you'll need to
``checkout`` each working branch individually and ``rebase`` it on top of its
parent branch. (Note: This script is intended for relative newbies to git. If
you have more experience, you can likely improve on it.)
parent branch.
For those who wish to be able to update an llvm repo in a simpler fashion,
consider placing the following Git script in your path under the name
``git-svnup``:
.. code-block:: bash
#!/bin/bash
STATUS=$(git status -s | grep -v "??")
if [ ! -z "$STATUS" ]; then
STASH="yes"
git stash >/dev/null
fi
git fetch
OLD_BRANCH=$(git rev-parse --abbrev-ref HEAD)
git checkout master 2> /dev/null
git svn rebase -l
git checkout $OLD_BRANCH 2> /dev/null
if [ ! -z $STASH ]; then
git stash pop >/dev/null
fi
Then, to perform the aforementioned update steps, go into your source directory
and just type ``git-svnup`` or ``git svnup`` and everything will just work.
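For instance (assuming the script above has been installed somewhere on your ``PATH``):
.. code-block:: console
% cd llvm
% git svnup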
To commit back changes via git-svn, use ``dcommit``:
.. code-block:: console
% git svn dcommit
Note that git-svn will create one SVN commit for each Git commit you have pending,
so squash and edit each commit before executing ``dcommit`` to make sure they all
conform to the coding standards and the developers' policy.
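A hedged example of one way to squash and edit the pending commits first (``origin/master`` is assumed to be the branch tracking upstream in the setup described above):
.. code-block:: console
% git rebase -i origin/master   # squash/reword the pending commits
% git svn dcommit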
On success, ``dcommit`` will rebase against the HEAD of SVN, so to avoid conflict,
please make sure your current branch is up-to-date (via fetch/rebase) before
proceeding.
The git-svn metadata can get out of sync after you mess around with branches and
``dcommit``. When that happens, ``git svn dcommit`` stops working, complaining
about files with uncommitted changes. The fix is to rebuild the metadata:
.. code-block:: bash
.. code-block:: console
% rm -rf .git/svn
% git svn rebase -l
Please, refer to the Git-SVN manual (``man git-svn``) for more information.
Local LLVM Configuration
------------------------
@ -661,14 +728,15 @@ configure the build system:
| Variable | Purpose |
+============+===========================================================+
| CC | Tells ``configure`` which C compiler to use. By default, |
| | ``configure`` will look for the first GCC C compiler in |
| | ``PATH``. Use this variable to override ``configure``\'s |
| | default behavior. |
| | ``configure`` will check ``PATH`` for ``clang`` and GCC C |
| | compilers (in this order). Use this variable to override |
| | ``configure``\'s default behavior. |
+------------+-----------------------------------------------------------+
| CXX | Tells ``configure`` which C++ compiler to use. By |
| | default, ``configure`` will look for the first GCC C++ |
| | compiler in ``PATH``. Use this variable to override |
| | ``configure``'s default behavior. |
| | default, ``configure`` will check ``PATH`` for |
| | ``clang++`` and GCC C++ compilers (in this order). Use |
| | this variable to override ``configure``'s default |
| | behavior. |
+------------+-----------------------------------------------------------+
The following options can be used to set or enable LLVM specific options:
@ -722,13 +790,13 @@ To configure LLVM, follow these steps:
#. Change directory into the object root directory:
.. code-block:: bash
.. code-block:: console
% cd OBJ_ROOT
#. Run the ``configure`` script located in the LLVM source tree:
.. code-block:: bash
.. code-block:: console
% SRC_ROOT/configure --prefix=/install/path [other options]
@ -764,7 +832,7 @@ Profile Builds
Once you have LLVM configured, you can build it by entering the *OBJ_ROOT*
directory and issuing the following command:
.. code-block:: bash
.. code-block:: console
% gmake
@ -775,7 +843,7 @@ If you have multiple processors in your machine, you may wish to use some of the
parallel build options provided by GNU Make. For example, you could use the
command:
.. code-block:: bash
.. code-block:: console
% gmake -j2
@ -842,12 +910,39 @@ any subdirectories that it contains. Entering any directory inside the LLVM
object tree and typing ``gmake`` should rebuild anything in or below that
directory that is out of date.
This does not apply to building the documentation.
LLVM's (non-Doxygen) documentation is produced with the
`Sphinx <http://sphinx-doc.org/>`_ documentation generation system.
There are some HTML documents that have not yet been converted to the new
system (which uses the easy-to-read and easy-to-write
`reStructuredText <http://sphinx-doc.org/rest.html>`_ plaintext markup
language).
The generated documentation is built in the ``SRC_ROOT/docs`` directory using
a special makefile.
For instructions on how to install Sphinx, see
`Sphinx Introduction for LLVM Developers
<http://lld.llvm.org/sphinx_intro.html>`_.
After following the instructions there for installing Sphinx, build the LLVM
HTML documentation by doing the following:
.. code-block:: console
$ cd SRC_ROOT/docs
$ make -f Makefile.sphinx
This creates a ``_build/html`` sub-directory with all of the HTML files, not
just the generated ones.
This directory corresponds to ``llvm.org/docs``.
For example, ``_build/html/SphinxQuickstartTemplate.html`` corresponds to
``llvm.org/docs/SphinxQuickstartTemplate.html``.
The :doc:`SphinxQuickstartTemplate` is useful when creating a new document.
Cross-Compiling LLVM
--------------------
It is possible to cross-compile LLVM itself. That is, you can create LLVM
executables and libraries to be hosted on a platform different from the platform
where they are build (a Canadian Cross build). To configure a cross-compile,
where they are built (a Canadian Cross build). To configure a cross-compile,
supply the configure script with ``--build`` and ``--host`` options that are
different. The values of these options must be legal target triples that your
GCC compiler supports.
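As a rough sketch (these triples are placeholders; use ones your cross toolchain actually supports):
.. code-block:: console
% SRC_ROOT/configure --build=x86_64-unknown-linux-gnu \
                     --host=arm-none-linux-gnueabi \
                     --target=arm-none-linux-gnueabi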
@ -866,13 +961,13 @@ This is accomplished in the typical autoconf manner:
* Change directory to where the LLVM object files should live:
.. code-block:: bash
.. code-block:: console
% cd OBJ_ROOT
* Run the ``configure`` script found in the LLVM source directory:
.. code-block:: bash
.. code-block:: console
% SRC_ROOT/configure
@ -913,12 +1008,12 @@ Optional Configuration Items
----------------------------
If you're running on a Linux system that supports the `binfmt_misc
<http://www.tat.physik.uni-tuebingen.de/~rguenth/linux/binfmt_misc.html>`_
<http://en.wikipedia.org/wiki/binfmt_misc>`_
module, and you have root access on the system, you can set your system up to
execute LLVM bitcode files directly. To do this, use commands like this (the
first command may not be required if you are already using the module):
.. code-block:: bash
.. code-block:: console
% mount -t binfmt_misc none /proc/sys/fs/binfmt_misc
% echo ':llvm:M::BC::/path/to/lli:' > /proc/sys/fs/binfmt_misc/register
@ -928,7 +1023,7 @@ first command may not be required if you are already using the module):
This allows you to execute LLVM bitcode files directly. On Debian, you can also
use this command instead of the 'echo' command above:
.. code-block:: bash
.. code-block:: console
% sudo update-binfmts --install llvm /path/to/lli --magic 'BC'
@ -1073,8 +1168,8 @@ module that must be checked out (usually to ``projects/test-suite``). This
module contains a comprehensive correctness, performance, and benchmarking test
suite for LLVM. It is a separate Subversion module because not every LLVM user
is interested in downloading or building such a comprehensive test suite. For
further details on this test suite, please see the `Testing
Guide <TestingGuide.html>`_ document.
further details on this test suite, please see the :doc:`Testing Guide
<TestingGuide>` document.
.. _tools:
@ -1219,7 +1314,7 @@ Example with clang
#. Next, compile the C file into a native executable:
.. code-block:: bash
.. code-block:: console
% clang hello.c -o hello
@ -1230,7 +1325,7 @@ Example with clang
#. Next, compile the C file into a LLVM bitcode file:
.. code-block:: bash
.. code-block:: console
% clang -O3 -emit-llvm hello.c -c -o hello.bc
@ -1240,42 +1335,42 @@ Example with clang
#. Run the program in both forms. To run the program, use:
.. code-block:: bash
.. code-block:: console
% ./hello
and
.. code-block:: bash
.. code-block:: console
% lli hello.bc
The second examples shows how to invoke the LLVM JIT, `lli
<CommandGuide/html/lli.html>`_.
The second example shows how to invoke the LLVM JIT, :doc:`lli
<CommandGuide/lli>`.
#. Use the ``llvm-dis`` utility to take a look at the LLVM assembly code:
.. code-block:: bash
.. code-block:: console
% llvm-dis < hello.bc | less
#. Compile the program to native assembly using the LLC code generator:
.. code-block:: bash
.. code-block:: console
% llc hello.bc -o hello.s
#. Assemble the native assembly language file into a program:
.. code-block:: bash
.. code-block:: console
**Solaris:** % /opt/SUNWspro/bin/cc -xarch=v9 hello.s -o hello.native
% /opt/SUNWspro/bin/cc -xarch=v9 hello.s -o hello.native # On Solaris
**Others:** % gcc hello.s -o hello.native
% gcc hello.s -o hello.native # On others
#. Execute the native code program:
.. code-block:: bash
.. code-block:: console
% ./hello.native

View File

@ -1,5 +1,3 @@
.. _winvs:
==================================================================
Getting Started with the LLVM System using Microsoft Visual Studio
==================================================================

View File

@ -1,11 +1,7 @@
.. _gold-plugin:
====================
The LLVM gold plugin
====================
.. sectionauthor:: Nick Lewycky
Introduction
============

View File

@ -1,11 +1,7 @@
.. _how_to_add_a_builder:
===================================================================
How To Add Your Build Configuration To LLVM Buildbot Infrastructure
===================================================================
.. sectionauthor:: Galina Kistanova <gkistanova@gmail.com>
Introduction
============

View File

@ -1,11 +1,7 @@
.. _how_to_build_on_arm:
===================================================================
How To Build On ARM
===================================================================
.. sectionauthor:: Wei-Ren Chen (陳韋任) <chenwj@iis.sinica.edu.tw>
Introduction
============
@ -40,8 +36,8 @@ on the ARMv6 and ARMv7 architectures and may be inapplicable to older chips.
.. code-block:: bash
./configure --build=armv7l-unknown-linux-gnueabihf
--host=armv7l-unknown-linux-gnueabihf
--target=armv7l-unknown-linux-gnueabihf --with-cpu=cortex-a9
--with-float=hard --with-abi=aapcs-vfp --with-fpu=neon
--enable-targets=arm --disable-optimized --enable-assertions
./configure --build=armv7l-unknown-linux-gnueabihf \
--host=armv7l-unknown-linux-gnueabihf \
--target=armv7l-unknown-linux-gnueabihf --with-cpu=cortex-a9 \
--with-float=hard --with-abi=aapcs-vfp --with-fpu=neon \
--enable-targets=arm --enable-optimized --enable-assertions

View File

@ -1,581 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>How To Release LLVM To The Public</title>
<link rel="stylesheet" href="_static/llvm.css" type="text/css">
</head>
<body>
<h1>How To Release LLVM To The Public</h1>
<ol>
<li><a href="#introduction">Introduction</a></li>
<li><a href="#criteria">Qualification Criteria</a></li>
<li><a href="#introduction">Release Timeline</a></li>
<li><a href="#process">Release Process</a></li>
</ol>
<div class="doc_author">
<p>Written by <a href="mailto:tonic@nondot.org">Tanya Lattner</a>,
<a href="mailto:rspencer@x10sys.com">Reid Spencer</a>,
<a href="mailto:criswell@cs.uiuc.edu">John Criswell</a>, &amp;
<a href="mailto:wendling@apple.com">Bill Wendling</a>
</p>
</div>
<!-- *********************************************************************** -->
<h2><a name="introduction">Introduction</a></h2>
<!-- *********************************************************************** -->
<div>
<p>This document contains information about successfully releasing LLVM &mdash;
including subprojects: e.g., <tt>clang</tt> and <tt>dragonegg</tt> &mdash; to
the public. It is the Release Manager's responsibility to ensure that a high
quality build of LLVM is released.</p>
</div>
<!-- *********************************************************************** -->
<h2><a name="process">Release Timeline</a></h2>
<!-- *********************************************************************** -->
<div>
<p>LLVM is released on a time based schedule &mdash; roughly every 6 months. We
do not normally have dot releases because of the nature of LLVM's incremental
development philosophy. That said, the only thing preventing dot releases for
critical bug fixes from happening is a lack of resources &mdash; testers,
machines, time, etc. And, because of the high quality we desire for LLVM
releases, we cannot allow for a truncated form of release qualification.</p>
<p>The release process is roughly as follows:</p>
<ul>
<li><p>Set code freeze and branch creation date for 6 months after last code
freeze date. Announce release schedule to the LLVM community and update
the website.</p></li>
<li><p>Create release branch and begin release process.</p></li>
<li><p>Send out release candidate sources for first round of testing. Testing
lasts 7-10 days. During the first round of testing, any regressions found
should be fixed. Patches are merged from mainline into the release
branch. Also, all features need to be completed during this time. Any
features not completed at the end of the first round of testing will be
removed or disabled for the release.</p></li>
<li><p>Generate and send out the second release candidate sources. Only
<em>critical</em> bugs found during this testing phase will be fixed. Any
bugs introduced by merged patches will be fixed. If so, a third round of
testing is needed.</p></li>
<li><p>The release notes are updated.</p></li>
<li><p>Finally, release!</p></li>
</ul>
</div>
<!-- *********************************************************************** -->
<h2><a name="process">Release Process</a></h2>
<!-- *********************************************************************** -->
<div>
<ol>
<li><a href="#release-admin">Release Administrative Tasks</a>
<ol>
<li><a href="#branch">Create Release Branch</a></li>
<li><a href="#verchanges">Update Version Numbers</a></li>
</ol>
</li>
<li><a href="#release-build">Building the Release</a>
<ol>
<li><a href="#dist">Build the LLVM Source Distributions</a></li>
<li><a href="#build">Build LLVM</a></li>
<li><a href="#clangbin">Build the Clang Binary Distribution</a></li>
<li><a href="#target-build">Target Specific Build Details</a></li>
</ol>
</li>
<li><a href="#release-qualify">Release Qualification Criteria</a>
<ol>
<li><a href="#llvm-qualify">Qualify LLVM</a></li>
<li><a href="#clang-qualify">Qualify Clang</a></li>
<li><a href="#targets">Specific Target Qualification Details</a></li>
</ol>
</li>
<li><a href="#commTest">Community Testing</a></li>
<li><a href="#release-patch">Release Patch Rules</a></li>
<li><a href="#release-final">Release final tasks</a>
<ol>
<li><a href="#updocs">Update Documentation</a></li>
<li><a href="#tag">Tag the LLVM Final Release</a></li>
<li><a href="#updemo">Update the LLVM Demo Page</a></li>
<li><a href="#webupdates">Update the LLVM Website</a></li>
<li><a href="#announce">Announce the Release</a></li>
</ol>
</li>
</ol>
<!-- ======================================================================= -->
<h3><a name="release-admin">Release Administrative Tasks</a></h3>
<div>
<p>This section describes a few administrative tasks that need to be done for
the release process to begin. Specifically, it involves:</p>
<ul>
<li>Creating the release branch,</li>
<li>Setting version numbers, and</li>
<li>Tagging release candidates for the release team to begin testing</li>
</ul>
<!-- ======================================================================= -->
<h4><a name="branch">Create Release Branch</a></h4>
<div>
<p>Branch the Subversion trunk using the following procedure:</p>
<ol>
<li><p>Remind developers that the release branching is imminent and to refrain
from committing patches that might break the build. E.g., new features,
large patches for works in progress, an overhaul of the type system, an
exciting new TableGen feature, etc.</p></li>
<li><p>Verify that the current Subversion trunk is in decent shape by
examining nightly tester and buildbot results.</p></li>
<li><p>Create the release branch for <tt>llvm</tt>, <tt>clang</tt>,
the <tt>test-suite</tt>, and <tt>dragonegg</tt> from the last known good
revision. The branch's name is <tt>release_<i>XY</i></tt>,
where <tt>X</tt> is the major and <tt>Y</tt> the minor release
numbers. The branches should be created using the following commands:</p>
<div class="doc_code">
<pre>
$ svn copy https://llvm.org/svn/llvm-project/llvm/trunk \
https://llvm.org/svn/llvm-project/llvm/branches/release_<i>XY</i>
$ svn copy https://llvm.org/svn/llvm-project/cfe/trunk \
https://llvm.org/svn/llvm-project/cfe/branches/release_<i>XY</i>
$ svn copy https://llvm.org/svn/llvm-project/dragonegg/trunk \
https://llvm.org/svn/llvm-project/dragonegg/branches/release_<i>XY</i>
$ svn copy https://llvm.org/svn/llvm-project/test-suite/trunk \
https://llvm.org/svn/llvm-project/test-suite/branches/release_<i>XY</i>
</pre>
</div></li>
<li><p>Advise developers that they may now check their patches into the
Subversion tree again.</p></li>
<li><p>The Release Manager should switch to the release branch, because all
changes to the release will now be done in the branch. The easiest way to
do this is to grab a working copy using the following commands:</p>
<div class="doc_code">
<pre>
$ svn co https://llvm.org/svn/llvm-project/llvm/branches/release_<i>XY</i> llvm-<i>X.Y</i>
$ svn co https://llvm.org/svn/llvm-project/cfe/branches/release_<i>XY</i> clang-<i>X.Y</i>
$ svn co https://llvm.org/svn/llvm-project/dragonegg/branches/release_<i>XY</i> dragonegg-<i>X.Y</i>
$ svn co https://llvm.org/svn/llvm-project/test-suite/branches/release_<i>XY</i> test-suite-<i>X.Y</i>
</pre>
</div></li>
</ol>
</div>
<!-- ======================================================================= -->
<h4><a name="verchanges">Update LLVM Version</a></h4>
<div>
<p>After creating the LLVM release branch, update the release branches'
<tt>autoconf</tt> and <tt>configure.ac</tt> versions from '<tt>X.Ysvn</tt>'
to '<tt>X.Y</tt>'. Update it on mainline as well to be the next version
('<tt>X.Y+1svn</tt>'). Regenerate the configure scripts for both
<tt>llvm</tt> and the <tt>test-suite</tt>.</p>
<p>In addition, the version numbers of all the Bugzilla components must be
updated for the next release.</p>
</div>
<!-- ======================================================================= -->
<h4><a name="dist">Build the LLVM Release Candidates</a></h4>
<div>
<p>Create release candidates for <tt>llvm</tt>, <tt>clang</tt>,
<tt>dragonegg</tt>, and the LLVM <tt>test-suite</tt> by tagging the branch
with the respective release candidate number. For instance, to
create <b>Release Candidate 1</b> you would issue the following commands:</p>
<div class="doc_code">
<pre>
$ svn mkdir https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>
$ svn copy https://llvm.org/svn/llvm-project/llvm/branches/release_<i>XY</i> \
https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>/rc1
$ svn mkdir https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>
$ svn copy https://llvm.org/svn/llvm-project/cfe/branches/release_<i>XY</i> \
https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>/rc1
$ svn mkdir https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>
$ svn copy https://llvm.org/svn/llvm-project/dragonegg/branches/release_<i>XY</i> \
https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>/rc1
$ svn mkdir https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>
$ svn copy https://llvm.org/svn/llvm-project/test-suite/branches/release_<i>XY</i> \
https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>/rc1
</pre>
</div>
<p>Similarly, <b>Release Candidate 2</b> would be named <tt>RC2</tt> and so
on. This keeps a permanent copy of the release candidate around for people to
export and build as they wish. The final released sources will be tagged in
the <tt>RELEASE_<i>XY</i></tt> directory as <tt>Final</tt>
(c.f. <a href="#tag">Tag the LLVM Final Release</a>).</p>
<p>The Release Manager may supply pre-packaged source tarballs for users. This
can be done with the following commands:</p>
<div class="doc_code">
<pre>
$ svn export https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>/rc1 llvm-<i>X.Y</i>rc1
$ svn export https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>/rc1 clang-<i>X.Y</i>rc1
$ svn export https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>/rc1 dragonegg-<i>X.Y</i>rc1
$ svn export https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>/rc1 llvm-test-<i>X.Y</i>rc1
$ tar -cvf - llvm-<i>X.Y</i>rc1 | gzip &gt; llvm-<i>X.Y</i>rc1.src.tar.gz
$ tar -cvf - clang-<i>X.Y</i>rc1 | gzip &gt; clang-<i>X.Y</i>rc1.src.tar.gz
$ tar -cvf - dragonegg-<i>X.Y</i>rc1 | gzip &gt; dragonegg-<i>X.Y</i>rc1.src.tar.gz
$ tar -cvf - llvm-test-<i>X.Y</i>rc1 | gzip &gt; llvm-test-<i>X.Y</i>rc1.src.tar.gz
</pre>
</div>
</div>
</div>
<!-- ======================================================================= -->
<h3><a name="release-build">Building the Release</a></h3>
<div>
<p>The builds of <tt>llvm</tt>, <tt>clang</tt>, and <tt>dragonegg</tt>
<em>must</em> be free of errors and warnings in Debug, Release+Asserts, and
Release builds. If all builds are clean, then the release passes Build
Qualification.</p>
<p>The <tt>make</tt> options for building the different modes:</p>
<table>
<tr><th>Mode</th><th>Options</th></tr>
<tr align="left"><td>Debug</td><td><tt>ENABLE_OPTIMIZED=0</tt></td></tr>
<tr align="left"><td>Release+Asserts</td><td><tt>ENABLE_OPTIMIZED=1</tt></td></tr>
<tr align="left"><td>Release</td><td><tt>ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1</tt></td></tr>
</table>
<!-- ======================================================================= -->
<h4><a name="build">Build LLVM</a></h4>
<div>
<p>Build <tt>Debug</tt>, <tt>Release+Asserts</tt>, and <tt>Release</tt> versions
of <tt>llvm</tt> on all supported platforms. Directions to build
<tt>llvm</tt> are <a href="GettingStarted.html#quickstart">here</a>.</p>
</div>
<!-- ======================================================================= -->
<h4><a name="clangbin">Build Clang Binary Distribution</a></h4>
<div>
<p>Creating the <tt>clang</tt> binary distribution
(Debug/Release+Asserts/Release) requires performing the following steps for
each supported platform:</p>
<ol>
<li>Build clang according to the directions
<a href="http://clang.llvm.org/get_started.html">here</a>.</li>
<li>Build both a Debug and Release version of clang. The binary will be the
Release build.</lI>
<li>Package <tt>clang</tt> (details to follow).</li>
</ol>
</div>
<!-- ======================================================================= -->
<h4><a name="target-build">Target Specific Build Details</a></h4>
<div>
<p>The table below specifies which compilers are used for each Arch/OS
combination when qualifying the build of <tt>llvm</tt>, <tt>clang</tt>,
and <tt>dragonegg</tt>.</p>
<table>
<tr><th>Architecture</th> <th>OS</th> <th>compiler</th></tr>
<tr><td>x86-32</td> <td>Mac OS 10.5</td> <td>gcc 4.0.1</td></tr>
<tr><td>x86-32</td> <td>Linux</td> <td>gcc 4.2.X, gcc 4.3.X</td></tr>
<tr><td>x86-32</td> <td>FreeBSD</td> <td>gcc 4.2.X</td></tr>
<tr><td>x86-32</td> <td>mingw</td> <td>gcc 3.4.5</td></tr>
<tr><td>x86-64</td> <td>Mac OS 10.5</td> <td>gcc 4.0.1</td></tr>
<tr><td>x86-64</td> <td>Linux</td> <td>gcc 4.2.X, gcc 4.3.X</td></tr>
<tr><td>x86-64</td> <td>FreeBSD</td> <td>gcc 4.2.X</td></tr>
</table>
</div>
</div>
<!-- ======================================================================= -->
<h3><a name="release-qualify">Building the Release</a></h3>
<div>
<p>A release is qualified when it has no regressions from the previous release
(or baseline). Regressions are related to correctness first and performance
second. (We may tolerate some minor performance regressions if they are
deemed necessary for the general quality of the compiler.)</p>
<p><b>Regressions are new failures in the set of tests that are used to qualify
each product and only include things on the list. Every release will have
some bugs in it. It is the reality of developing a complex piece of
   software. We need very concrete and definitive release criteria that
   ensure we have monotonically improving quality on some metric. The metric we
use is described below. This doesn't mean that we don't care about other
criteria, but these are the criteria which we found to be most important and
   which must be satisfied before a release can go out.</b></p>
<!-- ======================================================================= -->
<h4><a name="llvm-qualify">Qualify LLVM</a></h4>
<div>
<p>LLVM is qualified when it has a clean test run without a front-end and it
has no regressions when using either <tt>clang</tt> or <tt>dragonegg</tt>
with the <tt>test-suite</tt> from the previous release.</p>
</div>
<!-- ======================================================================= -->
<h4><a name="clang-qualify">Qualify Clang</a></h4>
<div>
<p><tt>Clang</tt> is qualified when front-end specific tests in the
<tt>llvm</tt> dejagnu test suite all pass, clang's own test suite passes
cleanly, and there are no regressions in the <tt>test-suite</tt>.</p>
</div>
<!-- ======================================================================= -->
<h4><a name="targets">Specific Target Qualification Details</a></h4>
<div>
<table>
<tr><th>Architecture</th> <th>OS</th> <th>clang baseline</th> <th>tests</th></tr>
<tr><td>x86-32</td> <td>Linux</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite (including spec)</td></tr>
<tr><td>x86-32</td> <td>FreeBSD</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite</td></tr>
<tr><td>x86-32</td> <td>mingw</td> <td>none</td> <td>QT</td></tr>
<tr><td>x86-64</td> <td>Mac OS 10.X</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite (including spec)</td></tr>
<tr><td>x86-64</td> <td>Linux</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite (including spec)</td></tr>
<tr><td>x86-64</td> <td>FreeBSD</td> <td>last release</td> <td>llvm dejagnu, clang tests, test-suite</td></tr>
</table>
</div>
</div>
<!-- ======================================================================= -->
<h3><a name="commTest">Community Testing</a></h3>
<div>
<p>Once all testing has been completed and appropriate bugs filed, the release
candidate tarballs are put on the website and the LLVM community is
notified. Ask that all LLVM developers test the release in 2 ways:</p>
<ol>
<li>Download <tt>llvm-<i>X.Y</i></tt>, <tt>llvm-test-<i>X.Y</i></tt>, and the
appropriate <tt>clang</tt> binary. Build LLVM. Run <tt>make check</tt> and
the full LLVM test suite (<tt>make TEST=nightly report</tt>).</li>
<li>Download <tt>llvm-<i>X.Y</i></tt>, <tt>llvm-test-<i>X.Y</i></tt>, and the
<tt>clang</tt> sources. Compile everything. Run <tt>make check</tt> and
the full LLVM test suite (<tt>make TEST=nightly report</tt>).</li>
</ol>
<p>Ask LLVM developers to submit the test suite report and <tt>make check</tt>
results to the list. Verify that there are no regressions from the previous
release. The results are not used to qualify a release, but to spot other
potential problems. For unsupported targets, verify that <tt>make check</tt>
is at least clean.</p>
<p>During the first round of testing, all regressions must be fixed before the
second release candidate is tagged.</p>
<p>If this is the second round of testing, the testing is only to ensure that
bug fixes previously merged in have not created new major problems. <i>This
is not the time to solve additional and unrelated bugs!</i> If no patches are
merged in, the release is determined to be ready and the release manager may
   move on to the next stage.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="release-patch">Release Patch Rules</a></h3>
<div>
<p>Below are the rules regarding patching the release branch:</p>
<ol>
<li><p>Patches applied to the release branch may only be applied by the
release manager.</p></li>
<li><p>During the first round of testing, patches that fix regressions or that
are small and relatively risk free (verified by the appropriate code
owner) are applied to the branch. Code owners are asked to be very
conservative in approving patches for the branch. We reserve the right to
reject any patch that does not fix a regression as previously
defined.</p></li>
<li><p>During the remaining rounds of testing, only patches that fix critical
regressions may be applied.</p></li>
</ol>
</div>
<!-- ======================================================================= -->
<h3><a name="release-final">Release Final Tasks</a></h3>
<div>
<p>The final stages of the release process involve tagging the "final" release
branch, updating documentation that refers to the release, and updating the
demo page.</p>
<!-- ======================================================================= -->
<h4><a name="updocs">Update Documentation</a></h4>
<div>
<p>Review the documentation and ensure that it is up to date. The "Release
Notes" must be updated to reflect new features, bug fixes, new known issues,
and changes in the list of supported platforms. The "Getting Started Guide"
should be updated to reflect the new release version number tag available from
Subversion and changes in basic system requirements. Merge both changes from
mainline into the release branch.</p>
</div>
<!-- ======================================================================= -->
<h4><a name="tag">Tag the LLVM Final Release</a></h4>
<div>
<p>Tag the final release sources using the following procedure:</p>
<div class="doc_code">
<pre>
$ svn copy https://llvm.org/svn/llvm-project/llvm/branches/release_XY \
https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_<i>XY</i>/Final
$ svn copy https://llvm.org/svn/llvm-project/cfe/branches/release_XY \
https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_<i>XY</i>/Final
$ svn copy https://llvm.org/svn/llvm-project/dragonegg/branches/release_XY \
https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_<i>XY</i>/Final
$ svn copy https://llvm.org/svn/llvm-project/test-suite/branches/release_XY \
https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_<i>XY</i>/Final
</pre>
</div>
</div>
</div>
<!-- ======================================================================= -->
<h3><a name="updemo">Update the LLVM Demo Page</a></h3>
<div>
<p>The LLVM demo page must be updated to use the new release. This consists of
using the new <tt>clang</tt> binary and building LLVM.</p>
<!-- ======================================================================= -->
<h4><a name="webupdates">Update the LLVM Website</a></h4>
<div>
<p>The website must be updated before the release announcement is sent out. Here
is what to do:</p>
<ol>
<li>Check out the <tt>www</tt> module from Subversion.</li>
<li>Create a new subdirectory <tt>X.Y</tt> in the releases directory.</li>
<li>Commit the <tt>llvm</tt>, <tt>test-suite</tt>, <tt>clang</tt> source,
<tt>clang binaries</tt>, <tt>dragonegg</tt> source, and <tt>dragonegg</tt>
binaries in this new directory.</li>
<li>Copy and commit the <tt>llvm/docs</tt> and <tt>LICENSE.txt</tt> files
into this new directory. The docs should be built with
<tt>BUILD_FOR_WEBSITE=1</tt>.</li>
<li>Commit the <tt>index.html</tt> to the <tt>release/X.Y</tt> directory to
      redirect (use from previous release).</li>
<li>Update the <tt>releases/download.html</tt> file with the new release.</li>
<li>Update the <tt>releases/index.html</tt> with the new release and link to
release documentation.</li>
<li>Finally, update the main page (<tt>index.html</tt> and sidebar) to point
to the new release and release announcement. Make sure this all gets
committed back into Subversion.</li>
</ol>
</div>
<!-- ======================================================================= -->
<h4><a name="announce">Announce the Release</a></h4>
<div>
<p>Have Chris send out the release announcement when everything is finished.</p>
</div>
</div>
</div>
<!-- *********************************************************************** -->
<hr>
<address>
<a href="http://jigsaw.w3.org/css-validator/check/referer"><img
src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
<a href="http://validator.w3.org/check/referer"><img
src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
<a href="http://llvm.org/">The LLVM Compiler Infrastructure</a>
<br>
Last modified: $Date: 2012-07-31 09:05:57 +0200 (Tue, 31 Jul 2012) $
</address>
</body>
</html>

422
docs/HowToReleaseLLVM.rst Normal file
View File

@ -0,0 +1,422 @@
=================================
How To Release LLVM To The Public
=================================
.. contents::
:local:
:depth: 1
Introduction
============
This document contains information about successfully releasing LLVM ---
including subprojects: e.g., ``clang`` and ``dragonegg`` --- to the public. It
is the Release Manager's responsibility to ensure that a high quality build of
LLVM is released.
.. _timeline:
Release Timeline
================
LLVM is released on a time based schedule --- roughly every 6 months. We do
not normally have dot releases because of the nature of LLVM's incremental
development philosophy. That said, the only thing preventing dot releases for
critical bug fixes from happening is a lack of resources --- testers,
machines, time, etc. And, because of the high quality we desire for LLVM
releases, we cannot allow for a truncated form of release qualification.
The release process is roughly as follows:
* Set code freeze and branch creation date for 6 months after last code freeze
date. Announce release schedule to the LLVM community and update the website.
* Create release branch and begin release process.
* Send out release candidate sources for first round of testing. Testing lasts
7-10 days. During the first round of testing, any regressions found should be
fixed. Patches are merged from mainline into the release branch. Also, all
features need to be completed during this time. Any features not completed at
the end of the first round of testing will be removed or disabled for the
release.
* Generate and send out the second release candidate sources. Only *critical*
  bugs found during this testing phase will be fixed. Any bugs introduced by
  merged patches will be fixed, and if so, a third round of testing is needed.
* The release notes are updated.
* Finally, release!
Release Process
===============
.. contents::
:local:
Release Administrative Tasks
----------------------------
This section describes a few administrative tasks that need to be done for the
release process to begin. Specifically, it involves:
* Creating the release branch,
* Setting version numbers, and
* Tagging release candidates for the release team to begin testing.
Create Release Branch
^^^^^^^^^^^^^^^^^^^^^
Branch the Subversion trunk using the following procedure:
#. Remind developers that the release branching is imminent and to refrain from
committing patches that might break the build. E.g., new features, large
patches for works in progress, an overhaul of the type system, an exciting
new TableGen feature, etc.
#. Verify that the current Subversion trunk is in decent shape by
examining nightly tester and buildbot results.
#. Create the release branch for ``llvm``, ``clang``, the ``test-suite``, and
``dragonegg`` from the last known good revision. The branch's name is
``release_XY``, where ``X`` is the major and ``Y`` the minor release
numbers. The branches should be created using the following commands:
::
$ svn copy https://llvm.org/svn/llvm-project/llvm/trunk \
https://llvm.org/svn/llvm-project/llvm/branches/release_XY
$ svn copy https://llvm.org/svn/llvm-project/cfe/trunk \
https://llvm.org/svn/llvm-project/cfe/branches/release_XY
$ svn copy https://llvm.org/svn/llvm-project/dragonegg/trunk \
https://llvm.org/svn/llvm-project/dragonegg/branches/release_XY
$ svn copy https://llvm.org/svn/llvm-project/test-suite/trunk \
https://llvm.org/svn/llvm-project/test-suite/branches/release_XY
#. Advise developers that they may now check their patches into the Subversion
tree again.
#. The Release Manager should switch to the release branch, because all changes
to the release will now be done in the branch. The easiest way to do this is
to grab a working copy using the following commands:
::
$ svn co https://llvm.org/svn/llvm-project/llvm/branches/release_XY llvm-X.Y
$ svn co https://llvm.org/svn/llvm-project/cfe/branches/release_XY clang-X.Y
$ svn co https://llvm.org/svn/llvm-project/dragonegg/branches/release_XY dragonegg-X.Y
$ svn co https://llvm.org/svn/llvm-project/test-suite/branches/release_XY test-suite-X.Y
Update LLVM Version
^^^^^^^^^^^^^^^^^^^
After creating the LLVM release branch, update the release branches'
``autoconf`` and ``configure.ac`` versions from '``X.Ysvn``' to '``X.Y``'.
Update it on mainline as well to be the next version ('``X.Y+1svn``').
Regenerate the configure scripts for both ``llvm`` and the ``test-suite``.
In addition, the version numbers of all the Bugzilla components must be updated
for the next release.
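A rough sketch of regenerating the configure scripts from the working copies
checked out above (this assumes the ``AutoRegen.sh`` helper in each project's
``autoconf`` directory; it is an illustration, not a required procedure):
::
  $ cd llvm-X.Y/autoconf
  $ ./AutoRegen.sh          # regenerates ../configure from configure.ac
  $ cd ../..
Repeat the same step in the ``test-suite`` working copy.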
Build the LLVM Release Candidates
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Create release candidates for ``llvm``, ``clang``, ``dragonegg``, and the LLVM
``test-suite`` by tagging the branch with the respective release candidate
number. For instance, to create **Release Candidate 1** you would issue the
following commands:
::
$ svn mkdir https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_XY
$ svn copy https://llvm.org/svn/llvm-project/llvm/branches/release_XY \
https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_XY/rc1
$ svn mkdir https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_XY
$ svn copy https://llvm.org/svn/llvm-project/cfe/branches/release_XY \
https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_XY/rc1
$ svn mkdir https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_XY
$ svn copy https://llvm.org/svn/llvm-project/dragonegg/branches/release_XY \
https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_XY/rc1
$ svn mkdir https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_XY
$ svn copy https://llvm.org/svn/llvm-project/test-suite/branches/release_XY \
https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_XY/rc1
Similarly, **Release Candidate 2** would be named ``RC2`` and so on. This keeps
a permanent copy of the release candidate around for people to export and build
as they wish. The final released sources will be tagged in the ``RELEASE_XY``
directory as ``Final`` (c.f. :ref:`tag`).
The Release Manager may supply pre-packaged source tarballs for users. This can
be done with the following commands:
::
$ svn export https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_XY/rc1 llvm-X.Yrc1
$ svn export https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_XY/rc1 clang-X.Yrc1
$ svn export https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_XY/rc1 dragonegg-X.Yrc1
$ svn export https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_XY/rc1 llvm-test-X.Yrc1
$ tar -cvf - llvm-X.Yrc1 | gzip > llvm-X.Yrc1.src.tar.gz
$ tar -cvf - clang-X.Yrc1 | gzip > clang-X.Yrc1.src.tar.gz
$ tar -cvf - dragonegg-X.Yrc1 | gzip > dragonegg-X.Yrc1.src.tar.gz
$ tar -cvf - llvm-test-X.Yrc1 | gzip > llvm-test-X.Yrc1.src.tar.gz
Building the Release
--------------------
The builds of ``llvm``, ``clang``, and ``dragonegg`` *must* be free of
errors and warnings in Debug, Release+Asserts, and Release builds. If all
builds are clean, then the release passes Build Qualification.
The ``make`` options for building the different modes:
+-----------------+---------------------------------------------+
| Mode | Options |
+=================+=============================================+
| Debug | ``ENABLE_OPTIMIZED=0`` |
+-----------------+---------------------------------------------+
| Release+Asserts | ``ENABLE_OPTIMIZED=1`` |
+-----------------+---------------------------------------------+
| Release | ``ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1`` |
+-----------------+---------------------------------------------+
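For example, the three modes from the table above can be built with commands
along the following lines (a sketch; the parallelism flag is an arbitrary
choice):
::
  $ make -j4 ENABLE_OPTIMIZED=0                        # Debug
  $ make -j4 ENABLE_OPTIMIZED=1                        # Release+Asserts
  $ make -j4 ENABLE_OPTIMIZED=1 DISABLE_ASSERTIONS=1   # Release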
Build LLVM
^^^^^^^^^^
Build ``Debug``, ``Release+Asserts``, and ``Release`` versions
of ``llvm`` on all supported platforms. Directions to build ``llvm``
are :doc:`here <GettingStarted>`.
Build Clang Binary Distribution
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Creating the ``clang`` binary distribution (Debug/Release+Asserts/Release)
requires performing the following steps for each supported platform:
#. Build clang according to the directions `here
<http://clang.llvm.org/get_started.html>`__.
#. Build both a Debug and Release version of clang. The binary will be the
Release build.
#. Package ``clang`` (details to follow).
Target Specific Build Details
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The table below specifies which compilers are used for each Arch/OS combination
when qualifying the build of ``llvm``, ``clang``, and ``dragonegg``.
+--------------+---------------+----------------------+
| Architecture | OS | compiler |
+==============+===============+======================+
| x86-32 | Mac OS 10.5 | gcc 4.0.1 |
+--------------+---------------+----------------------+
| x86-32 | Linux | gcc 4.2.X, gcc 4.3.X |
+--------------+---------------+----------------------+
| x86-32 | FreeBSD | gcc 4.2.X |
+--------------+---------------+----------------------+
| x86-32 | mingw | gcc 3.4.5 |
+--------------+---------------+----------------------+
| x86-64 | Mac OS 10.5 | gcc 4.0.1 |
+--------------+---------------+----------------------+
| x86-64 | Linux | gcc 4.2.X, gcc 4.3.X |
+--------------+---------------+----------------------+
| x86-64 | FreeBSD | gcc 4.2.X |
+--------------+---------------+----------------------+
Release Qualification Criteria
------------------------------
A release is qualified when it has no regressions from the previous release (or
baseline). Regressions are related to correctness first and performance second.
(We may tolerate some minor performance regressions if they are deemed
necessary for the general quality of the compiler.)
**Regressions are new failures in the set of tests that are used to qualify
each product and only include things on the list. Every release will have
some bugs in it. It is the reality of developing a complex piece of
software. We need very concrete and definitive release criteria that
ensure we have monotonically improving quality on some metric. The metric we
use is described below. This doesn't mean that we don't care about other
criteria, but these are the criteria which we found to be most important and
which must be satisfied before a release can go out.**
Qualify LLVM
^^^^^^^^^^^^
LLVM is qualified when it has a clean test run without a front-end and it has
no regressions when using either ``clang`` or ``dragonegg`` with the
``test-suite`` from the previous release.
Qualify Clang
^^^^^^^^^^^^^
``Clang`` is qualified when front-end specific tests in the ``llvm`` regression
test suite all pass, clang's own test suite passes cleanly, and there are no
regressions in the ``test-suite``.
Specific Target Qualification Details
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+--------------+-------------+----------------+-----------------------------+
| Architecture | OS | clang baseline | tests |
+==============+=============+================+=============================+
| x86-32 | Linux | last release | llvm regression tests, |
| | | | clang regression tests, |
| | | | test-suite (including spec) |
+--------------+-------------+----------------+-----------------------------+
| x86-32 | FreeBSD | last release | llvm regression tests, |
| | | | clang regression tests, |
| | | | test-suite |
+--------------+-------------+----------------+-----------------------------+
| x86-32 | mingw | none | QT |
+--------------+-------------+----------------+-----------------------------+
| x86-64 | Mac OS 10.X | last release | llvm regression tests, |
| | | | clang regression tests, |
| | | | test-suite (including spec) |
+--------------+-------------+----------------+-----------------------------+
| x86-64 | Linux | last release | llvm regression tests, |
| | | | clang regression tests, |
| | | | test-suite (including spec) |
+--------------+-------------+----------------+-----------------------------+
| x86-64 | FreeBSD | last release | llvm regression tests, |
| | | | clang regression tests, |
| | | | test-suite |
+--------------+-------------+----------------+-----------------------------+
Community Testing
-----------------
Once all testing has been completed and appropriate bugs filed, the release
candidate tarballs are put on the website and the LLVM community is notified.
Ask that all LLVM developers test the release in 2 ways:
#. Download ``llvm-X.Y``, ``llvm-test-X.Y``, and the appropriate ``clang``
binary. Build LLVM. Run ``make check`` and the full LLVM test suite (``make
TEST=nightly report``).
#. Download ``llvm-X.Y``, ``llvm-test-X.Y``, and the ``clang`` sources. Compile
everything. Run ``make check`` and the full LLVM test suite (``make
TEST=nightly report``).
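For the source-build variant above, the overall flow looks roughly like the
following sketch (the configure flag and the directory layout chosen for
``clang`` and the test suite are assumptions about a typical setup, not
requirements):
::
  $ tar xzf llvm-X.Yrc1.src.tar.gz
  $ tar xzf clang-X.Yrc1.src.tar.gz
  $ mv clang-X.Yrc1 llvm-X.Yrc1/tools/clang
  $ tar xzf llvm-test-X.Yrc1.src.tar.gz
  $ mv llvm-test-X.Yrc1 llvm-X.Yrc1/projects/test-suite
  $ cd llvm-X.Yrc1
  $ ./configure --enable-optimized
  $ make -j4
  $ make check
  $ cd projects/test-suite && make TEST=nightly report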
Ask LLVM developers to submit the test suite report and ``make check`` results
to the list. Verify that there are no regressions from the previous release.
The results are not used to qualify a release, but to spot other potential
problems. For unsupported targets, verify that ``make check`` is at least
clean.
During the first round of testing, all regressions must be fixed before the
second release candidate is tagged.
If this is the second round of testing, the testing is only to ensure that bug
fixes previously merged in have not created new major problems. *This is not
the time to solve additional and unrelated bugs!* If no patches are merged in,
the release is determined to be ready and the release manager may move on to the
next stage.
Release Patch Rules
-------------------
Below are the rules regarding patching the release branch:
#. Patches applied to the release branch may only be applied by the release
manager.
#. During the first round of testing, patches that fix regressions or that are
small and relatively risk free (verified by the appropriate code owner) are
applied to the branch. Code owners are asked to be very conservative in
approving patches for the branch. We reserve the right to reject any patch
that does not fix a regression as previously defined.
#. During the remaining rounds of testing, only patches that fix critical
regressions may be applied.
Release Final Tasks
-------------------
The final stages of the release process involve tagging the "final" release
branch, updating documentation that refers to the release, and updating the
demo page.
Update Documentation
^^^^^^^^^^^^^^^^^^^^
Review the documentation and ensure that it is up to date. The "Release Notes"
must be updated to reflect new features, bug fixes, new known issues, and
changes in the list of supported platforms. The "Getting Started Guide" should
be updated to reflect the new release version number tag available from
Subversion and changes in basic system requirements. Merge both changes from
mainline into the release branch.
.. _tag:
Tag the LLVM Final Release
^^^^^^^^^^^^^^^^^^^^^^^^^^
Tag the final release sources using the following procedure:
::
$ svn copy https://llvm.org/svn/llvm-project/llvm/branches/release_XY \
https://llvm.org/svn/llvm-project/llvm/tags/RELEASE_XY/Final
$ svn copy https://llvm.org/svn/llvm-project/cfe/branches/release_XY \
https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_XY/Final
$ svn copy https://llvm.org/svn/llvm-project/dragonegg/branches/release_XY \
https://llvm.org/svn/llvm-project/dragonegg/tags/RELEASE_XY/Final
$ svn copy https://llvm.org/svn/llvm-project/test-suite/branches/release_XY \
https://llvm.org/svn/llvm-project/test-suite/tags/RELEASE_XY/Final
Update the LLVM Demo Page
-------------------------
The LLVM demo page must be updated to use the new release. This consists of
using the new ``clang`` binary and building LLVM.
Update the LLVM Website
^^^^^^^^^^^^^^^^^^^^^^^
The website must be updated before the release announcement is sent out. Here
is what to do:
#. Check out the ``www`` module from Subversion.
#. Create a new subdirectory ``X.Y`` in the releases directory.
#. Commit the ``llvm``, ``test-suite``, ``clang`` source, ``clang binaries``,
``dragonegg`` source, and ``dragonegg`` binaries in this new directory.
#. Copy and commit the ``llvm/docs`` and ``LICENSE.txt`` files into this new
directory. The docs should be built with ``BUILD_FOR_WEBSITE=1``.
#. Commit the ``index.html`` to the ``release/X.Y`` directory to redirect (use
from previous release).
#. Update the ``releases/download.html`` file with the new release.
#. Update the ``releases/index.html`` with the new release and link to release
documentation.
#. Finally, update the main page (``index.html`` and sidebar) to point to the
new release and release announcement. Make sure this all gets committed back
into Subversion.
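A sketch of the first few steps (the location of the ``www`` module and the
commit message are assumptions; adjust them to the actual website repository
layout):
::
  $ svn co https://llvm.org/svn/llvm-project/www/trunk www
  $ mkdir www/releases/X.Y
  $ cp llvm-X.Y.src.tar.gz clang-X.Y.src.tar.gz www/releases/X.Y/
  $ # ...copy the remaining source tarballs, binaries, docs, and LICENSE.txt
  $ svn add www/releases/X.Y
  $ svn commit -m "Add the X.Y release" www/releases/X.Y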
Announce the Release
^^^^^^^^^^^^^^^^^^^^
Have Chris send out the release announcement when everything is finished.

View File

@ -1,11 +1,7 @@
.. _how-to-set-up-llvm-style-rtti:
======================================================
How to set up LLVM-style RTTI for your class hierarchy
======================================================
.. sectionauthor:: Sean Silva <silvas@purdue.edu>
.. contents::
Background
@ -299,6 +295,78 @@ ordering right::
| OtherSpecialSquare
| Circle
A Bug to be Aware Of
--------------------
The example just given opens the door to bugs where the ``classof``\s are
not updated to match the ``Kind`` enum when adding (or removing) classes to
(from) the hierarchy.
Continuing the example above, suppose we add a ``SomewhatSpecialSquare`` as
a subclass of ``Square``, and update the ``ShapeKind`` enum like so:
.. code-block:: c++
enum ShapeKind {
SK_Square,
SK_SpecialSquare,
SK_OtherSpecialSquare,
+ SK_SomewhatSpecialSquare,
SK_Circle
}
Now, suppose that we forget to update ``Square::classof()``, so it still
looks like:
.. code-block:: c++
static bool classof(const Shape *S) {
// BUG: Returns false when S->getKind() == SK_SomewhatSpecialSquare,
// even though SomewhatSpecialSquare "is a" Square.
return S->getKind() >= SK_Square &&
S->getKind() <= SK_OtherSpecialSquare;
}
As the comment indicates, this code contains a bug. A straightforward and
non-clever way to avoid this is to introduce an explicit ``SK_LastSquare``
entry in the enum when adding the first subclass(es). For example, we could
rewrite the example at the beginning of `Concrete Bases and Deeper
Hierarchies`_ as:
.. code-block:: c++
enum ShapeKind {
SK_Square,
+ SK_SpecialSquare,
+ SK_OtherSpecialSquare,
+ SK_LastSquare,
SK_Circle
}
...
// Square::classof()
- static bool classof(const Shape *S) {
- return S->getKind() == SK_Square;
- }
+ static bool classof(const Shape *S) {
+ return S->getKind() >= SK_Square &&
+ S->getKind() <= SK_LastSquare;
+ }
Then, adding new subclasses is easy:
.. code-block:: c++
enum ShapeKind {
SK_Square,
SK_SpecialSquare,
SK_OtherSpecialSquare,
+ SK_SomewhatSpecialSquare,
SK_LastSquare,
SK_Circle
}
Notice that ``Square::classof`` does not need to be changed.
.. _classof-contract:
The Contract of ``classof``

View File

@ -1,11 +1,7 @@
.. _how-to-submit-a-bug-report:
================================
How to submit an LLVM bug report
================================
.. sectionauthor:: Chris Lattner <sabre@nondot.org> and Misha Brukman <http://misha.brukman.net>
Introduction - Got bugs?
========================

View File

@ -0,0 +1,81 @@
=====================
How To Use Attributes
=====================
.. contents::
:local:
Introduction
============
Attributes in LLVM have changed in some fundamental ways. It was necessary to
do this to support expanding the attributes to encompass more than a handful of
attributes --- e.g. command line options. The old way of handling attributes
consisted of representing them as a bit mask of values. This bit mask was
stored in a "list" structure that was reference counted. The advantage of this
was that attributes could be manipulated with 'or's and 'and's. The
disadvantage of this was that there was limited room for expansion, and
virtually no support for attribute-value pairs other than alignment.
In the new scheme, an ``Attribute`` object represents a single attribute that's
uniqued. You use the ``Attribute::get`` methods to create a new ``Attribute``
object. An attribute can be a single "enum" value (the enum being the
``Attribute::AttrKind`` enum), a string representing a target-dependent
attribute, or an attribute-value pair. Some examples:
* Target-independent: ``noinline``, ``zext``
* Target-dependent: ``"no-sse"``, ``"thumb2"``
* Attribute-value pair: ``"cpu" = "cortex-a8"``, ``align = 4``
Note: for an attribute value pair, we expect a target-dependent attribute to
have a string for the value.
``Attribute``
=============
An ``Attribute`` object is designed to be passed around by value.
Because attributes are no longer represented as a bit mask, you will need to
convert any code which does treat them as a bit mask to use the new query
methods on the Attribute class.
``AttributeSet``
================
The ``AttributeSet`` class replaces the old ``AttributeList`` class. The
``AttributeSet`` stores a collection of Attribute objects for each kind of
object that may have an attribute associated with it: the function as a
whole, the return type, or the function's parameters. A function's attributes
are at index ``AttributeSet::FunctionIndex``; the return type's attributes are
at index ``AttributeSet::ReturnIndex``; and the function's parameters'
attributes are at indices 1, ..., n (where 'n' is the number of parameters).
Most methods on the ``AttributeSet`` class take an index parameter.
An ``AttributeSet`` is also a uniqued and immutable object. You create an
``AttributeSet`` through the ``AttributeSet::get`` methods. You can add and
remove attributes, which result in the creation of a new ``AttributeSet``.
An ``AttributeSet`` object is designed to be passed around by value.
Note: It is advised that you do *not* use the ``AttributeSet`` "introspection"
methods (e.g. ``Raw``, ``getRawPointer``, etc.). These methods break
encapsulation, and may be removed in a future release (i.e. LLVM 4.0).
``AttrBuilder``
===============
Lastly, we have a "builder" class to help create the ``AttributeSet`` object
without having to create several different intermediate uniqued
``AttributeSet`` objects. The ``AttrBuilder`` class allows you to add and
remove attributes at will. The attributes won't be uniqued until you call the
appropriate ``AttributeSet::get`` method.
An ``AttrBuilder`` object is *not* designed to be passed around by value. It
should be passed by reference.
Note: It is advised that you do *not* use the ``AttrBuilder::addRawValue()``
method or the ``AttrBuilder(uint64_t Val)`` constructor. These are for
backwards compatibility and may be removed in a future release (i.e. LLVM 4.0).
And that's basically it! A lot of functionality is hidden behind these classes,
but the interfaces are pretty straightforward.

View File

@ -1,11 +1,7 @@
.. _how_to_use_instruction_mappings:
===============================
How To Use Instruction Mappings
===============================
.. sectionauthor:: Jyotsna Verma <jverma@codeaurora.org>
.. contents::
:local:
@ -120,7 +116,7 @@ to include relevant information in its definition. For example, consider
following to be the current definitions of ADD, ADD_pt (true) and ADD_pf (false)
instructions:
.. code-block::llvm
.. code-block:: llvm
def ADD : ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$a, IntRegs:$b),
"$dst = add($a, $b)",
@ -141,7 +137,7 @@ In this step, we modify these instructions to include the information
required by the relationship model, ``getPredOpcode``, so that they can
be related.
.. code-block::llvm
.. code-block:: llvm
def ADD : PredRel, ALU32_rr<(outs IntRegs:$dst), (ins IntRegs:$a, IntRegs:$b),
"$dst = add($a, $b)",

View File

@ -1,368 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>LLVMBuild Documentation</title>
<link rel="stylesheet" href="_static/llvm.css" type="text/css">
</head>
<body>
<h1>LLVMBuild Guide</h1>
<ol>
<li><a href="#introduction">Introduction</a></li>
<li><a href="#projectorg">Project Organization</a></li>
<li><a href="#buildintegration">Build Integration</a></li>
<li><a href="#componentoverview">Component Overview</a></li>
<li><a href="#formatreference">Format Reference</a></li>
</ol>
<!-- *********************************************************************** -->
<h2><a name="introduction">Introduction</a></h2>
<!-- *********************************************************************** -->
<div>
<p>This document describes the <tt>LLVMBuild</tt> organization and files which
we use to describe parts of the LLVM ecosystem. For description of specific
LLVMBuild related tools, please see the command guide.</p>
<p>LLVM is designed to be a modular set of libraries which can be flexibly
mixed together in order to build a variety of tools, like compilers, JITs,
custom code generators, optimization passes, interpreters, and so on. Related
projects in the LLVM system like Clang and LLDB also tend to follow this
philosophy.</p>
<p>In order to support this usage style, LLVM has a fairly strict structure as
to how the source code and various components are organized. The
<tt>LLVMBuild.txt</tt> files are the explicit specification of that structure,
and are used by the build systems and other tools in order to develop the LLVM
project.</p>
</div>
<!-- *********************************************************************** -->
<h2><a name="projectorg">Project Organization</a></h2>
<!-- *********************************************************************** -->
<!-- FIXME: We should probably have an explicit top level project object. Good
place to hang project level data, name, etc. Also useful for serving as the
$ROOT of project trees for things which can be checked out separately. -->
<div>
<p>The source code for LLVM projects using the LLVMBuild system (LLVM, Clang,
and LLDB) is organized into <em>components</em>, which define the separate
pieces of functionality that make up the project. These projects may consist
of many libraries, associated tools, build tools, or other utility tools (for
example, testing tools).</p>
<p>For the most part, the project contents are organized around defining one
main component per subdirectory. Each such directory contains
an <tt>LLVMBuild.txt</tt> which contains the component definitions.</p>
<p>The component descriptions for the project as a whole are automatically
gathered by the LLVMBuild tools. The tools automatically traverse the source
directory structure to find all of the component description files. NOTE: For
performance/sanity reasons, we only traverse into subdirectories when the
parent itself contains an <tt>LLVMBuild.txt</tt> description file.</p>
</div>
<!-- *********************************************************************** -->
<h2><a name="buildintegration">Build Integration</a></h2>
<!-- *********************************************************************** -->
<div>
<p>The LLVMBuild files themselves are just a declarative way to describe the
project structure. The actual building of the LLVM project is handled by
another build system (currently we support
both <a href="MakefileGuide.html">Makefiles</a>
and <a href="CMake.html">CMake</a>.</p>
<p>The build system implementation will load the relevant contents of the
LLVMBuild files and use that to drive the actual project build. Typically, the
build system will only need to load this information at "configure" time, and
use it to generate native information. Build systems will also handle
automatically reconfiguring their information when the contents of
the <i>LLVMBuild.txt</i> files change.</p>
<p>Developers generally are not expected to need to be aware of the details of
how the LLVMBuild system is integrated into their build. Ideally, LLVM
developers who are not working on the build system would only ever need to
modify the contents of the <i>LLVMBuild.txt</i> description files (although we
have not reached this goal yet).</p>
<p>For more information on the utility tool we provide to help interfacing
with the build system, please see
the <a href="CommandGuide/html/llvm-build.html">llvm-build</a>
documentation.</p>
</div>
<!-- *********************************************************************** -->
<h2><a name="componentoverview">Component Overview</a></h2>
<!-- *********************************************************************** -->
<div>
<p>As mentioned earlier, LLVM projects are organized into
logical <em>components</em>. Every component is typically grouped into its
own subdirectory. Generally, a component is organized around a coherent group
of sources which have some kind of clear API separation from other parts of
the code.</p>
<p>LLVM primarily uses the following types of components:</p>
<ul>
<li><em>Libraries</em> - Library components define a distinct API which can
be independently linked into LLVM client applications. Libraries typically
have private and public header files, and may specify a link of required
libraries that they build on top of.</li>
<li><em>Build Tools</em> - Build tools are applications which are designed
to be run as part of the build process (typically to generate other source
files). Currently, LLVM uses one main build tool
called <a href="TableGenFundamentals.html">TableGen</a> to generate a
variety of source files.</li>
<li><em>Tools</em> - Command line applications which are built using the
LLVM component libraries. Most LLVM tools are small and are primarily
frontends to the library interfaces.</li>
<!-- FIXME: We also need shared libraries as a first class component, but this
is not yet implemented. -->
</ul>
<p>Components are described using <em>LLVMBuild.txt</em> files in the
directories that define the component. See
the <a href="#formatreference">Format Reference</a> section for information on
the exact format of these files.</p>
</div>
<!-- *********************************************************************** -->
<h2><a name="formatreference">LLVMBuild Format Reference</a></h2>
<!-- *********************************************************************** -->
<div>
<p>LLVMBuild files are written in a simple variant of the INI or configuration
file format (<a href="http://en.wikipedia.org/wiki/INI_file">Wikipedia
entry</a>). The format defines a list of sections each of which may contain
some number of properties. A simple example of the file format is below:</p>
<div class="doc_code">
<pre>
<i>; Comments start with a semi-colon.</i>
<i>; Sections are declared using square brackets.</i>
[component_0]
<i>; Properties are declared using '=' and are contained in the previous section.
;
; We support simple string and boolean scalar values and list values, where
; items are separated by spaces. There is no support for quoting, and so
; property values may not contain spaces.</i>
property_name = property_value
list_property_name = value_1 value_2 <em>...</em> value_n
boolean_property_name = 1 <em>(or 0)</em>
</pre>
</div>
<p>LLVMBuild files are expected to define a strict set of sections and
properties. A typical component description file for a library
component would typically look like the following example:</p>
<div class="doc_code">
<pre>
[component_0]
type = Library
name = Linker
parent = Libraries
required_libraries = Archive BitReader Core Support TransformUtils
</pre>
</div>
<p>A full description of the exact sections and properties which are allowed
follows.</p>
<p>Each file may define exactly one common component, named "common". The
common component may define the following properties:</p>
<ul>
<li><i>subdirectories</i> <b>[optional]</b>
<p>If given, a list of the names of the subdirectories from the current
subpath to search for additional LLVMBuild files.</p></li>
</ul>
<p>Each file may define multiple components. Each component is described by a
section whose name starts with "component". The remainder of the section name is
ignored, but each section name must be unique. Typically components are just
numbered in order for files with multiple components ("component_0",
"component_1", and so on).</p>
<p><b>Section names not matching this format (or the "common" section) are
currently unused and are disallowed.</b></p>
<p>Every component is defined by the properties in the section. The exact list
of properties that are allowed depends on the component
type. Components <b>may not</b> define any properties other than those
expected by the component type.</p>
<p>Every component must define the following properties:</p>
<ul>
<li><i>type</i> <b>[required]</b>
<p>The type of the component. Supported component types are
detailed below. Most components will define additional properties which
may be required or optional.</p></li>
<li><i>name</i> <b>[required]</b>
<p>The name of the component. Names are required to be unique
across the entire project.</p></li>
<li><i>parent</i> <b>[required]</b>
<p>The name of the logical parent of the component. Components are
organized into a logical tree to make it easier to navigate and organize
groups of components. The parents have no semantics as far as the project
build is concerned, however. Typically, the parent will be the main
component of the parent directory.</p>
<!-- FIXME: Should we make the parent optional, and default to parent
directories component? -->
<p>Components may reference the root pseudo component using '$ROOT' to
indicate they should logically be grouped at the top-level.</p>
</li>
</ul>
<p>Components may define the following properties:</p>
<ul>
<li><i>dependencies</i> <b>[optional]</b>
<p>If specified, a list of names of components which <i>must</i> be built
prior to this one. This should only be exactly those components which
produce some tool or source code required for building the
component.</p>
<p><em>NOTE:</em> Group and LibraryGroup components have no semantics for
the actual build, and are not allowed to specify dependencies.</p></li>
</ul>
<p>The following section lists the available component types, as well as the
properties which are associated with that component.</p>
<ul>
<li><i>type = Group</i>
<p>Group components exist purely to allow additional arbitrary structuring
of the logical components tree. For example, one might define a
"Libraries" group to hold all of the root library components.</p>
    <p>Group components have no additional properties.</p>
</li>
<li><i>type = Library</i>
<p>Library components define an individual library which should be built
from the source code in the component directory.</p>
<p>Components with this type use the following properties:</p>
<ul>
<li><i>library_name</i> <b>[optional]</b>
<p>If given, the name to use for the actual library file on disk. If
not given, the name is derived from the component name
itself.</p></li>
<li><i>required_libraries</i> <b>[optional]</b>
<p>If given, a list of the names of Library or LibraryGroup components
which must also be linked in whenever this library is used. That is,
the link time dependencies for this component. When tools are built,
the build system will include the transitive closure of
all <i>required_libraries</i> for the components the tool needs.</p></li>
<li><i>add_to_library_groups</i> <b>[optional]</b>
<p>If given, a list of the names of LibraryGroup components which this
component is also part of. This allows nesting groups of
components. For example, the <i>X86</i> target might define a library
group for all of the <i>X86</i> components. That library group might
then be included in the <i>all-targets</i> library group.</p></li>
<li><i>installed</i> <b>[optional]</b> <b>[boolean]</b>
<p>Whether this library is installed. Libraries that are not installed
are only reported by <tt>llvm-config</tt> when it is run as part of a
development directory.</p></li>
</ul>
</li>
<li><i>type = LibraryGroup</i>
<p>LibraryGroup components are a mechanism to allow easy definition of
useful sets of related components. In particular, we use them to easily
specify things like "all targets", or "all assembly printers".</p>
<p>Components with this type use the following properties:</p>
<ul>
<li><i>required_libraries</i> <b>[optional]</b>
<p>See the Library type for a description of this property.</p></li>
<li><i>add_to_library_groups</i> <b>[optional]</b>
<p>See the Library type for a description of this property.</p></li>
</ul>
</li>
<li><i>type = TargetGroup</i>
<p>TargetGroup components are an extension of LibraryGroups, specifically
for defining LLVM targets (which are handled specially in a few
places).</p>
<p>The name of the component should always be the name of the target.</p>
<p>Components with this type use the LibraryGroup properties in addition
to:</p>
<ul>
<li><i>has_asmparser</i> <b>[optional]</b> <b>[boolean]</b>
<p>Whether this target defines an assembly parser.</p></li>
<li><i>has_asmprinter</i> <b>[optional]</b> <b>[boolean]</b>
<p>Whether this target defines an assembly printer.</p></li>
<li><i>has_disassembler</i> <b>[optional]</b> <b>[boolean]</b>
<p>Whether this target defines a disassembler.</p></li>
<li><i>has_jit</i> <b>[optional]</b> <b>[boolean]</b>
<p>Whether this target supports JIT compilation.</p></li>
</ul>
</li>
<li><i>type = Tool</i>
<p>Tool components define standalone command line tools which should be
built from the source code in the component directory and linked.</p>
<p>Components with this type use the following properties:</p>
<ul>
<li><i>required_libraries</i> <b>[optional]</b>
<p>If given, a list of the names of Library or LibraryGroup components
which this tool is required to be linked with. <b>NOTE:</b> The values
should be the component names, which may not always match up with the
actual library names on disk.</p>
<p>Build systems are expected to properly include all of the libraries
        required by the linked components (i.e., the transitive closure
of <em>required_libraries</em>).</p>
<p>Build systems are also expected to understand that those library
components must be built prior to linking -- they do not also need to
be listed under <i>dependencies</i>.</p></li>
</ul>
</li>
<li><i>type = BuildTool</i>
<p>BuildTool components are like Tool components, except that the tool is
supposed to be built for the platform where the build is running (instead
    of that platform being targeted). Build systems are expected to handle
the fact that required libraries may need to be built for multiple
platforms in order to be able to link this tool.</p>
<p>BuildTool components currently use the exact same properties as Tool
    components; the type distinction is only used to differentiate what the
tool is built for.</p>
</li>
</ul>
</div>
<!-- *********************************************************************** -->
<hr>
<address>
<a href="http://jigsaw.w3.org/css-validator/check/referer"><img
src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
<a href="http://validator.w3.org/check/referer"><img
src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
<a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date$
</address>
</body>
</html>

325
docs/LLVMBuild.rst Normal file
View File

@ -0,0 +1,325 @@
===============
LLVMBuild Guide
===============
.. contents::
:local:
Introduction
============
This document describes the ``LLVMBuild`` organization and files which
we use to describe parts of the LLVM ecosystem. For description of
specific LLVMBuild related tools, please see the command guide.
LLVM is designed to be a modular set of libraries which can be flexibly
mixed together in order to build a variety of tools, like compilers,
JITs, custom code generators, optimization passes, interpreters, and so
on. Related projects in the LLVM system like Clang and LLDB also tend to
follow this philosophy.
In order to support this usage style, LLVM has a fairly strict structure
as to how the source code and various components are organized. The
``LLVMBuild.txt`` files are the explicit specification of that
structure, and are used by the build systems and other tools in order to
develop the LLVM project.
Project Organization
====================
The source code for LLVM projects using the LLVMBuild system (LLVM,
Clang, and LLDB) is organized into *components*, which define the
separate pieces of functionality that make up the project. These
projects may consist of many libraries, associated tools, build tools,
or other utility tools (for example, testing tools).
For the most part, the project contents are organized around defining
one main component per subdirectory. Each such directory contains
an ``LLVMBuild.txt`` which contains the component definitions.
The component descriptions for the project as a whole are automatically
gathered by the LLVMBuild tools. The tools automatically traverse the
source directory structure to find all of the component description
files. NOTE: For performance/sanity reasons, we only traverse into
subdirectories when the parent itself contains an ``LLVMBuild.txt``
description file.
Build Integration
=================
The LLVMBuild files themselves are just a declarative way to describe
the project structure. The actual building of the LLVM project is
handled by another build system (currently we support both
:doc:`Makefiles <MakefileGuide>` and :doc:`CMake <CMake>`).
The build system implementation will load the relevant contents of the
LLVMBuild files and use that to drive the actual project build.
Typically, the build system will only need to load this information at
"configure" time, and use it to generative native information. Build
systems will also handle automatically reconfiguring their information
when the contents of the ``LLVMBuild.txt`` files change.
Developers generally are not expected to need to be aware of the details
of how the LLVMBuild system is integrated into their build. Ideally,
LLVM developers who are not working on the build system would only ever
need to modify the contents of the ``LLVMBuild.txt`` description files
(although we have not reached this goal yet).
For more information on the utility tool we provide to help interfacing
with the build system, please see the :doc:`llvm-build
<CommandGuide/llvm-build>` documentation.
Component Overview
==================
As mentioned earlier, LLVM projects are organized into logical
*components*. Every component is typically grouped into its own
subdirectory. Generally, a component is organized around a coherent
group of sources which have some kind of clear API separation from other
parts of the code.
LLVM primarily uses the following types of components:
- *Libraries* - Library components define a distinct API which can be
independently linked into LLVM client applications. Libraries typically
have private and public header files, and may specify a link of required
libraries that they build on top of.
- *Build Tools* - Build tools are applications which are designed to be run
as part of the build process (typically to generate other source files).
Currently, LLVM uses one main build tool called :doc:`TableGen
<TableGenFundamentals>` to generate a variety of source files.
- *Tools* - Command line applications which are built using the LLVM
component libraries. Most LLVM tools are small and are primarily
frontends to the library interfaces.
Components are described using ``LLVMBuild.txt`` files in the directories
that define the component. See the `LLVMBuild Format Reference`_ section
for information on the exact format of these files.
LLVMBuild Format Reference
==========================
LLVMBuild files are written in a simple variant of the INI or configuration
file format (`Wikipedia entry`_). The format defines a list of sections
each of which may contain some number of properties. A simple example of
the file format is below:
.. _Wikipedia entry: http://en.wikipedia.org/wiki/INI_file
.. code-block:: ini
; Comments start with a semi-colon.
; Sections are declared using square brackets.
[component_0]
; Properties are declared using '=' and are contained in the previous section.
;
; We support simple string and boolean scalar values and list values, where
; items are separated by spaces. There is no support for quoting, and so
; property values may not contain spaces.
property_name = property_value
list_property_name = value_1 value_2 ... value_n
boolean_property_name = 1 (or 0)
LLVMBuild files are expected to define a strict set of sections and
properties. A typical component description file for a library
component would typically look like the following example:
.. code-block:: ini
[component_0]
type = Library
name = Linker
parent = Libraries
required_libraries = Archive BitReader Core Support TransformUtils
A full description of the exact sections and properties which are
allowed follows.
Each file may define exactly one common component, named ``common``. The
common component may define the following properties:
- ``subdirectories`` **[optional]**
If given, a list of the names of the subdirectories from the current
subpath to search for additional LLVMBuild files.
Each file may define multiple components. Each component is described by a
section whose name starts with ``component``. The remainder of the section
name is ignored, but each section name must be unique. Typically components
are just numbered in order for files with multiple components
(``component_0``, ``component_1``, and so on).
.. warning::
Section names not matching this format (or the ``common`` section) are
currently unused and are disallowed.
Every component is defined by the properties in the section. The exact
list of properties that are allowed depends on the component type.
Components **may not** define any properties other than those expected
by the component type.
Every component must define the following properties:
- ``type`` **[required]**
The type of the component. Supported component types are detailed
below. Most components will define additional properties which may be
required or optional.
- ``name`` **[required]**
The name of the component. Names are required to be unique across the
entire project.
- ``parent`` **[required]**
The name of the logical parent of the component. Components are
organized into a logical tree to make it easier to navigate and
organize groups of components. The parents have no semantics as far
as the project build is concerned, however. Typically, the parent
will be the main component of the parent directory.
Components may reference the root pseudo component using ``$ROOT`` to
indicate they should logically be grouped at the top-level.
Components may define the following properties:
- ``dependencies`` **[optional]**
If specified, a list of names of components which *must* be built
prior to this one. This should only be exactly those components which
produce some tool or source code required for building the component.
.. note::
``Group`` and ``LibraryGroup`` components have no semantics for the
actual build, and are not allowed to specify dependencies.
The following section lists the available component types, as well as
the properties which are associated with that component.
- ``type = Group``
Group components exist purely to allow additional arbitrary structuring
of the logical components tree. For example, one might define a
``Libraries`` group to hold all of the root library components.
  ``Group`` components have no additional properties.
- ``type = Library``
Library components define an individual library which should be built
from the source code in the component directory.
Components with this type use the following properties:
- ``library_name`` **[optional]**
If given, the name to use for the actual library file on disk. If
not given, the name is derived from the component name itself.
- ``required_libraries`` **[optional]**
If given, a list of the names of ``Library`` or ``LibraryGroup``
components which must also be linked in whenever this library is
used. That is, the link time dependencies for this component. When
tools are built, the build system will include the transitive closure
of all ``required_libraries`` for the components the tool needs.
- ``add_to_library_groups`` **[optional]**
If given, a list of the names of ``LibraryGroup`` components which
this component is also part of. This allows nesting groups of
components. For example, the ``X86`` target might define a library
group for all of the ``X86`` components. That library group might
then be included in the ``all-targets`` library group.
- ``installed`` **[optional]** **[boolean]**
Whether this library is installed. Libraries that are not installed
are only reported by ``llvm-config`` when it is run as part of a
development directory.
- ``type = LibraryGroup``
``LibraryGroup`` components are a mechanism to allow easy definition of
useful sets of related components. In particular, we use them to easily
specify things like "all targets", or "all assembly printers".
Components with this type use the following properties:
- ``required_libraries`` **[optional]**
See the ``Library`` type for a description of this property.
- ``add_to_library_groups`` **[optional]**
See the ``Library`` type for a description of this property.
- ``type = TargetGroup``
``TargetGroup`` components are an extension of ``LibraryGroup``\s,
specifically for defining LLVM targets (which are handled specially in a
few places).
The name of the component should always be the name of the target.
Components with this type use the ``LibraryGroup`` properties in
addition to:
- ``has_asmparser`` **[optional]** **[boolean]**
Whether this target defines an assembly parser.
- ``has_asmprinter`` **[optional]** **[boolean]**
Whether this target defines an assembly printer.
- ``has_disassembler`` **[optional]** **[boolean]**
Whether this target defines a disassembler.
- ``has_jit`` **[optional]** **[boolean]**
Whether this target supports JIT compilation.
- ``type = Tool``
``Tool`` components define standalone command line tools which should be
built from the source code in the component directory and linked.
Components with this type use the following properties:
- ``required_libraries`` **[optional]**
If given, a list of the names of ``Library`` or ``LibraryGroup``
components against which this tool must be linked.
.. note::
The values should be the component names, which may not always
match up with the actual library names on disk.
Build systems are expected to properly include all of the libraries
required by the linked components (i.e., the transitive closure of
``required_libraries``).
Build systems are also expected to understand that those library
components must be built prior to linking -- they do not also need
to be listed under ``dependencies``.
- ``type = BuildTool``
``BuildTool`` components are like ``Tool`` components, except that the
tool is supposed to be built for the platform where the build is running
(instead of the platform being targeted). Build systems are expected
to handle the fact that required libraries may need to be built for
multiple platforms in order to be able to link this tool.
``BuildTool`` components currently use exactly the same properties as
``Tool`` components; the type distinction is only used to differentiate
what the tool is built for.

File diff suppressed because it is too large Load Diff

8605
docs/LangRef.rst Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,3 @@
.. _lexicon:
================
The LLVM Lexicon
================
@ -17,11 +15,28 @@ A
**ADCE**
Aggressive Dead Code Elimination
**AST**
Abstract Syntax Tree.
Due to Clang's influence (mostly the fact that parsing and semantic
analysis are so intertwined for C and especially C++), the typical
working definition of AST in the LLVM community is roughly "the
compiler's first complete symbolic (as opposed to textual)
representation of an input program".
As such, an "AST" might be a more general graph instead of a "tree"
(consider the symbolic representation for the type of a typical "linked
list node"). This working definition is closer to what some authors
call an "annotated abstract syntax tree".
Consult your favorite compiler book or search engine for more details.
B
-
.. _lexicon-bb-vectorization:
**BB Vectorization**
Basic Block Vectorization
Basic-Block Vectorization
**BURS**
Bottom Up Rewriting System --- A method of instruction selection for code
@ -185,6 +200,10 @@ S
**SCCP**
Sparse Conditional Constant Propagation
**SLP**
Superword-Level Parallelism, same as :ref:`Basic-Block Vectorization
<lexicon-bb-vectorization>`.
**SRoA**
Scalar Replacement of Aggregates

View File

@ -1,5 +1,3 @@
.. _lto:
======================================================
LLVM Link Time Optimization: Design and Implementation
======================================================
@ -85,9 +83,10 @@ invokes system linker.
return foo1();
}
.. code-block:: bash
To compile, run:
.. code-block:: console
--- command lines ---
% clang -emit-llvm -c a.c -o a.o # <-- a.o is LLVM bitcode file
% clang -c main.c -o main.o # <-- main.o is native object file
% clang a.o main.o -o main # <-- standard link command without modifications
@ -96,7 +95,7 @@ invokes system linker.
visible symbol defined in LLVM bitcode file. The linker completes its usual
symbol resolution pass and finds that ``foo2()`` is not used
anywhere. This information is used by the LLVM optimizer and it
removes ``foo2()``.</li>
removes ``foo2()``.
* As soon as ``foo2()`` is removed, the optimizer recognizes that condition ``i
< 0`` is always false, which means ``foo3()`` is never used. Hence, the

View File

@ -46,10 +46,6 @@ clean:
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@# FIXME: Remove this `cp` once HTML->Sphinx transition is completed.
@# Kind of a hack, but HTML-formatted docs are on the way out anyway.
@echo "Copying legacy HTML-formatted docs into $(BUILDDIR)/html"
@cp -a *.html tutorial $(BUILDDIR)/html
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:

View File

@ -1,5 +1,3 @@
.. _makefile_guide:
===================
LLVM Makefile Guide
===================
@ -60,7 +58,7 @@ To use the makefile system, you simply create a file named ``Makefile`` in your
directory and declare values for certain variables. The variables and values
that you select determine what the makefile system will do. These variables
enable rules and processing in the makefile system that automatically Do The
Right Thing&trade;.
Right Thing (C).
Including Makefiles
-------------------
@ -170,9 +168,9 @@ openable with the ``dlopen`` function and searchable with the ``dlsym`` function
(or your operating system's equivalents). While this isn't strictly necessary on
Linux and a few other platforms, it is required on systems like HP-UX and
Darwin. You should use ``LOADABLE_MODULE`` for any shared library that you
intend to be loaded into an tool via the ``-load`` option. See the
`WritingAnLLVMPass.html <WritingAnLLVMPass.html#makefile>`_ document for an
example of why you might want to do this.
intend to be loaded into a tool via the ``-load`` option. `Pass documentation
<writing-an-llvm-pass-makefile>`_ has an example of why you might want to do
this.
Bitcode Modules
^^^^^^^^^^^^^^^
@ -241,7 +239,7 @@ and the names of the libraries you wish to link with the tool. For example:
says that we are to build a tool named ``mytool`` and that it requires three
libraries: ``mylib``, ``LLVMSupport.a`` and ``LLVMSystem.a``.
Note that two different variables are use to indicate which libraries are
Note that two different variables are used to indicate which libraries are
linked: ``USEDLIBS`` and ``LLVMLIBS``. This distinction is necessary to support
projects. ``LLVMLIBS`` refers to the LLVM libraries found in the LLVM object
directory. ``USEDLIBS`` refers to the libraries built by your project. In the
@ -339,7 +337,7 @@ the invocation of ``make check-local`` in the ``test`` directory. The intended
usage for this is to assist in running specific suites of tests. If
``TESTSUITE`` is not set, the implementation of ``check-local`` should run all
normal tests. It is up to the project to define what different values for
``TESTSUTE`` will do. See the `Testing Guide <TestingGuide.html>`_ for further
``TESTSUITE`` will do. See the :doc:`Testing Guide <TestingGuide>` for further
details.
``check-local``
@ -348,9 +346,9 @@ details.
This target should be implemented by the ``Makefile`` in the project's ``test``
directory. It is invoked by the ``check`` target elsewhere. Each project is
free to define the actions of ``check-local`` as appropriate for that
project. The LLVM project itself uses dejagnu to run a suite of feature and
regresson tests. Other projects may choose to use dejagnu or any other testing
mechanism.
project. The LLVM project itself uses the :doc:`Lit <CommandGuide/lit>` testing
tool to run a suite of feature and regression tests. Other projects may choose
to use :program:`lit` or any other testing mechanism.
``clean``
---------
@ -358,7 +356,7 @@ mechanism.
This target cleans the build directory, recursively removing all things that the
Makefile builds. The cleaning rules have been made guarded so they shouldn't go
awry (via ``rm -f $(UNSET_VARIABLE)/*`` which will attempt to erase the entire
directory structure.
directory structure).
``clean-local``
---------------
@ -606,8 +604,8 @@ system that tell it what to do for the current directory.
the build process, such as code generators (e.g. ``tblgen``).
``OPTIONAL_DIRS``
Specify a set of directories that may be built, if they exist, but its not
an error for them not to exist.
Specify a set of directories that may be built, if they exist, but it is
not an error for them not to exist.
``PARALLEL_DIRS``
Specify a set of directories to build recursively and in parallel if the
@ -701,6 +699,9 @@ The override variables are given below:
``CFLAGS``
Additional flags to be passed to the 'C' compiler.
``CPPFLAGS``
Additional flags passed to the C/C++ preprocessor.
``CXX``
Specifies the path to the C++ compiler.

View File

@ -1,5 +1,3 @@
.. _marked_up_disassembly:
=======================================
LLVM's Optional Rich Disassembly Output
=======================================

276
docs/NVPTXUsage.rst Normal file
View File

@ -0,0 +1,276 @@
=============================
User Guide for NVPTX Back-end
=============================
.. contents::
:local:
:depth: 3
Introduction
============
To support GPU programming, the NVPTX back-end supports a subset of LLVM IR
along with a defined set of conventions used to represent GPU programming
concepts. This document provides an overview of the general usage of the
back-end, including a description of the conventions used and the set of
accepted LLVM IR.
.. note::
This document assumes a basic familiarity with CUDA and the PTX
assembly language. Information about the CUDA Driver API and the PTX assembly
language can be found in the `CUDA documentation
<http://docs.nvidia.com/cuda/index.html>`_.
Conventions
===========
Marking Functions as Kernels
----------------------------
In PTX, there are two types of functions: *device functions*, which are only
callable by device code, and *kernel functions*, which are callable by host
code. By default, the back-end will emit device functions. Metadata is used to
declare a function as a kernel function. This metadata is attached to the
``nvvm.annotations`` named metadata object, and has the following format:
.. code-block:: llvm
!0 = metadata !{<function-ref>, metadata !"kernel", i32 1}
The first parameter is a reference to the kernel function. The following
example shows a kernel function calling a device function in LLVM IR. The
function ``@my_kernel`` is callable from host code, but ``@my_fmad`` is not.
.. code-block:: llvm
define float @my_fmad(float %x, float %y, float %z) {
%mul = fmul float %x, %y
%add = fadd float %mul, %z
ret float %add
}
define void @my_kernel(float* %ptr) {
%val = load float* %ptr
%ret = call float @my_fmad(float %val, float %val, float %val)
store float %ret, float* %ptr
ret void
}
!nvvm.annotations = !{!1}
!1 = metadata !{void (float*)* @my_kernel, metadata !"kernel", i32 1}
When compiled, the PTX kernel functions are callable by host-side code.
Address Spaces
--------------
The NVPTX back-end uses the following address space mapping:
============= ======================
Address Space Memory Space
============= ======================
0 Generic
1 Global
2 Internal Use
3 Shared
4 Constant
5 Local
============= ======================
Every global variable and pointer type is assigned to one of these address
spaces, with 0 being the default address space. Intrinsics are provided which
can be used to convert pointers between the generic and non-generic address
spaces.
As an example, the following IR will define an array ``@g`` that resides in
global device memory.
.. code-block:: llvm
@g = internal addrspace(1) global [4 x i32] [ i32 0, i32 1, i32 2, i32 3 ]
LLVM IR functions can read and write to this array, and host-side code can
copy data to it by name with the CUDA Driver API.
Note that since address space 0 is the generic space, it is illegal to have
global variables in address space 0. Address space 0 is the default address
space in LLVM, so the ``addrspace(N)`` annotation is *required* for global
variables.
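For example, a device function can index into ``@g`` directly through its
``addrspace(1)`` pointer type. The sketch below is illustrative only (the
function name ``@read_g`` is not part of any back-end convention) and assumes
the definition of ``@g`` given above:

.. code-block:: llvm

define i32 @read_g(i32 %i) {
  ; Index into @g directly through its addrspace(1) pointer type
  %elt = getelementptr [4 x i32] addrspace(1)* @g, i32 0, i32 %i
  %val = load i32 addrspace(1)* %elt
  ret i32 %val
}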
NVPTX Intrinsics
================
Address Space Conversion
------------------------
'``llvm.nvvm.ptr.*.to.gen``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
These are overloaded intrinsics. You can use these on any pointer types.
.. code-block:: llvm
declare i8* @llvm.nvvm.ptr.global.to.gen.p0i8.p1i8(i8 addrspace(1)*)
declare i8* @llvm.nvvm.ptr.shared.to.gen.p0i8.p3i8(i8 addrspace(3)*)
declare i8* @llvm.nvvm.ptr.constant.to.gen.p0i8.p4i8(i8 addrspace(4)*)
declare i8* @llvm.nvvm.ptr.local.to.gen.p0i8.p5i8(i8 addrspace(5)*)
Overview:
"""""""""
The '``llvm.nvvm.ptr.*.to.gen``' intrinsics convert a pointer in a non-generic
address space to a generic address space pointer.
Semantics:
""""""""""
These intrinsics modify the pointer value to be a valid generic address space
pointer.
'``llvm.nvvm.ptr.gen.to.*``' Intrinsics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
These are overloaded intrinsics. You can use these on any pointer types.
.. code-block:: llvm
declare i8* @llvm.nvvm.ptr.gen.to.global.p1i8.p0i8(i8 addrspace(1)*)
declare i8* @llvm.nvvm.ptr.gen.to.shared.p3i8.p0i8(i8 addrspace(3)*)
declare i8* @llvm.nvvm.ptr.gen.to.constant.p4i8.p0i8(i8 addrspace(4)*)
declare i8* @llvm.nvvm.ptr.gen.to.local.p5i8.p0i8(i8 addrspace(5)*)
Overview:
"""""""""
The '``llvm.nvvm.ptr.gen.to.*``' intrinsics convert a pointer in the generic
address space to a pointer in the target address space. Note that these
intrinsics are only useful if the target address space of the pointer is
known. It is not legal to use address space conversion intrinsics to convert
a pointer from one non-generic address space to another non-generic address
space.
Semantics:
""""""""""
These intrinsics modify the pointer value to be a valid pointer in the target
non-generic address space.
Reading PTX Special Registers
-----------------------------
'``llvm.nvvm.read.ptx.sreg.*``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.nctaid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.nctaid.y()
declare i32 @llvm.nvvm.read.ptx.sreg.nctaid.z()
declare i32 @llvm.nvvm.read.ptx.sreg.warpsize()
Overview:
"""""""""
The '``@llvm.nvvm.read.ptx.sreg.*``' intrinsics provide access to the PTX
special registers, in particular the kernel launch bounds. These registers
map in the following way to CUDA builtins:
============= =====================================
CUDA Builtin  PTX Special Register Intrinsic
============= =====================================
``threadIdx`` ``@llvm.nvvm.read.ptx.sreg.tid.*``
``blockIdx``  ``@llvm.nvvm.read.ptx.sreg.ctaid.*``
``blockDim``  ``@llvm.nvvm.read.ptx.sreg.ntid.*``
``gridDim``   ``@llvm.nvvm.read.ptx.sreg.nctaid.*``
============= =====================================
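As an illustrative sketch (the function name ``@global_thread_id_x`` is
hypothetical), the common CUDA expression
``blockIdx.x * blockDim.x + threadIdx.x`` maps onto these intrinsics as
follows:

.. code-block:: llvm

declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
declare i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()

define i32 @global_thread_id_x() {
  ; blockIdx.x * blockDim.x + threadIdx.x
  %ctaid = call i32 @llvm.nvvm.read.ptx.sreg.ctaid.x()
  %ntid  = call i32 @llvm.nvvm.read.ptx.sreg.ntid.x()
  %tid   = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
  %tmp   = mul i32 %ctaid, %ntid
  %gid   = add i32 %tmp, %tid
  ret i32 %gid
}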
Barriers
--------
'``llvm.nvvm.barrier0``'
^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
.. code-block:: llvm
declare void @llvm.nvvm.barrier0()
Overview:
"""""""""
The '``@llvm.nvvm.barrier0()``' intrinsic emits a PTX ``bar.sync 0``
instruction, equivalent to the ``__syncthreads()`` call in CUDA.
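As a sketch of typical usage (the shared array, its size, and the function
name below are illustrative, not prescribed by the back-end), threads in a
block can stage data through shared memory (address space 3) and use the
barrier to order the stores before the corresponding loads:

.. code-block:: llvm

@scratch = internal addrspace(3) global [256 x float] undef

declare void @llvm.nvvm.barrier0()
declare i32 @llvm.nvvm.read.ptx.sreg.tid.x()

define void @shift_within_block(float* %in, float* %out) {
  %tid  = call i32 @llvm.nvvm.read.ptx.sreg.tid.x()
  ; Each thread copies one element into the shared scratch buffer
  %inp  = getelementptr float* %in, i32 %tid
  %val  = load float* %inp
  %slot = getelementptr [256 x float] addrspace(3)* @scratch, i32 0, i32 %tid
  store float %val, float addrspace(3)* %slot
  ; Wait until every thread in the block has written its element
  call void @llvm.nvvm.barrier0()
  ; Each thread then reads the element written by its neighbour
  %nxt   = add i32 %tid, 1
  %idx   = urem i32 %nxt, 256
  %slot2 = getelementptr [256 x float] addrspace(3)* @scratch, i32 0, i32 %idx
  %val2  = load float addrspace(3)* %slot2
  %outp  = getelementptr float* %out, i32 %tid
  store float %val2, float* %outp
  ret void
}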
Other Intrinsics
----------------
For the full set of NVPTX intrinsics, please see the
``include/llvm/IR/IntrinsicsNVVM.td`` file in the LLVM source tree.
Executing PTX
=============
The most common way to execute PTX assembly on a GPU device is to use the CUDA
Driver API. This API is a low-level interface to the GPU driver and allows for
JIT compilation of PTX code to native GPU machine code.
Initializing the Driver API:
.. code-block:: c++
CUdevice device;
CUcontext context;
// Initialize the driver API
cuInit(0);
// Get a handle to the first compute device
cuDeviceGet(&device, 0);
// Create a compute device context
cuCtxCreate(&context, 0, device);
JIT compiling a PTX string to a device binary:
.. code-block:: c++
CUmodule module;
CUfunction function;
// JIT compile a null-terminated PTX string
cuModuleLoadData(&module, (void*)PTXString);
// Get a handle to the "myfunction" kernel function
cuModuleGetFunction(&function, module, "myfunction");
For full examples of executing PTX assembly, please see the `CUDA Samples
<https://developer.nvidia.com/cuda-downloads>`_ distribution.

View File

@ -1,5 +1,3 @@
.. _packaging:
========================
Advice on Packaging LLVM
========================

File diff suppressed because it is too large Load Diff

1261
docs/Passes.rst Normal file

File diff suppressed because it is too large Load Diff

View File

@ -88,6 +88,12 @@ diffs between different versions of the patch as it was reviewed in the
*Revision Update History*. Most features are self descriptive - explore, and
if you have a question, drop by on #llvm in IRC to get help.
Note that as e-mail is the system of reference for code reviews, and some
people prefer it over a web interface, we do not generate automated mail
when a review changes state, for example by clicking "Accept Revision" in
the web interface. Thus, please type LGTM into the comment box to accept
a change from Phabricator.
Status
------

File diff suppressed because it is too large Load Diff

3204
docs/ProgrammersManual.rst Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,5 +1,3 @@
.. _projects:
========================
Creating an LLVM Project
========================
@ -153,12 +151,10 @@ Underneath your top level directory, you should have the following directories:
Currently, the LLVM build system provides basic support for tests. The LLVM
system provides the following:
* LLVM provides a ``tcl`` procedure that is used by ``Dejagnu`` to run tests.
It can be found in ``llvm/lib/llvm-dg.exp``. This test procedure uses ``RUN``
* LLVM contains regression tests in ``llvm/test``. These tests are run by the
:doc:`Lit <CommandGuide/lit>` testing tool. This test procedure uses ``RUN``
lines in the actual test case to determine how to run the test. See the
`TestingGuide <TestingGuide.html>`_ for more details. You can easily write
Makefile support similar to the Makefiles in ``llvm/test`` to use ``Dejagnu``
to run your project's tests.
:doc:`TestingGuide` for more details.
* LLVM contains an optional package called ``llvm-test``, which provides
benchmarks and programs that are known to compile with the Clang front

View File

@ -1,12 +1,42 @@
LLVM Documentation
==================
The LLVM documentation is currently written in two formats:
LLVM's documentation is written in reStructuredText, a lightweight
plaintext markup language (file extension `.rst`). While the
reStructuredText documentation should be quite readable in source form, it
is mostly meant to be processed by the Sphinx documentation generation
system to create HTML pages which are hosted on <http://llvm.org/docs/> and
updated after every commit. Manpage output is also supported; see below.
* Plain HTML documentation.
If you instead would like to generate and view the HTML locally, install
Sphinx <http://sphinx-doc.org/> and then do:
* reStructured Text documentation using the Sphinx documentation generator. It
is currently tested with Sphinx 1.1.3.
cd docs/
make -f Makefile.sphinx
$BROWSER _build/html/index.html
For more information, see the "Sphinx Introduction for LLVM Developers"
document.
The mapping between reStructuredText files and generated documentation is
`docs/Foo.rst` <-> `_build/html/Foo.html` <-> `http://llvm.org/docs/Foo.html`.
If you are interested in writing new documentation, you will want to read
`SphinxQuickstartTemplate.rst` which will get you writing documentation
very fast and includes examples of the most important reStructuredText
markup syntax.
Manpage Output
===============
Building the manpages is similar to building the HTML documentation. The
primary difference is to use the `man` makefile target, instead of the
default (which is `html`). Sphinx then produces the man pages in the
directory `_build/man/`.
cd docs/
make -f Makefile.sphinx man
man -l _build/man/FileCheck.1
The correspondence between .rst files and man pages is
`docs/CommandGuide/Foo.rst` <-> `_build/man/Foo.1`.
These .rst files are also included during HTML generation so they are also
viewable online (as noted above) at e.g.
`http://llvm.org/docs/CommandGuide/Foo.html`.

View File

@ -1,975 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<link rel="stylesheet" href="_static/llvm.css" type="text/css">
<title>LLVM 3.2 Release Notes</title>
</head>
<body>
<h1>LLVM 3.2 Release Notes</h1>
<div>
<img style="float:right" src="http://llvm.org/img/DragonSmall.png"
width="136" height="136" alt="LLVM Dragon Logo">
</div>
<ol>
<li><a href="#intro">Introduction</a></li>
<li><a href="#subproj">Sub-project Status Update</a></li>
<li><a href="#externalproj">External Projects Using LLVM 3.2</a></li>
<li><a href="#whatsnew">What's New in LLVM?</a></li>
<li><a href="GettingStarted.html">Installation Instructions</a></li>
<li><a href="#knownproblems">Known Problems</a></li>
<li><a href="#additionalinfo">Additional Information</a></li>
</ol>
<div class="doc_author">
<p>Written by the <a href="http://llvm.org/">LLVM Team</a></p>
</div>
<!-- *********************************************************************** -->
<h2>
<a name="intro">Introduction</a>
</h2>
<!-- *********************************************************************** -->
<div>
<p>This document contains the release notes for the LLVM Compiler
Infrastructure, release 3.2. Here we describe the status of LLVM, including
major improvements from the previous release, improvements in various
sub-projects of LLVM, and some of the current users of the code. All LLVM
releases may be downloaded from the <a href="http://llvm.org/releases/">LLVM
releases web site</a>.</p>
<p>For more information about LLVM, including information about the latest
release, please check out the <a href="http://llvm.org/">main LLVM web
site</a>. If you have questions or comments,
the <a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">LLVM
Developer's Mailing List</a> is a good place to send them.</p>
<p>Note that if you are reading this file from a Subversion checkout or the main
LLVM web page, this document applies to the <i>next</i> release, not the
current one. To see the release notes for a specific release, please see the
<a href="http://llvm.org/releases/">releases page</a>.</p>
</div>
<!-- *********************************************************************** -->
<h2>
<a name="subproj">Sub-project Status Update</a>
</h2>
<!-- *********************************************************************** -->
<div>
<p>The LLVM 3.2 distribution currently consists of production-quality code
from the core LLVM repository, which roughly includes the LLVM optimizers,
code generators and supporting tools, as well as Clang, DragonEgg and
compiler-rt sub-project repositories. In addition to this code, the LLVM
Project includes other sub-projects that are in development. Here we
include updates on these sub-projects.</p>
<!--=========================================================================-->
<h3>
<a name="clang">Clang: C/C++/Objective-C Frontend Toolkit</a>
</h3>
<div>
<p><a href="http://clang.llvm.org/">Clang</a> is an LLVM front end for the C,
C++, and Objective-C languages. Clang aims to provide a better user
experience through expressive diagnostics, a high level of conformance to
language standards, fast compilation, and low memory use. Like LLVM, Clang
provides a modular, library-based architecture that makes it suitable for
creating or integrating with other development tools.</p>
<p>In the LLVM 3.2 time-frame, the Clang team has made many improvements.
Highlights include:</p>
<ul>
<li>Improvements to Clang's diagnostics</li>
<li>Support for tls_model attribute</li>
<li>Type safety attributes</li>
</ul>
<p>For more details about the changes to Clang since the 3.1 release, see the
<a href="http://llvm.org/releases/3.2/tools/clang/docs/ReleaseNotes.html">Clang 3.2 release
notes.</a></p>
<p>If Clang rejects your code but another compiler accepts it, please take a
look at the <a href="http://clang.llvm.org/compatibility.html">language
compatibility</a> guide to make sure this is not intentional or a known
issue.</p>
</div>
<!--=========================================================================-->
<h3>
<a name="dragonegg">DragonEgg: GCC front-ends, LLVM back-end</a>
</h3>
<div>
<p><a href="http://dragonegg.llvm.org/">DragonEgg</a> is a
<a href="http://gcc.gnu.org/wiki/plugins">gcc plugin</a> that replaces GCC's
optimizers and code generators with LLVM's. It works with gcc-4.5 and gcc-4.6
(and partially with gcc-4.7), can target the x86-32/x86-64 and ARM processor
families, and has been successfully used on the Darwin, FreeBSD, KFreeBSD,
Linux and OpenBSD platforms. It fully supports Ada, C, C++ and Fortran. It
has partial support for Go, Java, Obj-C and Obj-C++.</p>
<p>The 3.2 release has the following notable changes:</p>
<ul>
<li>Able to load LLVM plugins such as Polly.</li>
<li>Supports thread-local storage models.</li>
<li>Passes knowledge of variable lifetimes to the LLVM optimizers.</li>
<li>No longer requires GCC to be built with LTO support.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="compiler-rt">compiler-rt: Compiler Runtime Library</a>
</h3>
<div>
<p>The LLVM <a href="http://compiler-rt.llvm.org/">compiler-rt project</a>
is a simple library that provides an implementation of the low-level
target-specific hooks required by code generation and other runtime
components. For example, when compiling for a 32-bit target, converting a
double to a 64-bit unsigned integer is compiled into a runtime call to the
<code>__fixunsdfdi</code> function. The compiler-rt library provides highly
optimized implementations of this and other low-level routines (some are 3x
faster than the equivalent libgcc routines).</p>
<p>The 3.2 release has the following notable changes:</p>
<ul>
<li><a href="http://llvm.org/releases/3.2/tools/clang/docs/ThreadSanitizer.html">ThreadSanitizer (TSan)</a> - data race detector run-time library for C/C++ has been added.</li>
<li>Improvements to <a href="http://llvm.org/releases/3.2/tools/clang/docs/AddressSanitizer.html">AddressSanitizer</a> including: better portability
(OSX, Android NDK), support for cmake based builds, enhanced error reporting and lots of bug fixes.</li>
<li>Added support for A6 'Swift' CPU.</li>
<li><code>divsi3</code> function has been enhanced to take advantage of a hardware unsigned divide when it is available.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="lldb">LLDB: Low Level Debugger</a>
</h3>
<div>
<p><a href="http://lldb.llvm.org">LLDB</a> is a ground-up implementation of a
command line debugger, as well as a debugger API that can be used from other
applications. LLDB makes use of the Clang parser to provide high-fidelity
expression parsing (particularly for C++) and uses the LLVM JIT for target
support.</p>
<p>The 3.2 release has the following notable changes:</p>
<ul>
<li>Linux build fixes for clang (see <a href="http://lldb.llvm.org/build.html">Building LLDB</a>)</li>
<li>Some Linux stability and usability improvements</li>
<li>Switch expression evaluation to use MCJIT (from legacy JIT) on Linux</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="libc++">libc++: C++ Standard Library</a>
</h3>
<div>
<p>Like compiler_rt, libc++ is now <a href="DeveloperPolicy.html#license">dual
licensed</a> under the MIT and UIUC license, allowing it to be used more
permissively.</p>
<p>Within the LLVM 3.2 time-frame there were the following highlights:</p>
<ul>
<li> C++11 shared_ptr atomic access API (20.7.2.5) has been implemented.</li>
<li>Applied noexcept and constexpr throughout library.</li>
<li>Improved C++11 conformance in associative container emplace.</li>
<li>Performance improvements in: std::rotate algorithm and I/O.</li>
<li>Operator new/delete and type_infos for exception types moved from libc++ to libc++abi.</li>
<li>Bug fixes in: <code>&lt;atomic&gt;</code>; vector<code>&lt;bool&gt;</code> algorithms,
<code>&lt;future&gt;</code>,<code>&lt;tuple&gt;</code>,
<code>&lt;type_traits&gt;</code>,<code>&lt;fstream&gt;</code>,<code>&lt;istream&gt;</code>,
<code>&lt;iterator&gt;</code>, <code>&lt;condition_variable&gt;</code>,<code>&lt;complex&gt;</code> as well as visibility fixes.
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="vmkit">VMKit</a>
</h3>
<div>
<p>The <a href="http://vmkit.llvm.org/">VMKit project</a> is an implementation
of a Java Virtual Machine (Java VM or JVM) that uses LLVM for static and
just-in-time compilation.</p>
<p>The 3.2 release has the following notable changes:</p>
<ul>
<li>Bug fixes only, no functional changes.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="Polly">Polly: Polyhedral Optimizer</a>
</h3>
<div>
<p><a href="http://polly.llvm.org/">Polly</a> is an <em>experimental</em>
optimizer for data locality and parallelism. It currently provides high-level
loop optimizations and automatic parallelization (using the OpenMP run time).
Work in the area of automatic SIMD and accelerator code generation was
started.</p>
<p>Within the LLVM 3.2 time-frame there were the following highlights:</p>
<ul>
<li>isl, the integer set library used by Polly, was relicensed under the MIT license.</li>
<li>isl based code generation.</li>
<li>MIT licensed replacement for CLooG (LGPLv2).</li>
<li>Fine grained option handling (separation of core and border computations, control overhead vs. code size).</li>
<li>Support for FORTRAN and Dragonegg.</li>
<li>OpenMP code generation fixes.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="StaticAnalyzer">Clang Static Analyzer</a>
</h3>
<div>
<p>The <a href="http://clang-analyzer.llvm.org/">Clang Static Analyzer</a>
is an advanced source code analysis tool integrated into Clang that performs
a deep analysis of code to find potential bugs.</p>
<p>In the LLVM 3.2 release, the static analyzer has made significant improvements
in many areas, with notable highlights such as:</p>
<ul>
<li>Improved interprocedural analysis within a translation unit (see details below), which greatly amplified the analyzer's ability to find bugs.</li>
<li>New infrastructure to model &quot;well-known&quot; APIs, allowing the analyzer to do a much better job when modeling calls to such functions.</li>
<li>Significant improvements to the APIs to write static analyzer checkers, with a more unified way of representing function/method calls in the checker API. Details can be found in the <a href="http://llvm.org/devmtg/2012-11#talk13">Building a Checker in 24 hours</a> talk.
</ul>
<p>The release specifically includes notable improvements for Objective-C analysis, including:</p>
<ul>
<li>Interprocedural analysis for Objective-C methods.</li>
<li>Interprocedural analysis of calls to &quot;blocks&quot;.</li>
<li>Precise modeling of GCD APIs such as <tt>dispatch_once</tt> and friends.</li>
<li>Improved support for recently added Objective-C constructs such as array and dictionary literals.</li>
</ul>
<p>The release specifically includes notable improvements for C++ analysis, including:</p>
<ul>
<li>Interprocedural analysis for C++ methods (within a translation unit).</li>
<li>More precise modeling of C++ initializers and destructors.</li>
</ul>
<p>Finally, this release includes many small improvements to <tt>scan-build</tt>, which can be used to drive the analyzer from the command line or a continuous integration system. This includes a directory-traversal issue, which could cause potential security problems in some cases. We would like to acknowledge Tim Brown of Portcullis Computer Security Ltd for reporting this issue.</p>
</div>
</div>
<!-- *********************************************************************** -->
<h2>
<a name="externalproj">External Open Source Projects Using LLVM 3.2</a>
</h2>
<!-- *********************************************************************** -->
<div>
<p>An exciting aspect of LLVM is that it is used as an enabling technology for
a lot of other language and tools projects. This section lists some of the
projects that have already been updated to work with LLVM 3.2.</p>
<h3>Crack</h3>
<div>
<p><a href="http://code.google.com/p/crack-language/">Crack</a> aims to provide
the ease of development of a scripting language with the performance of a
compiled language. The language derives concepts from C++, Java and Python,
incorporating object-oriented programming, operator overloading and strong
typing.</p>
</div>
<h3>EmbToolkit</h3>
<div>
<p><a href="http://www.embtoolkit.org/">EmbToolkit</a> provides Linux cross-compiler
toolchain/SDK (GCC/binutils/C library (uclibc,eglibc,musl)), a build system for
package cross-compilation and optionally various root file systems.
It supports ARM and MIPS. There is an ongoing effort to provide a clang+llvm
environment for the 3.2 releases,
</p>
</div>
<h3>FAUST</h3>
<div>
<p><a href="http://faust.grame.fr/">FAUST</a> is a compiled language for
real-time audio signal processing. The name FAUST stands for Functional
AUdio STream. Its programming model combines two approaches: functional
programming and block diagram composition. In addition with the C, C++, Java,
JavaScript output formats, the Faust compiler can generate LLVM bitcode, and
works with LLVM 2.7-3.2.</p>
</div>
<h3>Glasgow Haskell Compiler (GHC)</h3>
<div>
<p><a href="http://www.haskell.org/ghc/">GHC</a> is an open source compiler and
programming suite for Haskell, a lazy functional programming language. It
includes an optimizing static compiler generating good code for a variety of
platforms, together with an interactive system for convenient, quick
development.</p>
<p>GHC 7.0 and onwards include an LLVM code generator, supporting LLVM 2.8 and
later.</p>
</div>
<h3>Julia</h3>
<div>
<p><a href="https://github.com/JuliaLang/julia">Julia</a> is a high-level,
high-performance dynamic language for technical computing. It provides a
sophisticated compiler, distributed parallel execution, numerical accuracy,
and an extensive mathematical function library. The compiler uses type
inference to generate fast code without any type declarations, and uses
LLVM's optimization passes and JIT compiler. The
<a href="http://julialang.org/"> Julia Language</a> is designed
around multiple dispatch, giving programs a large degree of flexibility. It
is ready for use on many kinds of problems.</p>
</div>
<h3>LLVM D Compiler</h3>
<div>
<p><a href="https://github.com/ldc-developers/ldc">LLVM D Compiler</a> (LDC) is
a compiler for the D programming Language. It is based on the DMD frontend
and uses LLVM as backend.</p>
</div>
<h3>Open Shading Language</h3>
<div>
<p><a href="https://github.com/imageworks/OpenShadingLanguage/">Open Shading
Language (OSL)</a> is a small but rich language for programmable shading in
advanced global illumination renderers and other applications, ideal for
describing materials, lights, displacement, and pattern generation. It uses
LLVM to JIT complex shader networks to x86 code at runtime.</p>
<p>OSL was developed by Sony Pictures Imageworks for use in its in-house
renderer used for feature film animation and visual effects, and is
distributed as open source software with the "New BSD" license.
It has been used for all the shading on such films as The Amazing Spider-Man,
Men in Black III, Hotel Transylvania, and may other films in-progress,
and also has been incorporated into several commercial and open source
rendering products such as Blender, VRay, and Autodesk Beast.</p>
</div>
<h3>Portable OpenCL (pocl)</h3>
<div>
<p>In addition to producing an easily portable open source OpenCL
implementation, another major goal of <a href="http://pocl.sourceforge.net/">
pocl</a> is improving performance portability of OpenCL programs with
compiler optimizations, reducing the need for target-dependent manual
optimizations. An important part of pocl is a set of LLVM passes used to
statically parallelize multiple work-items with the kernel compiler, even in
the presence of work-group barriers. This enables static parallelization of
the fine-grained static concurrency in the work groups in multiple ways
(SIMD, VLIW, superscalar,...).</p>
</div>
<h3>Pure</h3>
<div>
<p><a href="http://pure-lang.googlecode.com/">Pure</a> is an
algebraic/functional programming language based on term rewriting. Programs
are collections of equations which are used to evaluate expressions in a
symbolic fashion. The interpreter uses LLVM as a backend to JIT-compile Pure
programs to fast native code. Pure offers dynamic typing, eager and lazy
evaluation, lexical closures, a hygienic macro system (also based on term
rewriting), built-in list and matrix support (including list and matrix
comprehensions) and an easy-to-use interface to C and other programming
languages (including the ability to load LLVM bitcode modules, and inline C,
C++, Fortran and Faust code in Pure programs if the corresponding
LLVM-enabled compilers are installed).</p>
<p>Pure version 0.56 has been tested and is known to work with LLVM 3.2 (and
continues to work with older LLVM releases >= 2.5).</p>
</div>
<h3>TTA-based Co-design Environment (TCE)</h3>
<div>
<p><a href="http://tce.cs.tut.fi/">TCE</a> is a toolset for designing
application-specific processors (ASP) based on the Transport triggered
architecture (TTA). The toolset provides a complete co-design flow from C/C++
programs down to synthesizable VHDL/Verilog and parallel program binaries.
Processor customization points include the register files, function units,
supported operations, and the interconnection network.</p>
<p>TCE uses Clang and LLVM for C/C++ language support, target independent
optimizations and also for parts of code generation. It generates new
LLVM-based code generators "on the fly" for the designed TTA processors and
loads them in to the compiler backend as runtime libraries to avoid
per-target recompilation of larger parts of the compiler chain.</p>
</div>
</div>
<!-- *********************************************************************** -->
<h2>
<a name="whatsnew">What's New in LLVM 3.2?</a>
</h2>
<!-- *********************************************************************** -->
<div>
<p>This release includes a huge number of bug fixes, performance tweaks and
minor improvements. Some of the major improvements and new features are
listed in this section.</p>
<!--=========================================================================-->
<h3>
<a name="majorfeatures">Major New Features</a>
</h3>
<div>
<!-- Features that need text if they're finished for 3.2:
ARM EHABI
combiner-aa?
strong phi elim
loop dependence analysis
CorrelatedValuePropagation
lib/Transforms/IPO/MergeFunctions.cpp => consider for 3.2.
Integrated assembler on by default for arm/thumb?
-->
<!-- Near dead:
Analysis/RegionInfo.h + Dom Frontiers
SparseBitVector: used in LiveVar.
llvm/lib/Archive - replace with lib object?
-->
<p>LLVM 3.2 includes several major changes and big features:</p>
<ul>
<li>Loop Vectorizer.</li>
<li>New implementation of SROA.</li>
<li>New NVPTX back-end (replacing existing PTX back-end) based on NVIDIA sources.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="coreimprovements">LLVM IR and Core Improvements</a>
</h3>
<div>
<p>LLVM IR has several new features for better support of new targets and that
expose new optimization opportunities:</p>
<ul>
<li>Thread local variables may have a specified TLS model. See the
<a href="LangRef.html#globalvars">Language Reference Manual</a>.</li>
<li>'TYPE_CODE_FUNCTION_OLD' type code and autoupgrade code for old function attributes format has been removed.</li>
<li>Internal representation of the Attributes class has been converted into a pointer to an
opaque object that's uniqued by and stored in the LLVMContext object.
The Attributes class then becomes a thin wrapper around this opaque object.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="optimizer">Optimizer Improvements</a>
</h3>
<div>
<p>In addition to many minor performance tweaks and bug fixes, this release
includes a few major enhancements and additions to the optimizers:</p>
<p> Loop Vectorizer - We've added a loop vectorizer and we are now able to
vectorize small loops. The loop vectorizer is disabled by default and
can be enabled using the <b>-mllvm -vectorize-loops</b> flag.
The SIMD vector width can be specified using the flag
<b>-mllvm -force-vector-width=4</b>.
The default value is <b>0</b> which means auto-select.
<br/>
We can now vectorize this function:
<pre class="doc_code">
unsigned sum_arrays(int *A, int *B, int start, int end) {
unsigned sum = 0;
for (int i = start; i &lt; end; ++i)
sum += A[i] + B[i] + i;
return sum;
}
</pre>
We vectorize under the following loops:
<ul>
<li>The inner most loops must have a single basic block.</li>
<li>The number of iterations are known before the loop starts to execute.</li>
<li>The loop counter needs to be incremented by one.</li>
<li>The loop trip count <b>can</b> be a variable.</li>
<li>Loops do <b>not</b> need to start at zero.</li>
<li>The induction variable can be used inside the loop.</li>
<li>Loop reductions are supported.</li>
<li>Arrays with affine access pattern do <b>not</b> need to be marked as 'noalias' and are checked at runtime.</li>
</ul>
</p>
<p>SROA - We&#8217;ve re-written SROA to be significantly more powerful and generate
code which is much more friendly to the rest of the optimization pipeline.
Previously this pass had scaling problems that required it to only operate on
relatively small aggregates, and at times it would mistakenly replace a large
aggregate with a single very large integer in order to make it a scalar SSA
value. The result was a large number of i1024 and i2048 values representing any
small stack buffer. These in turn slowed down many subsequent optimization
paths.</p>
<p>The new SROA pass uses a different algorithm that allows it to only promote to
scalars the pieces of the aggregate actively in use. Because of this it doesn&#8217;t
require any thresholds. It also always deduces the scalar values from the uses
of the aggregate rather than the specific LLVM type of the aggregate. These
features combine to both optimize more code with the pass but to improve the
compile time of many functions dramatically.</p>
<ul>
<li>Branch weight metadata is preserved through more of the optimizer.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="mc">MC Level Improvements</a>
</h3>
<div>
<p>The LLVM Machine Code (aka MC) subsystem was created to solve a number of
problems in the realm of assembly, disassembly, object file format handling,
and a number of other related areas that CPU instruction-set level tools work
in. For more information, please see the
<a href="http://blog.llvm.org/2010/04/intro-to-llvm-mc-project.html">Intro
to the LLVM MC Project Blog Post</a>.</p>
<ul>
<li> Added support for following assembler directives: <code>.ifb</code>, <code>.ifnb</code>, <code>.ifc</code>,
<code>.ifnc</code>, <code>.purgem</code>, <code>.rept</code> and <code>.version</code> (ELF) as well as Darwin specific
<code>.pushsection</code>, <code>.popsection</code> and <code>.previous</code> .</li>
<li>Enhanced handling of <code>.lcomm directive</code>.</li>
<li>MS style inline assembler: added implementation of the offset and TYPE operators.</li>
<li>Targets can specify minimum supported NOP size for NOP padding.</li>
<li>ELF improvements: added support for generating ELF objects on Windows.</li>
<li>MachO improvements: symbol-difference variables are marked as N_ABS, added direct-to-object attribute for data-in-code markers.</li>
<li>Added support for annotated disassembly output for x86 and arm targets.</li>
<li>Arm support has been improved by adding support for ARM TARGET2 relocation
and fixing hadling of ARM-style "$d.*" labels.</li>
<li>Implemented local-exec TLS on PowerPC.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="codegen">Target Independent Code Generator Improvements</a>
</h3>
<div>
<p>Stack Coloring - We have implemented a new optimization pass
to merge stack objects which are used in disjoin areas of the code.
This optimization reduces the required stack space significantly, in cases
where it is clear to the optimizer that the stack slot is not shared.
We use the lifetime markers to tell the codegen that a certain alloca
is used within a region.</p>
<p> We now merge consecutive loads and stores. </p>
<p>We have put a significant amount of work into the code generator
infrastructure, which allows us to implement more aggressive algorithms and
make it run faster:</p>
<p> We added new TableGen infrastructure to support bundling for
Very Long Instruction Word (VLIW) architectures. TableGen can now
automatically generate a deterministic finite automaton from a VLIW
target's schedule description which can be queried to determine
legal groupings of instructions in a bundle.</p>
<p> We have added a new target independent VLIW packetizer based on the
DFA infrastructure to group machine instructions into bundles.</p>
<p> We have added new TableGen infrastructure to support relationship maps
between instructions. This feature enables TableGen to automatically
construct a set of relation tables and query functions that can be used
to switch between various forms of instructions. For more information,
please refer to <a href="http://llvm.org/docs/HowToUseInstrMappings.html">
How To Use Instruction Mappings</a>.</p>
</div>
<h4>
<a name="blockplacement">Basic Block Placement</a>
</h4>
<div>
<p>A probability based block placement and code layout algorithm was added to
LLVM's code generator. This layout pass supports probabilities derived from
static heuristics as well as source code annotations such as
<code>__builtin_expect</code>.</p>
</div>
<!--=========================================================================-->
<h3>
<a name="x86">X86-32 and X86-64 Target Improvements</a>
</h3>
<div>
<p>New features and major changes in the X86 target include:</p>
<ul>
<li>Small codegen optimizations, especially for AVX2.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="ARM">ARM Target Improvements</a>
</h3>
<div>
<p>New features of the ARM target include:</p>
<ul>
<li>Support and performance tuning for the A6 'Swift' CPU.</li>
</ul>
<!--_________________________________________________________________________-->
<h4>
<a name="armintegratedassembler">ARM Integrated Assembler</a>
</h4>
<div>
<p>The ARM target now includes a full featured macro assembler, including
direct-to-object module support for clang. The assembler is currently enabled
by default for Darwin only pending testing and any additional necessary
platform specific support for Linux.</p>
<p>Full support is included for Thumb1, Thumb2 and ARM modes, along with
sub-target and CPU specific extensions for VFP2, VFP3 and NEON.</p>
<p>The assembler is Unified Syntax only (see ARM Architecural Reference Manual
for details). While there is some, and growing, support for pre-unfied
(divided) syntax, there are still significant gaps in that support.</p>
</div>
</div>
<!--=========================================================================-->
<h3>
<a name="MIPS">MIPS Target Improvements</a>
</h3>
<div>
<p>New features and major changes in the MIPS target include:</p>
<ul>
<li>Integrated assembler support:
MIPS32 works for both PIC and static, known limitation is the PR14456 where
R_MIPS_GPREL16 relocation is generated with the wrong addend.
MIPS64 support is incomplete, for example exception handling is not working.</li>
<li>Support for fast calling convention has been added.</li>
<li>Support for Android MIPS toolchain has been added to clang driver.</li>
<li>Added clang driver support for MIPS N32 ABI through "-mabi=n32" option.</li>
<li>MIPS32 and MIPS64 disassembler has been implemented.</li>
<li>Support for compiling programs with large GOTs (exceeding 64kB in size) has been added
through llc option "-mxgot".</li>
<li>Added experimental support for MIPS32 DSP intrinsics.</li>
<li>Experimental support for MIPS16 with following limitations: only soft float is supported,
C++ exceptions are not supported, large stack frames (> 32000 bytes) are not supported,
direct object code emission is not supported only .s .</li>
<li>Standalone assembler (llvm-mc): implementation is in progress and considered experimental.</li>
<li>All classic JIT and MCJIT tests pass on Little and Big Endian MIPS32 platforms.</li>
<li>Inline asm support: all common constraints and operand modifiers have been implemented.</li>
<li>Added tail call optimization support, use llc option "-enable-mips-tail-calls"
or clang options "-mllvm -enable-mips-tail-calls"to enable it.</li>
<li>Improved register allocation by removing registers $fp, $gp, $ra and $at from the list of reserved registers.</li>
<li>Long branch expansion pass has been implemented, which expands branch
instructions with offsets that do not fit in the 16-bit field.</li>
<li>Cavium Octeon II board is used for testing builds (llvm-mips-linux builder).</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="PowerPC">PowerPC Target Improvements</a>
</h3>
<div>
<p>Many fixes and changes across LLVM (and Clang) for better compliance with
the 64-bit PowerPC ELF Application Binary Interface, interoperability with
GCC, and overall 64-bit PowerPC support. Some highlights include:</p>
<ul>
<li> MCJIT support added.</li>
<li> PPC64 relocation support and (small code model) TOC handling
added.</li>
<li> Parameter passing and return value fixes (alignment issues,
padding, varargs support, proper register usage, odd-sized
structure support, float support, extension of return values
for i32 return values).</li>
<li> Fixes in spill and reload code for vector registers.</li>
<li> C++ exception handling enabled.</li>
<li> Changes to remediate double-rounding compatibility issues with
respect to GCC behavior.</li>
<li> Refactoring to disentangle ppc64-elf-linux ABI from Darwin
ppc64 ABI support.</li>
<li> Assorted new test cases and test case fixes (endian and word
size issues).</li>
<li> Fixes for big-endian codegen bugs, instruction encodings, and
instruction constraints.</li>
<li> Implemented -integrated-as support.</li>
<li> Additional support for Altivec compare operations.</li>
<li> IBM long double support.</li>
</ul>
<p>There have also been code generation improvements for both 32- and 64-bit
code. Instruction scheduling support for the Freescale e500mc and e5500
cores has been added.</p>
</div>
<!--=========================================================================-->
<h3>
<a name="NVPTX">PTX/NVPTX Target Improvements</a>
</h3>
<div>
<p>The PTX back-end has been replaced by the NVPTX back-end, which is based on
the LLVM back-end used by NVIDIA in their CUDA (nvcc) and OpenCL compiler.
Some highlights include:</p>
<ul>
<li>Compatibility with PTX 3.1 and SM 3.5</li>
<li>Support for NVVM intrinsics as defined in the NVIDIA Compiler SDK</li>
<li>Full compatibility with old PTX back-end, with much greater coverage of
LLVM IR</li>
</ul>
<p>Please submit any back-end bugs to the LLVM Bugzilla site.</p>
</div>
<!--=========================================================================-->
<h3>
<a name="OtherTS">Other Target Specific Improvements</a>
</h3>
<div>
<ul>
<li>Added support for custom names for library functions in TargetLibraryInfo.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="changes">Major Changes and Removed Features</a>
</h3>
<div>
<p>If you're already an LLVM user or developer with out-of-tree changes based on
LLVM 3.2, this section lists some "gotchas" that you may run into upgrading
from the previous release.</p>
<ul>
<li>llvm-ld and llvm-stub have been removed, llvm-ld functionality can be partially replaced by
llvm-link | opt | {llc | as, llc -filetype=obj} | ld, or fully replaced by Clang. </li>
<li>MCJIT: added support for inline assembly (requires asm parser), added faux remote target execution to lli option '-remote-mcjit'.</li>
</ul>
</div>
<!--=========================================================================-->
<h3>
<a name="api_changes">Internal API Changes</a>
</h3>
<div>
<p>In addition, many APIs have changed in this release. Some of the major
LLVM API changes are:</p>
<p> We've added a new interface for allowing IR-level passes to access
target-specific information. A new IR-level pass, called
"TargetTransformInfo" provides a number of low-level interfaces.
LSR and LowerInvoke already use the new interface. </p>
<p> The TargetData structure has been renamed to DataLayout and moved to VMCore
to remove a dependency on Target. </p>
</div>
<!--=========================================================================-->
<h3>
<a name="tools_changes">Tools Changes</a>
</h3>
<div>
<p>In addition, some tools have changed in this release. Some of the changes are:</p>
<ul>
<li>opt: added support for '-mtriple' option.</li>
<li>llvm-mc : - added '-disassemble' support for '-show-inst' and '-show-encoding' options, added '-edis' option to produce annotated
disassembly output for X86 and ARM targets.</li>
<li>libprofile: allows the profile data file name to be specified by the LLVMPROF_OUTPUT environment variable.</li>
<li>llvm-objdump: has been changed to display available targets, '-arch' option accepts x86 and x86-64 as valid arch names.</li>
<li>llc and opt: added FMA formation from pairs of FADD + FMUL or FSUB + FMUL enabled by option '-enable-excess-fp-precision' or option '-enable-unsafe-fp-math',
option '-fp-contract' controls the creation by optimizations of fused FP by selecting Fast, Standard, or Strict mode.</li>
<li>llc: object file output from llc is no longer considered experimental.</li>
<li>gold plugin: handles Position Independent Executables.</li>
</ul>
</div>
<!-- *********************************************************************** -->
<h2>
<a name="knownproblems">Known Problems</a>
</h2>
<!-- *********************************************************************** -->
<div>
<p>LLVM is generally a production quality compiler, and is used by a broad range
of applications and ships in many products. That said, not every
subsystem is as mature as the aggregate, particularly the more obscure
targets. If you run into a problem, please check
the <a href="http://llvm.org/bugs/">LLVM bug database</a> and submit a bug if
there isn't already one, or ask on
the <a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev">LLVMdev
list</a>.</p>
<p>Known problem areas include:</p>
<ul>
<li>The CellSPU, MSP430, and XCore backends are experimental, and the CellSPU backend will be removed in LLVM 3.3.</li>
<li>The integrated assembler, disassembler, and JIT are not supported by
several targets. If an integrated assembler is not supported, then a
system assembler is required. For more details, see the <a
href="CodeGenerator.html#targetfeatures">Target Features Matrix</a>.
</li>
</ul>
</div>
<!-- *********************************************************************** -->
<h2>
<a name="additionalinfo">Additional Information</a>
</h2>
<!-- *********************************************************************** -->
<div>
<p>A wide variety of additional information is available on
the <a href="http://llvm.org/">LLVM web page</a>, in particular in
the <a href="http://llvm.org/docs/">documentation</a> section. The web page
also contains versions of the API documentation which are up-to-date with the
Subversion version of the source code. You can access versions of these
documents specific to this release by going into the "<tt>llvm/docs/</tt>"
directory in the LLVM tree.</p>
<p>If you have any questions or comments about LLVM, please feel free to contact
us via the <a href="http://llvm.org/docs/#maillist"> mailing lists</a>.</p>
</div>
<!-- *********************************************************************** -->
<hr>
<address>
<a href="http://jigsaw.w3.org/css-validator/check/referer"><img
src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
<a href="http://validator.w3.org/check/referer"><img
src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
<a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2012-12-19 11:50:28 +0100 (Wed, 19 Dec 2012) $
</address>
</body>
</html>

144
docs/ReleaseNotes.rst Normal file
View File

@ -0,0 +1,144 @@
======================
LLVM 3.3 Release Notes
======================
.. contents::
:local:
.. warning::
These are in-progress notes for the upcoming LLVM 3.3 release. You may
prefer the `LLVM 3.2 Release Notes <http://llvm.org/releases/3.2/docs
/ReleaseNotes.html>`_.
Introduction
============
This document contains the release notes for the LLVM Compiler Infrastructure,
release 3.3. Here we describe the status of LLVM, including major improvements
from the previous release, improvements in various subprojects of LLVM, and
some of the current users of the code. All LLVM releases may be downloaded
from the `LLVM releases web site <http://llvm.org/releases/>`_.
For more information about LLVM, including information about the latest
release, please check out the `main LLVM web site <http://llvm.org/>`_. If you
have questions or comments, the `LLVM Developer's Mailing List
<http://lists.cs.uiuc.edu/mailman/listinfo/llvmdev>`_ is a good place to send
them.
Note that if you are reading this file from a Subversion checkout or the main
LLVM web page, this document applies to the *next* release, not the current
one. To see the release notes for a specific release, please see the `releases
page <http://llvm.org/releases/>`_.
Non-comprehensive list of changes in this release
=================================================
.. NOTE
For small 1-3 sentence descriptions, just add an entry at the end of
this list. If your description won't fit comfortably in one bullet
point (e.g. maybe you would like to give an example of the
functionality, or simply have a lot to talk about), see the `NOTE` below
for adding a new subsection.
* The CellSPU port has been removed. It can still be found in older versions.
* The IR-level extended linker APIs (for example, to link bitcode files out of
archives) have been removed. Any existing clients of these features should
move to using a linker with integrated LTO support.
* LLVM and Clang's documentation has been migrated to the `Sphinx
<http://sphinx-doc.org/>`_ documentation generation system which uses
easy-to-write reStructuredText. See `llvm/docs/README.txt` for more
information.
* TargetTransformInfo (TTI) is a new interface that can be used by IR-level
passes to obtain target-specific information, such as the costs of
instructions. Only "Lowering" passes such as LSR and the vectorizer are
allowed to use the TTI infrastructure.
* We've improved the X86 and ARM cost models.
* The Attributes classes have been completely rewritten and expanded. They now
support not only enumerated attributes and alignments, but "string"
attributes, which are useful for passing information to code generation. See
:doc:`HowToUseAttributes` for more details, and the hedged sketch after this
list for a flavor of the new interfaces.
* TableGen's syntax for instruction selection patterns has been simplified.
Instead of specifying types indirectly with register classes, you should now
specify types directly in the input patterns. See ``SparcInstrInfo.td`` for
examples of the new syntax. The old syntax using register classes still
works, but it will be removed in a future LLVM release.
* ... next change ...
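As promised in the Attributes entry above, here is a hedged sketch of building
an attribute set that mixes an enumerated attribute, an alignment, and a
string attribute. The class and method names used below (``AttrBuilder``,
``AttributeSet``, ``addAlignmentAttr``) are assumed from this release's
rewritten interfaces; see :doc:`HowToUseAttributes` for the authoritative API.

.. code-block:: c++

  #include "llvm/IR/Attributes.h"
  #include "llvm/IR/LLVMContext.h"

  // Assumed API: build a function-level attribute set containing an
  // enumerated attribute, an alignment, and a free-form string attribute.
  llvm::AttributeSet buildAttrs(llvm::LLVMContext &Ctx) {
    llvm::AttrBuilder B;
    B.addAttribute(llvm::Attribute::NoUnwind);      // enumerated attribute
    B.addAlignmentAttr(16);                         // alignment
    B.addAttribute("target-feature", "+sse2");      // string key/value pair
    return llvm::AttributeSet::get(Ctx, llvm::AttributeSet::FunctionIndex, B);
  }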
.. NOTE
If you would like to document a larger change, then you can add a
subsection about it right here. You can copy the following boilerplate
and un-indent it (the indentation causes it to be inside this comment).
Special New Feature
-------------------
Makes programs 10x faster by doing Special New Thing.
AArch64 target
--------------
We've added support for AArch64, ARM's 64-bit architecture. Development is still
in fairly early stages, but we expect successful compilation when:
- compiling standard compliant C99 and C++03 with Clang;
- using Linux as a target platform;
- where code + static data doesn't exceed 4GB in size (heap allocated data has
no limitation).
Some additional functionality is also implemented, notably DWARF debugging,
GNU-style thread local storage and inline assembly.
Hexagon Target
--------------
- Removed support for legacy hexagonv2 and hexagonv3 processor
architectures which are no longer in use. Currently supported
architectures are hexagonv4 and hexagonv5.
Loop Vectorizer
---------------
We've continued the work on the loop vectorizer. The loop vectorizer now
has the following features (a brief example of a loop it can now handle
follows this list):
- Loops with unknown trip count
- Runtime checks of pointers
- Reductions and inductions
- If-conversion
- Pointer induction variables
- Reverse iterators
- Vectorization of mixed types
- Vectorization of function calls
- Partial unrolling during vectorization
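As a purely illustrative example (not taken from the notes above), a plain C++
loop of the following shape combines several of these features: a trip count
that is unknown at compile time and an integer reduction.

.. code-block:: c++

  // A reduction over a runtime trip count; the vectorizer can emit a
  // vectorized body plus a scalar remainder loop for loops of this shape.
  int sum_array(const int *A, int N) {
    int Sum = 0;                  // reduction variable
    for (int i = 0; i < N; ++i)   // trip count unknown at compile time
      Sum += A[i];
    return Sum;
  }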
R600 Backend
------------
The R600 backend was added in this release. It supports AMD GPUs
(HD2XXX - HD7XXX). This backend is used in AMD's Open Source
graphics / compute drivers, which are developed as part of the `Mesa3D
<http://www.mesa3d.org>`_ project.
Additional Information
======================
A wide variety of additional information is available on the `LLVM web page
<http://llvm.org/>`_, in particular in the `documentation
<http://llvm.org/docs/>`_ section. The web page also contains versions of the
API documentation which is up-to-date with the Subversion version of the source
code. You can access versions of these documents specific to this release by
going into the ``llvm/docs/`` directory in the LLVM tree.
If you have any questions or comments about LLVM, please feel free to contact
us via the `mailing lists <http://llvm.org/docs/#maillist>`_.

View File

@ -1,5 +1,3 @@
.. _segmented_stacks:
========================
Segmented Stacks in LLVM
========================

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -2,8 +2,6 @@
Sphinx Quickstart Template
==========================
.. sectionauthor:: Sean Silva <silvas@purdue.edu>
Introduction and Quickstart
===========================
@ -24,7 +22,8 @@ reStructuredText syntax is useful when writing the document, so the last
~half of this document (starting with `Example Section`_) gives examples
which should cover 99% of use cases.
Let me say that again: focus on *content*.
Let me say that again: focus on *content*. But if you really need to verify
Sphinx's output, see ``docs/README.txt`` for information.
Once you have finished with the content, please send the ``.rst`` file to
llvm-commits for review.
@ -65,7 +64,7 @@ Your text can be *emphasized*, **bold**, or ``monospace``.
Use blank lines to separate paragraphs.
Headings (like ``Example Section`` just above) give your document
Headings (like ``Example Section`` just above) give your document its
structure. Use the same kind of adornments (e.g. ``======`` vs. ``------``)
as are used in this document. The adornment must be the same length as the
text above it. For Vim users, variations of ``yypVr=`` might be handy.
@ -86,7 +85,7 @@ Lists can be made like this:
#. This is a second list element.
#. They nest too.
#. Use indentation to create nested lists.
You can also use unordered lists.
@ -104,18 +103,54 @@ You can make blocks of code like this:
.. code-block:: c++
int main() {
return 0
return 0;
}
For a shell session, use a ``bash`` code block:
For a shell session, use a ``console`` code block (some existing docs use
``bash``):
.. code-block:: bash
.. code-block:: console
$ echo "Goodbye cruel world!"
$ rm -rf /
If you need to show LLVM IR use the ``llvm`` code block.
.. code-block:: llvm
define i32 @test1() {
entry:
ret i32 0
}
Some other common code blocks you might need are ``c``, ``objc``, ``make``,
and ``cmake``. If you need something beyond that, you can look at the `full
list`_ of supported code blocks.
.. _`full list`: http://pygments.org/docs/lexers/
However, don't waste time fiddling with syntax highlighting when you could
be adding meaningful content. When in doubt, show preformatted text
without any syntax highlighting like this:
::
.
+:.
..:: ::
.++:+:: ::+:.:.
.:+ :
::.::..:: .+.
..:+ :: :
......+:. ..
:++. .. :
.+:::+:: :
.. . .+ ::
+.: .::+.
...+. .: .
.++:..
...
Hopefully you won't need to be this deep
""""""""""""""""""""""""""""""""""""""""

View File

@ -1,316 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>System Library</title>
<link rel="stylesheet" href="_static/llvm.css" type="text/css">
</head>
<body>
<h1>System Library</h1>
<ul>
<li><a href="#abstract">Abstract</a></li>
<li><a href="#requirements">Keeping LLVM Portable</a>
<ol>
<li><a href="#headers">Don't Include System Headers</a></li>
<li><a href="#expose">Don't Expose System Headers</a></li>
<li><a href="#c_headers">Allow Standard C Header Files</a></li>
<li><a href="#cpp_headers">Allow Standard C++ Header Files</a></li>
<li><a href="#highlev">High-Level Interface</a></li>
<li><a href="#nofunc">No Exposed Functions</a></li>
<li><a href="#nodata">No Exposed Data</a></li>
<li><a href="#nodupl">No Duplicate Implementations</a></li>
<li><a href="#nounused">No Unused Functionality</a></li>
<li><a href="#virtuals">No Virtual Methods</a></li>
<li><a href="#softerrors">Minimize Soft Errors</a></li>
<li><a href="#throw_spec">No throw() Specifications</a></li>
<li><a href="#organization">Code Organization</a></li>
<li><a href="#semantics">Consistent Semantics</a></li>
<li><a href="#bug">Tracking Bugzilla Bug: 351</a></li>
</ol></li>
</ul>
<div class="doc_author">
<p>Written by <a href="mailto:rspencer@x10sys.com">Reid Spencer</a></p>
</div>
<!-- *********************************************************************** -->
<h2><a name="abstract">Abstract</a></h2>
<div>
<p>This document provides some details on LLVM's System Library, located in
the source at <tt>lib/System</tt> and <tt>include/llvm/System</tt>. The
library's purpose is to shield LLVM from the differences between operating
systems for the few services LLVM needs from the operating system. Much of
LLVM is written using portability features of standard C++. However, in a few
areas, system dependent facilities are needed and the System Library is the
wrapper around those system calls.</p>
<p>By centralizing LLVM's use of operating system interfaces, we make it
possible for the LLVM tool chain and runtime libraries to be more easily
ported to new platforms since (theoretically) only <tt>lib/System</tt> needs
to be ported. This library also unclutters the rest of LLVM from #ifdef use
and special cases for specific operating systems. Such uses are replaced
with simple calls to the interfaces provided in <tt>include/llvm/System</tt>.
</p>
<p>Note that the System Library is not intended to be a complete operating
system wrapper (such as the Adaptive Communications Environment (ACE) or
Apache Portable Runtime (APR)), but only provides the functionality necessary
to support LLVM.</p>
<p>The System Library was written by Reid Spencer who formulated the
design based on similar work originating from the eXtensible Programming
System (XPS). Several people helped with the effort; especially,
Jeff Cohen and Henrik Bach on the Win32 port.</p>
</div>
<!-- *********************************************************************** -->
<h2>
<a name="requirements">Keeping LLVM Portable</a>
</h2>
<div>
<p>In order to keep LLVM portable, LLVM developers should adhere to a set of
portability rules associated with the System Library. Adherence to these rules
should help the System Library achieve its goal of shielding LLVM from the
variations in operating system interfaces and doing so efficiently. The
following sections define the rules needed to fulfill this objective.</p>
<!-- ======================================================================= -->
<h3><a name="headers">Don't Include System Headers</a></h3>
<div>
<p>Except in <tt>lib/System</tt>, no LLVM source code should directly
<tt>#include</tt> a system header. Care has been taken to remove all such
<tt>#includes</tt> from LLVM while <tt>lib/System</tt> was being
developed. Specifically this means that header files like "unistd.h",
"windows.h", "stdio.h", and "string.h" are forbidden to be included by LLVM
source code outside the implementation of <tt>lib/System</tt>.</p>
<p>To obtain system-dependent functionality, existing interfaces to the system
found in <tt>include/llvm/System</tt> should be used. If an appropriate
interface is not available, it should be added to <tt>include/llvm/System</tt>
and implemented in <tt>lib/System</tt> for all supported platforms.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="expose">Don't Expose System Headers</a></h3>
<div>
<p>The System Library must shield LLVM from <em>all</em> system headers. To
obtain system level functionality, LLVM source must
<tt>#include "llvm/System/Thing.h"</tt> and nothing else. This means that
<tt>Thing.h</tt> cannot expose any system header files. This protects LLVM
from accidentally using system specific functionality and only allows it
via the <tt>lib/System</tt> interface.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="c_headers">Use Standard C Headers</a></h3>
<div>
<p>The <em>standard</em> C headers (the ones beginning with "c") are allowed
to be exposed through the <tt>lib/System</tt> interface. These headers and
the things they declare are considered to be platform agnostic. LLVM source
files may include them directly or obtain their inclusion through
<tt>lib/System</tt> interfaces.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="cpp_headers">Use Standard C++ Headers</a></h3>
<div>
<p>The <em>standard</em> C++ headers from the standard C++ library and
standard template library may be exposed through the <tt>lib/System</tt>
interface. These headers and the things they declare are considered to be
platform agnostic. LLVM source files may include them or obtain their
inclusion through lib/System interfaces.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="highlev">High Level Interface</a></h3>
<div>
<p>The entry points specified in the interface of lib/System must be aimed at
completing some reasonably high level task needed by LLVM. We do not want to
simply wrap each operating system call. It would be preferable to wrap several
operating system calls that are always used in conjunction with one another by
LLVM.</p>
<p>For example, consider what is needed to execute a program, wait for it to
complete, and return its result code. On Unix, this involves the following
operating system calls: <tt>getenv, fork, execve,</tt> and <tt>wait</tt>. The
correct thing for lib/System to provide is a function, say
<tt>ExecuteProgramAndWait</tt>, that implements the functionality completely.
What we don't want is wrappers for the operating system calls involved.</p>
<p>There must <em>not</em> be a one-to-one relationship between operating
system calls and the System library's interface. Any such interface function
will be suspicious.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="nounused">No Unused Functionality</a></h3>
<div>
<p>There must be no functionality specified in the interface of lib/System
that isn't actually used by LLVM. We're not writing a general purpose
operating system wrapper here, just enough to satisfy LLVM's needs. And, LLVM
doesn't need much. This design goal aims to keep the lib/System interface
small and understandable which should foster its actual use and adoption.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="nodupl">No Duplicate Implementations</a></h3>
<div>
<p>The implementation of a function for a given platform must be written
exactly once. This implies that it must be possible to apply a function's
implementation to multiple operating systems if those operating systems can
share the same implementation. This rule applies to the set of operating
systems supported for a given class of operating system (e.g. Unix, Win32).
</p>
</div>
<!-- ======================================================================= -->
<h3><a name="virtuals">No Virtual Methods</a></h3>
<div>
<p>The System Library interfaces can be called quite frequently by LLVM. In
order to make those calls as efficient as possible, we discourage the use of
virtual methods. There is no need to use inheritance for implementation
differences, it just adds complexity. The <tt>#include</tt> mechanism works
just fine.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="nofunc">No Exposed Functions</a></h3>
<div>
<p>Any functions defined by system libraries (i.e. not defined by lib/System)
must not be exposed through the lib/System interface, even if the header file
for that function is not exposed. This prevents inadvertent use of system
specific functionality.</p>
<p>For example, the <tt>stat</tt> system call is notorious for having
variations in the data it provides. <tt>lib/System</tt> must not declare
<tt>stat</tt> nor allow it to be declared. Instead it should provide its own
interface to discovering information about files and directories. Those
interfaces may be implemented in terms of <tt>stat</tt> but that is strictly
an implementation detail. The interface provided by the System Library must
be implemented on all platforms (even those without <tt>stat</tt>).</p>
</div>
<!-- ======================================================================= -->
<h3><a name="nodata">No Exposed Data</a></h3>
<div>
<p>Any data defined by system libraries (i.e. not defined by lib/System) must
not be exposed through the lib/System interface, even if the header file for
that function is not exposed. As with functions, this prevents inadvertent use
of data that might not exist on all platforms.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="softerrors">Minimize Soft Errors</a></h3>
<div>
<p>Operating system interfaces will generally provide error results for every
little thing that could go wrong. In almost all cases, you can divide these
error results into two groups: normal/good/soft and abnormal/bad/hard. That
is, some of the errors are simply information like "file not found",
"insufficient privileges", etc. while other errors are much harder like
"out of space", "bad disk sector", or "system call interrupted". We'll call
the first group "<i>soft</i>" errors and the second group "<i>hard</i>"
errors.</p>
<p>lib/System must always attempt to minimize soft errors.
This is a design requirement because the
minimization of soft errors can affect the granularity and the nature of the
interface. In general, if you find that you're wanting to throw soft errors,
you must review the granularity of the interface because it is likely you're
trying to implement something that is too low level. The rule of thumb is to
provide interface functions that <em>can't</em> fail, except when faced with
hard errors.</p>
<p>For a trivial example, suppose we wanted to add an "OpenFileForWriting"
function. For many operating systems, if the file doesn't exist, attempting
to open the file will produce an error. However, lib/System should not
simply throw that error if it occurs because it's a soft error. The problem
is that the interface function, OpenFileForWriting, is too low level. It should
be OpenOrCreateFileForWriting. In the case of the soft "doesn't exist" error,
this function would just create it and then open it for writing.</p>
<p>This design principle needs to be maintained in lib/System because it
avoids the propagation of soft error handling throughout the rest of LLVM.
Hard errors will generally just cause a termination for an LLVM tool so don't
be bashful about throwing them.</p>
<p>Rules of thumb:</p>
<ol>
<li>Don't throw soft errors, only hard errors.</li>
<li>If you're tempted to throw a soft error, re-think the interface.</li>
<li>Handle internally the most common normal/good/soft error conditions
so the rest of LLVM doesn't have to.</li>
</ol>
</div>
<!-- ======================================================================= -->
<h3><a name="throw_spec">No throw Specifications</a></h3>
<div>
<p>None of the lib/System interface functions may be declared with C++
<tt>throw()</tt> specifications on them. This requirement makes sure that the
compiler does not insert additional exception handling code into the interface
functions. This is a performance consideration: lib/System functions are at
the bottom of many call chains and as such can be frequently called. We
need them to be as efficient as possible. However, no routines in the
system library should actually throw exceptions.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="organization">Code Organization</a></h3>
<div>
<p>Implementations of the System Library interface are separated by their
general class of operating system. Currently only Unix and Win32 classes are
defined but more could be added for other operating system classifications.
To distinguish which implementation to compile, the code in lib/System uses
the LLVM_ON_UNIX and LLVM_ON_WIN32 #defines provided via configure through the
llvm/Config/config.h file. Each source file in lib/System, after implementing
the generic (operating system independent) functionality needs to include the
correct implementation using a set of <tt>#if defined(LLVM_ON_XYZ)</tt>
directives. For example, if we had lib/System/File.cpp, we'd expect to see in
that file:</p>
<pre><tt>
#if defined(LLVM_ON_UNIX)
#include "Unix/File.cpp"
#endif
#if defined(LLVM_ON_WIN32)
#include "Win32/File.cpp"
#endif
</tt></pre>
<p>The implementation in lib/System/Unix/File.cpp should handle all Unix
variants. The implementation in lib/System/Win32/File.cpp should handle all
Win32 variants. What this does is quickly differentiate the basic class of
operating system that will provide the implementation. The specific details
for a given platform must still be determined through the use of
<tt>#ifdef</tt>.</p>
</div>
<!-- ======================================================================= -->
<h3><a name="semantics">Consistent Semantics</a></h3>
<div>
<p>The implementation of a lib/System interface can vary drastically between
platforms. That's okay as long as the end result of the interface function
is the same. For example, a function to create a directory is pretty
straightforward on all operating systems. System V IPC on the other hand isn't even
supported on all platforms. Instead of "supporting" System V IPC, lib/System
should provide an interface to the basic concept of inter-process
communications. The implementations might use System V IPC if that was
available or named pipes, or whatever gets the job done effectively for a
given operating system. In all cases, the interface and the implementation
must be semantically consistent. </p>
</div>
<!-- ======================================================================= -->
<h3><a name="bug">Bug 351</a></h3>
<div>
<p>See <a href="http://llvm.org/PR351">bug 351</a>
for further details on the progress of this work.</p>
</div>
</div>
<!-- *********************************************************************** -->
<hr>
<address>
<a href="http://jigsaw.w3.org/css-validator/check/referer"><img
src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
<a href="http://validator.w3.org/check/referer"><img
src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
<a href="mailto:rspencer@x10sys.com">Reid Spencer</a><br>
<a href="http://llvm.org/">LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2012-04-19 22:20:34 +0200 (Thu, 19 Apr 2012) $
</address>
</body>
</html>

247
docs/SystemLibrary.rst Normal file
View File

@ -0,0 +1,247 @@
==============
System Library
==============
Abstract
========
This document provides some details on LLVM's System Library, located in the
source at ``lib/System`` and ``include/llvm/System``. The library's purpose is
to shield LLVM from the differences between operating systems for the few
services LLVM needs from the operating system. Much of LLVM is written using
portability features of standard C++. However, in a few areas, system dependent
facilities are needed and the System Library is the wrapper around those system
calls.
By centralizing LLVM's use of operating system interfaces, we make it possible
for the LLVM tool chain and runtime libraries to be more easily ported to new
platforms since (theoretically) only ``lib/System`` needs to be ported. This
library also unclutters the rest of LLVM from #ifdef use and special cases for
specific operating systems. Such uses are replaced with simple calls to the
interfaces provided in ``include/llvm/System``.
Note that the System Library is not intended to be a complete operating system
wrapper (such as the Adaptive Communications Environment (ACE) or Apache
Portable Runtime (APR)), but only provides the functionality necessary to
support LLVM.
The System Library was written by Reid Spencer who formulated the design based
on similar work originating from the eXtensible Programming System (XPS).
Several people helped with the effort; especially, Jeff Cohen and Henrik Bach
on the Win32 port.
Keeping LLVM Portable
=====================
In order to keep LLVM portable, LLVM developers should adhere to a set of
portability rules associated with the System Library. Adherence to these rules
should help the System Library achieve its goal of shielding LLVM from the
variations in operating system interfaces and doing so efficiently. The
following sections define the rules needed to fulfill this objective.
Don't Include System Headers
----------------------------
Except in ``lib/System``, no LLVM source code should directly ``#include`` a
system header. Care has been taken to remove all such ``#includes`` from LLVM
while ``lib/System`` was being developed. Specifically this means that header
files like "``unistd.h``", "``windows.h``", "``stdio.h``", and "``string.h``"
are forbidden to be included by LLVM source code outside the implementation of
``lib/System``.
To obtain system-dependent functionality, existing interfaces to the system
found in ``include/llvm/System`` should be used. If an appropriate interface is
not available, it should be added to ``include/llvm/System`` and implemented in
``lib/System`` for all supported platforms.
Don't Expose System Headers
---------------------------
The System Library must shield LLVM from **all** system headers. To obtain
system level functionality, LLVM source must ``#include "llvm/System/Thing.h"``
and nothing else. This means that ``Thing.h`` cannot expose any system header
files. This protects LLVM from accidentally using system specific functionality
and only allows it via the ``lib/System`` interface.
Use Standard C Headers
----------------------
The **standard** C headers (the ones beginning with "c") are allowed to be
exposed through the ``lib/System`` interface. These headers and the things they
declare are considered to be platform agnostic. LLVM source files may include
them directly or obtain their inclusion through ``lib/System`` interfaces.
Use Standard C++ Headers
------------------------
The **standard** C++ headers from the standard C++ library and standard
template library may be exposed through the ``lib/System`` interface. These
headers and the things they declare are considered to be platform agnostic.
LLVM source files may include them or obtain their inclusion through
``lib/System`` interfaces.
High Level Interface
--------------------
The entry points specified in the interface of ``lib/System`` must be aimed at
completing some reasonably high level task needed by LLVM. We do not want to
simply wrap each operating system call. It would be preferable to wrap several
operating system calls that are always used in conjunction with one another by
LLVM.
For example, consider what is needed to execute a program, wait for it to
complete, and return its result code. On Unix, this involves the following
operating system calls: ``getenv``, ``fork``, ``execve``, and ``wait``. The
correct thing for ``lib/System`` to provide is a function, say
``ExecuteProgramAndWait``, that implements the functionality completely. What
we don't want is wrappers for the operating system calls involved.
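To make the idea concrete, here is a purely hypothetical stand-in (this is not
the actual ``lib/System`` declaration): one call that hides the whole
``getenv``/``fork``/``execve``/``wait`` sequence. The portable sketch below
simply delegates to ``std::system``.

.. code-block:: c++

  #include <cstdlib>
  #include <string>

  // Hypothetical high-level entry point; a real implementation would live
  // in lib/System and use the platform's process APIs, not std::system.
  int ExecuteProgramAndWait(const std::string &CommandLine) {
    return std::system(CommandLine.c_str());
  }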
There must **not** be a one-to-one relationship between operating system
calls and the System library's interface. Any such interface function will be
suspicious.
No Unused Functionality
-----------------------
There must be no functionality specified in the interface of ``lib/System``
that isn't actually used by LLVM. We're not writing a general purpose operating
system wrapper here, just enough to satisfy LLVM's needs. And, LLVM doesn't
need much. This design goal aims to keep the ``lib/System`` interface small and
understandable which should foster its actual use and adoption.
No Duplicate Implementations
----------------------------
The implementation of a function for a given platform must be written exactly
once. This implies that it must be possible to apply a function's
implementation to multiple operating systems if those operating systems can
share the same implementation. This rule applies to the set of operating
systems supported for a given class of operating system (e.g. Unix, Win32).
No Virtual Methods
------------------
The System Library interfaces can be called quite frequently by LLVM. In order
to make those calls as efficient as possible, we discourage the use of virtual
methods. There is no need to use inheritance for implementation differences, it
just adds complexity. The ``#include`` mechanism works just fine.
No Exposed Functions
--------------------
Any functions defined by system libraries (i.e. not defined by ``lib/System``)
must not be exposed through the ``lib/System`` interface, even if the header
file for that function is not exposed. This prevents inadvertent use of system
specific functionality.
For example, the ``stat`` system call is notorious for having variations in the
data it provides. ``lib/System`` must not declare ``stat`` nor allow it to be
declared. Instead it should provide its own interface to discovering
information about files and directories. Those interfaces may be implemented in
terms of ``stat`` but that is strictly an implementation detail. The interface
provided by the System Library must be implemented on all platforms (even those
without ``stat``).
No Exposed Data
---------------
Any data defined by system libraries (i.e. not defined by ``lib/System``) must
not be exposed through the ``lib/System`` interface, even if the header file
for that function is not exposed. As with functions, this prevents inadvertent
use of data that might not exist on all platforms.
Minimize Soft Errors
--------------------
Operating system interfaces will generally provide error results for every
little thing that could go wrong. In almost all cases, you can divide these
error results into two groups: normal/good/soft and abnormal/bad/hard. That is,
some of the errors are simply information like "file not found", "insufficient
privileges", etc. while other errors are much harder like "out of space", "bad
disk sector", or "system call interrupted". We'll call the first group "*soft*"
errors and the second group "*hard*" errors.
``lib/System`` must always attempt to minimize soft errors. This is a design
requirement because the minimization of soft errors can affect the granularity
and the nature of the interface. In general, if you find that you're wanting to
throw soft errors, you must review the granularity of the interface because it
is likely you're trying to implement something that is too low level. The rule
of thumb is to provide interface functions that **can't** fail, except when
faced with hard errors.
For a trivial example, suppose we wanted to add an "``OpenFileForWriting``"
function. For many operating systems, if the file doesn't exist, attempting to
open the file will produce an error. However, ``lib/System`` should not simply
throw that error if it occurs because it's a soft error. The problem is that the
interface function, ``OpenFileForWriting``, is too low level. It should be
``OpenOrCreateFileForWriting``. In the case of the soft "doesn't exist" error,
this function would just create it and then open it for writing.
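A hypothetical sketch of that idea (again, not an actual ``lib/System``
interface): the higher-level entry point absorbs the soft "doesn't exist"
case itself, so the caller only ever has to deal with hard failures.

.. code-block:: c++

  #include <cstdio>
  #include <string>

  // Hypothetical: "wb" creates the file when it is missing, so the soft
  // "doesn't exist" error never reaches the caller.  A null result here
  // signals a genuine hard error (e.g. out of space).
  std::FILE *OpenOrCreateFileForWriting(const std::string &Path) {
    return std::fopen(Path.c_str(), "wb");
  }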
This design principle needs to be maintained in ``lib/System`` because it
avoids the propagation of soft error handling throughout the rest of LLVM.
Hard errors will generally just cause a termination for an LLVM tool so don't
be bashful about throwing them.
Rules of thumb:
#. Don't throw soft errors, only hard errors.
#. If you're tempted to throw a soft error, re-think the interface.
#. Handle internally the most common normal/good/soft error conditions
so the rest of LLVM doesn't have to.
No throw Specifications
-----------------------
None of the ``lib/System`` interface functions may be declared with C++
``throw()`` specifications on them. This requirement makes sure that the
compiler does not insert additional exception handling code into the interface
functions. This is a performance consideration: ``lib/System`` functions are at
the bottom of many call chains and as such can be frequently called. We need
them to be as efficient as possible. However, no routines in the system
library should actually throw exceptions.
Code Organization
-----------------
Implementations of the System Library interface are separated by their general
class of operating system. Currently only Unix and Win32 classes are defined
but more could be added for other operating system classifications. To
distinguish which implementation to compile, the code in ``lib/System`` uses
the ``LLVM_ON_UNIX`` and ``LLVM_ON_WIN32`` ``#defines`` provided via configure
through the ``llvm/Config/config.h`` file. Each source file in ``lib/System``,
after implementing the generic (operating system independent) functionality
needs to include the correct implementation using a set of
``#if defined(LLVM_ON_XYZ)`` directives. For example, if we had
``lib/System/File.cpp``, we'd expect to see in that file:
.. code-block:: c++
#if defined(LLVM_ON_UNIX)
#include "Unix/File.cpp"
#endif
#if defined(LLVM_ON_WIN32)
#include "Win32/File.cpp"
#endif
The implementation in ``lib/System/Unix/File.cpp`` should handle all Unix
variants. The implementation in ``lib/System/Win32/File.cpp`` should handle all
Win32 variants. What this does is quickly differentiate the basic class of
operating system that will provide the implementation. The specific details for
a given platform must still be determined through the use of ``#ifdef``.
Consistent Semantics
--------------------
The implementation of a ``lib/System`` interface can vary drastically between
platforms. That's okay as long as the end result of the interface function is
the same. For example, a function to create a directory is pretty
straightforward on all operating systems. System V IPC on the other hand isn't even
supported on all platforms. Instead of "supporting" System V IPC,
``lib/System`` should provide an interface to the basic concept of
inter-process communications. The implementations might use System V IPC if
that was available or named pipes, or whatever gets the job done effectively
for a given operating system. In all cases, the interface and the
implementation must be semantically consistent.

383
docs/TableGen/LangRef.rst Normal file
View File

@ -0,0 +1,383 @@
===========================
TableGen Language Reference
===========================
.. sectionauthor:: Sean Silva <silvas@purdue.edu>
.. contents::
:local:
.. warning::
This document is extremely rough. If you find something lacking, please
fix it, file a documentation bug, or ask about it on llvmdev.
Introduction
============
This document is meant to be a normative spec about the TableGen language
in and of itself (i.e. how to understand a given construct in terms of how
it affects the final set of records represented by the TableGen file). If
you are unsure if this document is really what you are looking for, please
read :doc:`/TableGenFundamentals` first.
Notation
========
The lexical and syntax notation used here is intended to imitate
`Python's`_. In particular, for lexical definitions, the productions
operate at the character level and there is no implied whitespace between
elements. The syntax definitions operate at the token level, so there is
implied whitespace between tokens.
.. _`Python's`: http://docs.python.org/py3k/reference/introduction.html#notation
Lexical Analysis
================
TableGen supports BCPL (``// ...``) and nestable C-style (``/* ... */``)
comments.
The following is a listing of the basic punctuation tokens::
- + [ ] { } ( ) < > : ; . = ? #
Numeric literals take one of the following forms:
.. TableGen actually will lex some pretty strange sequences and interpret
them as numbers. What is shown here is an attempt to approximate what it
"should" accept.
.. productionlist::
TokInteger: `DecimalInteger` | `HexInteger` | `BinInteger`
DecimalInteger: ["+" | "-"] ("0"..."9")+
HexInteger: "0x" ("0"..."9" | "a"..."f" | "A"..."F")+
BinInteger: "0b" ("0" | "1")+
One aspect to note is that the :token:`DecimalInteger` token *includes* the
``+`` or ``-``, as opposed to having ``+`` and ``-`` be unary operators as
most languages do.
TableGen has identifier-like tokens:
.. productionlist::
ualpha: "a"..."z" | "A"..."Z" | "_"
TokIdentifier: ("0"..."9")* `ualpha` (`ualpha` | "0"..."9")*
TokVarName: "$" `ualpha` (`ualpha` | "0"..."9")*
Note that unlike most languages, TableGen allows :token:`TokIdentifier` to
begin with a number. In case of ambiguity, a token will be interpreted as a
numeric literal rather than an identifier.
TableGen also has two string-like literals:
.. productionlist::
TokString: '"' <non-'"' characters and C-like escapes> '"'
TokCodeFragment: "[{" <shortest text not containing "}]"> "}]"
.. note::
The current implementation accepts the following C-like escapes::
\\ \' \" \t \n
TableGen also has the following keywords::
bit bits class code dag
def foreach defm field in
int let list multiclass string
TableGen also has "bang operators" which have a
wide variety of meanings:
.. productionlist::
BangOperator: one of
:!eq !if !head !tail !con
:!add !shl !sra !srl
:!cast !empty !subst !foreach !strconcat
Syntax
======
TableGen has an ``include`` mechanism. It does not play a role in the
syntax per se, since it is lexically replaced with the contents of the
included file.
.. productionlist::
IncludeDirective: "include" `TokString`
TableGen's top-level production consists of "objects".
.. productionlist::
TableGenFile: `Object`*
Object: `Class` | `Def` | `Defm` | `Let` | `MultiClass` | `Foreach`
``class``\es
------------
.. productionlist::
Class: "class" `TokIdentifier` [`TemplateArgList`] `ObjectBody`
A ``class`` declaration creates a record which other records can inherit
from. A class can be parametrized by a list of "template arguments", whose
values can be used in the class body.
A given class can only be defined once. A ``class`` declaration is
considered to define the class if any of the following is true:
.. break ObjectBody into its constituents so that they are present here?
#. The :token:`TemplateArgList` is present.
#. The :token:`Body` in the :token:`ObjectBody` is present and is not empty.
#. The :token:`BaseClassList` in the :token:`ObjectBody` is present.
You can declare an empty class by giving an empty :token:`TemplateArgList`
and an empty :token:`ObjectBody`. This can serve as a restricted form of
forward declaration: note that records deriving from the forward-declared
class will inherit no fields from it since the record expansion is done
when the record is parsed.
.. productionlist::
TemplateArgList: "<" `Declaration` ("," `Declaration`)* ">"
Declarations
------------
.. Omitting mention of arcane "field" prefix to discourage its use.
The declaration syntax is pretty much what you would expect as a C++
programmer.
.. productionlist::
Declaration: `Type` `TokIdentifier` ["=" `Value`]
It assigns the value to the identifier.
Types
-----
.. productionlist::
Type: "string" | "code" | "bit" | "int" | "dag"
:| "bits" "<" `TokInteger` ">"
:| "list" "<" `Type` ">"
:| `ClassID`
ClassID: `TokIdentifier`
Both ``string`` and ``code`` correspond to the string type; the difference
is purely to indicate programmer intention.
The :token:`ClassID` must identify a class that has been previously
declared or defined.
Values
------
.. productionlist::
Value: `SimpleValue` `ValueSuffix`*
ValueSuffix: "{" `RangeList` "}"
:| "[" `RangeList` "]"
:| "." `TokIdentifier`
RangeList: `RangePiece` ("," `RangePiece`)*
RangePiece: `TokInteger`
:| `TokInteger` "-" `TokInteger`
:| `TokInteger` `TokInteger`
The peculiar last form of :token:`RangePiece` is due to the fact that the
"``-``" is included in the :token:`TokInteger`, hence ``1-5`` gets lexed as
two consecutive :token:`TokInteger`'s, with values ``1`` and ``-5``,
instead of "1", "-", and "5".
The :token:`RangeList` can be thought of as specifying "list slice" in some
contexts.
:token:`SimpleValue` has a number of forms:
.. productionlist::
SimpleValue: `TokIdentifier`
The value will be the variable referenced by the identifier. It can be one
of:
.. The code for this is exceptionally abstruse. These examples are a
best-effort attempt.
* name of a ``def``, such as the use of ``Bar`` in::
def Bar : SomeClass {
int X = 5;
}
def Foo {
SomeClass Baz = Bar;
}
* value local to a ``def``, such as the use of ``Bar`` in::
def Foo {
int Bar = 5;
int Baz = Bar;
}
* a template arg of a ``class``, such as the use of ``Bar`` in::
class Foo<int Bar> {
int Baz = Bar;
}
* value local to a ``multiclass``, such as the use of ``Bar`` in::
multiclass Foo {
int Bar = 5;
int Baz = Bar;
}
* a template arg to a ``multiclass``, such as the use of ``Bar`` in::
multiclass Foo<int Bar> {
int Baz = Bar;
}
.. productionlist::
SimpleValue: `TokInteger`
This represents the numeric value of the integer.
.. productionlist::
SimpleValue: `TokString`+
Multiple adjacent string literals are concatenated like in C/C++. The value
is the concatenation of the strings.
.. productionlist::
SimpleValue: `TokCodeFragment`
The value is the string value of the code fragment.
.. productionlist::
SimpleValue: "?"
``?`` represents an "unset" initializer.
.. productionlist::
SimpleValue: "{" `ValueList` "}"
ValueList: [`ValueListNE`]
ValueListNE: `Value` ("," `Value`)*
This represents a sequence of bits, as would be used to initialize a
``bits<n>`` field (where ``n`` is the number of bits).
.. productionlist::
SimpleValue: `ClassID` "<" `ValueListNE` ">"
This generates a new anonymous record definition (as would be created by an
unnamed ``def`` inheriting from the given class with the given template
arguments) and the value is the value of that record definition.
.. productionlist::
SimpleValue: "[" `ValueList` "]" ["<" `Type` ">"]
A list initializer. The optional :token:`Type` can be used to indicate a
specific element type, otherwise the element type will be deduced from the
given values.
.. The initial `DagArg` of the dag must start with an identifier or
!cast, but this is more of an implementation detail and so for now just
leave it out.
.. productionlist::
SimpleValue: "(" `DagArg` `DagArgList` ")"
DagArgList: `DagArg` ("," `DagArg`)*
DagArg: `Value` [":" `TokVarName`] | `TokVarName`
The initial :token:`DagArg` is called the "operator" of the dag.
.. productionlist::
SimpleValue: `BangOperator` ["<" `Type` ">"] "(" `ValueListNE` ")"
Bodies
------
.. productionlist::
ObjectBody: `BaseClassList` `Body`
BaseClassList: [":" `BaseClassListNE`]
BaseClassListNE: `SubClassRef` ("," `SubClassRef`)*
SubClassRef: (`ClassID` | `MultiClassID`) ["<" `ValueList` ">"]
DefmID: `TokIdentifier`
The version with the :token:`MultiClassID` is only valid in the
:token:`BaseClassList` of a ``defm``.
The :token:`MultiClassID` should be the name of a ``multiclass``.
.. put this somewhere else
It is after parsing the base class list that the "let stack" is applied.
.. productionlist::
Body: ";" | "{" BodyList "}"
BodyList: BodyItem*
BodyItem: `Declaration` ";"
:| "let" `TokIdentifier` [`RangeList`] "=" `Value` ";"
The ``let`` form allows overriding the value of an inherited field.
``def``
-------
.. TODO::
There can be pastes in the names here, like ``#NAME#``. Look into that
and document it (it boils down to ParseIDValue with IDParseMode ==
ParseNameMode). ParseObjectName calls into the general ParseValue, with
the only different from "arbitrary expression parsing" being IDParseMode
== Mode.
.. productionlist::
Def: "def" `TokIdentifier` `ObjectBody`
Defines a record whose name is given by the :token:`TokIdentifier`. The
fields of the record are inherited from the base classes and defined in the
body.
Special handling occurs if this ``def`` appears inside a ``multiclass`` or
a ``foreach``.
``defm``
--------
.. productionlist::
Defm: "defm" `TokIdentifier` ":" `BaseClassListNE` ";"
Note that in the :token:`BaseClassList`, all of the ``multiclass``'s must
precede any ``class``'s that appear.
``foreach``
-----------
.. productionlist::
Foreach: "foreach" `Declaration` "in" "{" `Object`* "}"
:| "foreach" `Declaration` "in" `Object`
The value assigned to the variable in the declaration is iterated over and
the object or object list is reevaluated with the variable set at each
iterated value.
Top-Level ``let``
-----------------
.. productionlist::
Let: "let" `LetList` "in" "{" `Object`* "}"
:| "let" `LetList` "in" `Object`
LetList: `LetItem` ("," `LetItem`)*
LetItem: `TokIdentifier` [`RangeList`] "=" `Value`
This is effectively equivalent to ``let`` inside the body of a record
except that it applies to multiple records at a time. The bindings are
applied at the end of parsing the base classes of a record.
``multiclass``
--------------
.. productionlist::
MultiClass: "multiclass" `TokIdentifier` [`TemplateArgList`]
: [":" `BaseMultiClassList`] "{" `MultiClassObject`+ "}"
BaseMultiClassList: `MultiClassID` ("," `MultiClassID`)*
MultiClassID: `TokIdentifier`
MultiClassObject: `Def` | `Defm` | `Let` | `Foreach`

View File

@ -1,5 +1,3 @@
.. _tablegen:
=====================
TableGen Fundamentals
=====================
@ -120,16 +118,16 @@ this (at the time of this writing):
}
...
This definition corresponds to a 32-bit register-register add instruction in the
X86. The string after the '``def``' string indicates the name of the
record---"``ADD32rr``" in this case---and the comment at the end of the line
indicates the superclasses of the definition. The body of the record contains
all of the data that TableGen assembled for the record, indicating that the
instruction is part of the "X86" namespace, the pattern indicating how the the
instruction should be emitted into the assembly file, that it is a two-address
instruction, has a particular encoding, etc. The contents and semantics of the
information in the record is specific to the needs of the X86 backend, and is
only shown as an example.
This definition corresponds to the 32-bit register-register ``add`` instruction
of the x86 architecture. ``def ADD32rr`` defines a record named
``ADD32rr``, and the comment at the end of the line indicates the superclasses
of the definition. The body of the record contains all of the data that
TableGen assembled for the record, indicating that the instruction is part of
the "X86" namespace, the pattern indicating how the instruction should be
emitted into the assembly file, that it is a two-address instruction, has a
particular encoding, etc. The contents and semantics of the information in the
record are specific to the needs of the X86 backend, and are only shown as an
example.
As you can see, a lot of information is needed for every instruction supported
by the code generator, and specifying it all manually would be unmaintainable,
@ -152,13 +150,12 @@ factor out the common features that instructions of its class share. A key
feature of TableGen is that it allows the end-user to define the abstractions
they prefer to use when describing their information.
Each def record has a special entry called "``NAME``." This is the name of the
def ("``ADD32rr``" above). In the general case def names can be formed from
various kinds of string processing expressions and ``NAME`` resolves to the
Each ``def`` record has a special entry called "NAME". This is the name of the
record ("``ADD32rr``" above). In the general case ``def`` names can be formed
from various kinds of string processing expressions and ``NAME`` resolves to the
final value obtained after resolving all of those expressions. The user may
refer to ``NAME`` anywhere she desires to use the ultimate name of the def.
``NAME`` should not be defined anywhere else in user code to avoid conflict
problems.
refer to ``NAME`` anywhere she desires to use the ultimate name of the ``def``.
``NAME`` should not be defined anywhere else in user code to avoid conflicts.
Running TableGen
----------------
@ -794,6 +791,10 @@ Expressions used by code generator to describe instructions and isel patterns:
TableGen backends
=================
Until we get a step-by-step HowTo for writing TableGen backends, you can at
least grab the boilerplate (build system, new files, etc.) from Clang's
r173931.
TODO: How they work, how to write one. This section should not contain details
about any particular backend, except maybe ``-print-enums`` as an example. This
should highlight the APIs in ``TableGen/Record.h``.

View File

@ -1,351 +0,0 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>LLVM test-suite Makefile Guide</title>
<link rel="stylesheet" href="_static/llvm.css" type="text/css">
</head>
<body>
<h1>
LLVM test-suite Makefile Guide
</h1>
<ol>
<li><a href="#overview">Overview</a></li>
<li><a href="#testsuitestructure">Test suite structure</a></li>
<li><a href="#testsuiterun">Running the test suite</a>
<ul>
<li><a href="#testsuiteexternal">Configuring External Tests</a></li>
<li><a href="#testsuitetests">Running different tests</a></li>
<li><a href="#testsuiteoutput">Generating test output</a></li>
<li><a href="#testsuitecustom">Writing custom tests for test-suite</a></li>
</ul>
</li>
</ol>
<div class="doc_author">
<p>Written by John T. Criswell, Daniel Dunbar, Reid Spencer, and Tanya Lattner</p>
</div>
<!--=========================================================================-->
<h2><a name="overview">Overview</a></h2>
<!--=========================================================================-->
<div>
<p>This document describes the features of the Makefile-based LLVM
test-suite. This way of interacting with the test-suite is deprecated in favor
of running the test-suite using LNT, but may continue to prove useful for some
users. See the Testing
Guide's <a href="TestingGuide.html#testsuitequickstart">test-suite
Quickstart</a> section for more information.</p>
</div>
<!--=========================================================================-->
<h2><a name="testsuitestructure">Test suite Structure</a></h2>
<!--=========================================================================-->
<div>
<p>The <tt>test-suite</tt> module contains a number of programs that can be compiled
with LLVM and executed. These programs are compiled using the native compiler
and various LLVM backends. The output from the program compiled with the
native compiler is assumed correct; the results from the other programs are
compared to the native program output and pass if they match.</p>
<p>When executing tests, it is usually a good idea to start out with a subset of
the available tests or programs. This makes test run times smaller at first and
later on this is useful to investigate individual test failures. To run some
test only on a subset of programs, simply change directory to the programs you
want tested and run <tt>gmake</tt> there. Alternatively, you can run a different
test using the <tt>TEST</tt> variable to change what tests are run on the
selected programs (see below for more info).</p>
<p>In addition to testing correctness, the <tt>test-suite</tt> directory also
performs timing tests of various LLVM optimizations. It also records
compilation times for the compilers and the JIT. This information can be
used to compare the effectiveness of LLVM's optimizations and code
generation.</p>
<p><tt>test-suite</tt> tests are divided into three types of tests: MultiSource,
SingleSource, and External.</p>
<ul>
<li><tt>test-suite/SingleSource</tt>
<p>The SingleSource directory contains test programs that are only a single
source file in size. These are usually small benchmark programs or small
programs that calculate a particular value. Several such programs are grouped
together in each directory.</p></li>
<li><tt>test-suite/MultiSource</tt>
<p>The MultiSource directory contains subdirectories which contain entire
programs with multiple source files. Large benchmarks and whole applications
go here.</p></li>
<li><tt>test-suite/External</tt>
<p>The External directory contains Makefiles for building code that is external
to (i.e., not distributed with) LLVM. The most prominent members of this
directory are the SPEC 95 and SPEC 2000 benchmark suites. The <tt>External</tt>
directory does not contain these actual tests, but only the Makefiles that know
how to properly compile these programs from somewhere else. The presence and
location of these external programs is configured by the test-suite
<tt>configure</tt> script.</p></li>
</ul>
<p>Each tree is then subdivided into several categories, including applications,
benchmarks, regression tests, code that is strange grammatically, etc. These
organizations should be relatively self-explanatory.</p>
<p>Some tests are known to fail. Some are bugs that we have not fixed yet;
others are features that we haven't added yet (or may never add). In the
regression tests, the result for such tests will be XFAIL (eXpected FAILure).
In this way, you can tell the difference between an expected and unexpected
failure.</p>
<p>The tests in the test suite have no such feature at this time. If the
test passes, only warnings and other miscellaneous output will be generated. If
a test fails, a large &lt;program&gt; FAILED message will be displayed. This
will help you separate benign warnings from actual test failures.</p>
</div>
<!--=========================================================================-->
<h2><a name="testsuiterun">Running the test suite</a></h2>
<!--=========================================================================-->
<div>
<p>First, all tests are executed within the LLVM object directory tree. They
<i>are not</i> executed inside of the LLVM source tree. This is because the
test suite creates temporary files during execution.</p>
<p>To run the test suite, you need to use the following steps:</p>
<ol>
<li><tt>cd</tt> into the <tt>llvm/projects</tt> directory in your source tree.
</li>
<li><p>Check out the <tt>test-suite</tt> module with:</p>
<div class="doc_code">
<pre>
% svn co http://llvm.org/svn/llvm-project/test-suite/trunk test-suite
</pre>
</div>
<p>This will get the test suite into <tt>llvm/projects/test-suite</tt>.</p>
</li>
<li><p>Configure and build <tt>llvm</tt>.</p></li>
<li><p>Configure and build <tt>llvm-gcc</tt>.</p></li>
<li><p>Install <tt>llvm-gcc</tt> somewhere.</p></li>
<li><p><em>Re-configure</em> <tt>llvm</tt> from the top level of
each build tree (LLVM object directory tree) in which you want
to run the test suite, just as you do before building LLVM.</p>
<p>During the <em>re-configuration</em>, you must either: (1)
have the <tt>llvm-gcc</tt> you just built in your path, or (2)
specify the directory where your just-built <tt>llvm-gcc</tt> is
installed using <tt>--with-llvmgccdir=$LLVM_GCC_DIR</tt>.</p>
<p>You must also tell the configure machinery that the test suite
is available so it can be configured for your build tree:</p>
<div class="doc_code">
<pre>
% cd $LLVM_OBJ_ROOT ; $LLVM_SRC_ROOT/configure [--with-llvmgccdir=$LLVM_GCC_DIR]
</pre>
</div>
<p>[Remember that <tt>$LLVM_GCC_DIR</tt> is the directory where you
<em>installed</em> llvm-gcc, not its src or obj directory.]</p>
</li>
<li><p>You can now run the test suite from your build tree as follows:</p>
<div class="doc_code">
<pre>
% cd $LLVM_OBJ_ROOT/projects/test-suite
% make
</pre>
</div>
</li>
</ol>
<p>Note that the checkout and re-configuration steps only need to be done once.
After you have the suite checked out and configured, you don't need to do it
again (unless the test code or configure script changes).</p>
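<p>If you choose to put <tt>llvm-gcc</tt> in your path rather than passing
<tt>--with-llvmgccdir</tt>, a quick sanity check before re-configuring might
look like this (a sketch, assuming <tt>$LLVM_GCC_DIR</tt> is your llvm-gcc
install prefix):</p>
<div class="doc_code">
<pre>
% export PATH=$LLVM_GCC_DIR/bin:$PATH   # make the just-installed llvm-gcc visible
% which llvm-gcc                        # should print $LLVM_GCC_DIR/bin/llvm-gcc
</pre>
</div>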
<!-- _______________________________________________________________________ -->
<h3>
<a name="testsuiteexternal">Configuring External Tests</a>
</h3>
<!-- _______________________________________________________________________ -->
<div>
<p>In order to run the External tests in the <tt>test-suite</tt>
module, you must specify <i>--with-externals</i>. This
must be done during the <em>re-configuration</em> step (see above),
and the <tt>llvm</tt> re-configuration must recognize the
previously-built <tt>llvm-gcc</tt>. If any of these is missing or
neglected, the External tests won't work.</p>
<dl>
<dt><i>--with-externals</i></dt>
<dt><i>--with-externals=&lt;<tt>directory</tt>&gt;</i></dt>
</dl>
This tells LLVM where to find any external tests. They are expected to be
in specifically named subdirectories of &lt;<tt>directory</tt>&gt;.
If <tt>directory</tt> is left unspecified,
<tt>configure</tt> uses the default value
<tt>/home/vadve/shared/benchmarks/speccpu2000/benchspec</tt>.
Subdirectory names known to LLVM include:
<dl>
<dt>spec95</dt>
<dt>speccpu2000</dt>
<dt>speccpu2006</dt>
<dt>povray31</dt>
</dl>
Others are added from time to time, and can be determined from
<tt>configure</tt>.
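<p>For example, a re-configuration that enables the External tests might look
like the following sketch, where <tt>/path/to/benchmarks</tt> is a placeholder
for a directory containing, e.g., a <tt>speccpu2000</tt> subdirectory:</p>
<div class="doc_code">
<pre>
% cd $LLVM_OBJ_ROOT
% $LLVM_SRC_ROOT/configure --with-llvmgccdir=$LLVM_GCC_DIR \
    --with-externals=/path/to/benchmarks
</pre>
</div>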
</div>
<!-- _______________________________________________________________________ -->
<h3>
<a name="testsuitetests">Running different tests</a>
</h3>
<!-- _______________________________________________________________________ -->
<div>
<p>In addition to the regular "whole program" tests, the <tt>test-suite</tt>
module also provides a mechanism for compiling the programs in different ways.
If the variable TEST is defined on the <tt>gmake</tt> command line, the test system will
include a Makefile named <tt>TEST.&lt;value of TEST variable&gt;.Makefile</tt>.
This Makefile can modify build rules to yield different results.</p>
<p>For example, the LLVM nightly tester uses <tt>TEST.nightly.Makefile</tt> to
create the nightly test reports. To run the nightly tests, run <tt>gmake
TEST=nightly</tt>.</p>
<p>There are several TEST Makefiles available in the tree. Some of them are
designed for internal LLVM research and will not work outside of the LLVM
research group. They may still be valuable, however, as a guide to writing your
own TEST Makefile for any optimization or analysis passes that you develop with
LLVM.</p>
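<p>For example, to run the nightly test over just the MultiSource benchmarks
(the subdirectory is only an illustration; any level of the tree works):</p>
<div class="doc_code">
<pre>
% cd $LLVM_OBJ_ROOT/projects/test-suite/MultiSource/Benchmarks
% gmake TEST=nightly
</pre>
</div>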
</div>
<!-- _______________________________________________________________________ -->
<h3>
<a name="testsuiteoutput">Generating test output</a>
</h3>
<!-- _______________________________________________________________________ -->
<div>
<p>There are a number of ways to run the tests and generate output. The
simplest is to run <tt>gmake</tt> with no arguments. This will compile and run
all programs in the tree using a number of different methods and compare
results. Any failures are reported in the output, but are easily drowned out by
the rest of the output. Passes are not reported explicitly.</p>
<p>Somewhat better is running <tt>gmake TEST=sometest test</tt>, which runs
the specified test and usually adds per-program summaries to the output
(depending on which test you use). For example, the <tt>nightly</tt> test
explicitly outputs TEST-PASS or TEST-FAIL for every test after each program.
Though these lines are still drowned in the output, it's easy to grep the
output logs in the Output directories.</p>
<p>Even better are the <tt>report</tt> and <tt>report.format</tt> targets
(where <tt>format</tt> is one of <tt>html</tt>, <tt>csv</tt>, <tt>text</tt> or
<tt>graphs</tt>). The exact contents of the report are dependent on which
<tt>TEST</tt> you are running, but the text results are always shown at the
end of the run and the results are always stored in the
<tt>report.&lt;type&gt;.format</tt> file (when running with
<tt>TEST=&lt;type&gt;</tt>).
The <tt>report</tt> targets also generate a file called
<tt>report.&lt;type&gt;.raw.out</tt> containing the output of the entire test
run.</p>
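<p>For example, with the <tt>nightly</tt> test the following produces an HTML
report, leaving the table in <tt>report.nightly.html</tt> and the raw log of
the run in <tt>report.nightly.raw.out</tt>:</p>
<div class="doc_code">
<pre>
% gmake TEST=nightly report.html
</pre>
</div>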
</div>
<!-- _______________________________________________________________________ -->
<h3>
<a name="testsuitecustom">Writing custom tests for the test suite</a>
</h3>
<!-- _______________________________________________________________________ -->
<div>
<p>Assuming you can run the test suite (e.g., "<tt>gmake TEST=nightly report</tt>"
works), it is straightforward to run optimizations or code generator
components against every program in the tree, collecting statistics or running
custom checks for correctness. At its core, this is how the nightly tester works;
it's just one example of a general framework.</p>
<p>Let's say that you have an LLVM optimization pass, and you want to see how
many times it triggers. The first thing you should do is add an LLVM
<a href="ProgrammersManual.html#Statistic">statistic</a> to your pass, which
will tally counts of the things you care about.</p>
<p>Following this, you can set up a test and a report that collects these and
formats them for easy viewing. This consists of two files, a
"<tt>test-suite/TEST.XXX.Makefile</tt>" fragment (where XXX is the name of your
test) and a "<tt>test-suite/TEST.XXX.report</tt>" file that indicates how to
format the output into a table. There are many example reports of various
levels of sophistication included with the test suite, and the framework is very
general.</p>
<p>If you are interested in testing an optimization pass, check out the
"libcalls" test as an example. It can be run like this:<p>
<div class="doc_code">
<pre>
% cd llvm/projects/test-suite/MultiSource/Benchmarks # or some other level
% make TEST=libcalls report
</pre>
</div>
<p>This will take a while to run, and will eventually print a table like this:</p>
<div class="doc_code">
<pre>
Name | total | #exit |
...
FreeBench/analyzer/analyzer | 51 | 6 |
FreeBench/fourinarow/fourinarow | 1 | 1 |
FreeBench/neural/neural | 19 | 9 |
FreeBench/pifft/pifft | 5 | 3 |
MallocBench/cfrac/cfrac | 1 | * |
MallocBench/espresso/espresso | 52 | 12 |
MallocBench/gs/gs | 4 | * |
Prolangs-C/TimberWolfMC/timberwolfmc | 302 | * |
Prolangs-C/agrep/agrep | 33 | 12 |
Prolangs-C/allroots/allroots | * | * |
Prolangs-C/assembler/assembler | 47 | * |
Prolangs-C/bison/mybison | 74 | * |
...
</pre>
</div>
<p>This is essentially grepping the <tt>-stats</tt> output and displaying it in
a table. You can also use the "<tt>TEST=libcalls report.html</tt>" target to get
the table in HTML form, and similarly <tt>report.csv</tt> and
<tt>report.tex</tt> for CSV and LaTeX output.</p>
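<p>Conceptually, each table cell comes from running the test command on one
program and grepping its <tt>-stats</tt> output. A rough hand-run equivalent
for a single bitcode file (a sketch; <tt>foo.bc</tt> is a placeholder) would
be:</p>
<div class="doc_code">
<pre>
% opt -simplify-libcalls -stats -o /dev/null foo.bc 2&gt;&amp;1 | grep simplify-libcalls
</pre>
</div>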
<p>The source for this is in test-suite/TEST.libcalls.*. The format is pretty
simple: the Makefile indicates how to run the test (in this case,
"<tt>opt -simplify-libcalls -stats</tt>"), and the report contains one line for
each column of the output. The first value is the header for the column and the
second is the regex to grep the output of the command for. There are lots of
example reports that can do fancy stuff.</p>
</div>
</div>
<!-- *********************************************************************** -->
<hr>
<address>
<a href="http://jigsaw.w3.org/css-validator/check/referer"><img
src="http://jigsaw.w3.org/css-validator/images/vcss-blue" alt="Valid CSS"></a>
<a href="http://validator.w3.org/check/referer"><img
src="http://www.w3.org/Icons/valid-html401-blue" alt="Valid HTML 4.01"></a>
John T. Criswell, Daniel Dunbar, Reid Spencer, and Tanya Lattner<br>
<a href="http://llvm.org/">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date$
</address>
</body>
</html>
