Update LLVM sources to r73879.

ed 2009-06-22 08:08:12 +00:00
parent db89e312d9
commit a4c19d68f1
337 changed files with 11235 additions and 2600 deletions

View File

@ -81,14 +81,23 @@ if( LLVM_TARGETS_TO_BUILD STREQUAL "all" )
set( LLVM_TARGETS_TO_BUILD ${LLVM_ALL_TARGETS} )
endif()
set(LLVM_ENUM_TARGETS "")
foreach(c ${LLVM_TARGETS_TO_BUILD})
list(FIND LLVM_ALL_TARGETS ${c} idx)
if( idx LESS 0 )
message(FATAL_ERROR "The target `${c}' does not exists.
It should be one of\n${LLVM_ALL_TARGETS}")
else()
set(LLVM_ENUM_TARGETS "${LLVM_ENUM_TARGETS}LLVM_TARGET(${c})\n")
endif()
endforeach(c)
# Produce llvm/Config/Targets.def
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/Targets.def.in
${LLVM_BINARY_DIR}/include/llvm/Config/Targets.def
)
set(llvm_builded_incs_dir ${LLVM_BINARY_DIR}/include/llvm)
# The USE_EXPLICIT_DEPENDENCIES variable will be TRUE to indicate that
@ -250,14 +259,23 @@ add_subdirectory(lib/Linker)
add_subdirectory(lib/Analysis)
add_subdirectory(lib/Analysis/IPA)
foreach(t ${LLVM_TARGETS_TO_BUILD})
set(LLVM_ENUM_ASM_PRINTERS "")
foreach(t ${LLVM_TARGETS_TO_BUILD})
message(STATUS "Targeting ${t}")
add_subdirectory(lib/Target/${t})
if( EXISTS ${LLVM_MAIN_SRC_DIR}/lib/Target/${t}/AsmPrinter/CMakeLists.txt )
add_subdirectory(lib/Target/${t}/AsmPrinter)
endif( EXISTS ${LLVM_MAIN_SRC_DIR}/lib/Target/${t}/AsmPrinter/CMakeLists.txt )
add_subdirectory(lib/Target/${t}/AsmPrinter)
set(LLVM_ENUM_ASM_PRINTERS
"${LLVM_ENUM_ASM_PRINTERS}LLVM_ASM_PRINTER(${t})\n")
endif( EXISTS ${LLVM_MAIN_SRC_DIR}/lib/Target/${t}/AsmPrinter/CMakeLists.txt )
endforeach(t)
# Produce llvm/Config/AsmPrinters.def
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/AsmPrinters.def.in
${LLVM_BINARY_DIR}/include/llvm/Config/AsmPrinters.def
)
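
These configure_file() calls (here and in the earlier hunk) simply stamp out X-macro lists: each configured target becomes one LLVM_TARGET(...) or LLVM_ASM_PRINTER(...) line in the generated .def file, which consuming code includes after defining the macro to whatever it needs. Below is a self-contained C++ sketch of that pattern; the DEMO_TARGETS_DEF list and the printf body are stand-ins for the generated file and for a real consumer, not code taken from this commit.

    // In the LLVM tree, a consumer defines LLVM_TARGET(...) and then does
    //   #include "llvm/Config/Targets.def"
    // The stand-in macro list below simulates the generated file so this
    // sketch compiles on its own.
    #include <cstdio>

    // Stand-in for a generated Targets.def from a build configured with two
    // backends (an assumption for the demo; the real file lists every entry
    // in LLVM_TARGETS_TO_BUILD).
    #define DEMO_TARGETS_DEF \
      LLVM_TARGET(X86)       \
      LLVM_TARGET(Sparc)

    static void PrintConfiguredTargets() {
    #define LLVM_TARGET(TargetName) std::printf("target: %s\n", #TargetName);
      DEMO_TARGETS_DEF
    #undef LLVM_TARGET
    }

    int main() { PrintConfiguredTargets(); return 0; }
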
add_subdirectory(lib/ExecutionEngine)
add_subdirectory(lib/ExecutionEngine/Interpreter)
add_subdirectory(lib/ExecutionEngine/JIT)
@ -269,7 +287,10 @@ add_subdirectory(lib/Archive)
add_subdirectory(projects)
add_subdirectory(tools)
add_subdirectory(examples)
option(LLVM_EXAMPLES "Build LLVM example programs." OFF)
if (LLVM_EXAMPLES)
add_subdirectory(examples)
endif ()
install(DIRECTORY include
DESTINATION .

View File

@ -134,6 +134,8 @@ install-libs: install
#------------------------------------------------------------------------
FilesToConfig := \
include/llvm/Config/config.h \
include/llvm/Config/Targets.def \
include/llvm/Config/AsmPrinters.def \
include/llvm/Support/DataTypes.h \
include/llvm/ADT/iterator.h
FilesToConfigPATH := $(addprefix $(LLVM_OBJ_ROOT)/,$(FilesToConfig))

View File

@ -287,10 +287,8 @@ endif
ifdef ENABLE_COVERAGE
BuildMode := $(BuildMode)+Coverage
# These only go to .NoRelink because otherwise we will end up
# linking -lgcov into the .o libraries that get built.
CXX.Flags.NoRelink += -ftest-coverage -fprofile-arcs
C.Flags.NoRelink += -ftest-coverage -fprofile-arcs
CXX.Flags += -ftest-coverage -fprofile-arcs
C.Flags += -ftest-coverage -fprofile-arcs
endif
# If DISABLE_ASSERTIONS=1 is specified (make command line or configured),
@ -315,7 +313,6 @@ endif
# defined/on.
ifdef LOADABLE_MODULE
SHARED_LIBRARY := 1
DONT_BUILD_RELINKED := 1
LINK_LIBS_IN_SHARED := 1
endif
@ -522,10 +519,10 @@ endif
#----------------------------------------------------------
ifndef NO_PEDANTIC
CompileCommonOpts += -pedantic
CompileCommonOpts += -pedantic -Wno-long-long
endif
CompileCommonOpts += -Wall -W -Wwrite-strings -Wno-long-long \
-Wunused -Wno-unused-parameter $(EXTRA_OPTIONS)
CompileCommonOpts += -Wall -W -Wno-unused-parameter -Wwrite-strings \
$(EXTRA_OPTIONS)
ifeq ($(OS),HP-UX)
CompileCommonOpts := -D_REENTRANT -D_HPUX_SOURCE
@ -548,10 +545,8 @@ ifdef UNIVERSAL
endif
UNIVERSAL_ARCH_OPTIONS := $(UNIVERSAL_ARCH:%=-arch %)
CompileCommonOpts += $(UNIVERSAL_ARCH_OPTIONS)
Relink.Flags := $(UNIVERSAL_ARCH_OPTIONS)
ifdef UNIVERSAL_SDK_PATH
CompileCommonOpts += -isysroot $(UNIVERSAL_SDK_PATH)
Relink.Flags += -isysroot $(UNIVERSAL_SDK_PATH)
endif
# Building universal cannot compute dependencies automatically.
@ -582,27 +577,23 @@ CPP.Flags += $(sort -I$(PROJ_OBJ_DIR) -I$(PROJ_SRC_DIR) \
$(CPP.BaseFlags)
ifeq ($(BUILD_COMPONENT), 1)
Compile.C = $(BUILD_CC) $(CPP.Flags) $(C.Flags) $(C.Flags.NoRelink) \
Compile.C = $(BUILD_CC) $(CPP.Flags) $(C.Flags) \
$(TargetCommonOpts) $(CompileCommonOpts) -c
Compile.CXX = $(BUILD_CXX) $(CPP.Flags) $(CXX.Flags) $(CXX.Flags.NoRelink) \
Compile.CXX = $(BUILD_CXX) $(CPP.Flags) $(CXX.Flags) \
$(TargetCommonOpts) $(CompileCommonOpts) -c
Preprocess.CXX= $(BUILD_CXX) $(CPP.Flags) $(TargetCommonOpts) \
$(CompileCommonOpts) $(CXX.Flags) $(CXX.Flags.NoRelink) -E
Link = $(BUILD_CXX) $(CPP.Flags) $(CXX.Flags) $(CXX.Flags.NoRelink) \
$(CompileCommonOpts) $(CXX.Flags) -E
Link = $(BUILD_CXX) $(CPP.Flags) $(CXX.Flags) \
$(TargetCommonOpts) $(CompileCommonOpts) $(LD.Flags) $(Strip)
Relink = $(BUILD_CXX) $(CPP.Flags) $(CXX.Flags) $(TargetCommonOpts) \
$(CompileCommonOpts) $(Relink.Flags)
else
Compile.C = $(CC) $(CPP.Flags) $(C.Flags) $(C.Flags.NoRelink) \
Compile.C = $(CC) $(CPP.Flags) $(C.Flags) \
$(TargetCommonOpts) $(CompileCommonOpts) -c
Compile.CXX = $(CXX) $(CPP.Flags) $(CXX.Flags) $(CXX.Flags.NoRelink) \
Compile.CXX = $(CXX) $(CPP.Flags) $(CXX.Flags) \
$(TargetCommonOpts) $(CompileCommonOpts) -c
Preprocess.CXX= $(CXX) $(CPP.Flags) $(TargetCommonOpts) \
$(CompileCommonOpts) $(CXX.Flags) $(CXX.Flags.NoRelink) -E
Link = $(CXX) $(CPP.Flags) $(CXX.Flags) $(CXX.Flags.NoRelink) \
$(CompileCommonOpts) $(CXX.Flags) -E
Link = $(CXX) $(CPP.Flags) $(CXX.Flags) \
$(TargetCommonOpts) $(CompileCommonOpts) $(LD.Flags) $(Strip)
Relink = $(CXX) $(CPP.Flags) $(CXX.Flags) $(TargetCommonOpts) \
$(CompileCommonOpts) $(Relink.Flags)
endif
BCCompile.C = $(LLVMGCCWITHPATH) $(CPP.Flags) $(C.Flags) \
@ -1048,48 +1039,13 @@ endif
endif
#---------------------------------------------------------
# ReLinked Library Targets:
# If the user explicitly requests a relinked library with
# BUILD_RELINKED, provide it. Otherwise, if they specify
# neither of BUILD_ARCHIVE or DONT_BUILD_RELINKED, give
# them one.
# Library Targets:
# If neither BUILD_ARCHIVE or LOADABLE_MODULE are specified, default to
# building an archive.
#---------------------------------------------------------
ifndef BUILD_ARCHIVE
ifndef DONT_BUILD_RELINKED
BUILD_RELINKED = 1
endif
endif
ifdef BUILD_RELINKED
all-local:: $(LibName.O)
$(LibName.O): $(ObjectsO) $(LibDir)/.dir
$(Echo) Linking $(BuildMode) Object Library $(notdir $@)
$(Verb) $(Relink) -r -nodefaultlibs -nostdlib -nostartfiles -o $@ $(ObjectsO)
clean-local::
ifneq ($(strip $(LibName.O)),)
-$(Verb) $(RM) -f $(LibName.O)
endif
ifdef NO_INSTALL
install-local::
$(Echo) Install circumvented with NO_INSTALL
uninstall-local::
$(Echo) Uninstall circumvented with NO_INSTALL
else
DestRelinkedLib = $(PROJ_libdir)/$(LIBRARYNAME).o
install-local:: $(DestRelinkedLib)
$(DestRelinkedLib): $(LibName.O) $(PROJ_libdir)
$(Echo) Installing $(BuildMode) Object Library $(DestRelinkedLib)
$(Verb) $(INSTALL) $(LibName.O) $(DestRelinkedLib)
uninstall-local::
$(Echo) Uninstalling $(BuildMode) Object Library $(DestRelinkedLib)
-$(Verb) $(RM) -f $(DestRelinkedLib)
ifndef LOADABLE_MODULE
BUILD_ARCHIVE = 1
endif
endif

View File

@ -234,6 +234,13 @@ if test "$llvm_cv_target_arch" = "Unknown" ; then
AC_MSG_WARN([Configuring LLVM for an unknown target archicture])
fi
# Determine the LLVM native architecture for the target
case "$llvm_cv_target_arch" in
x86) LLVM_NATIVE_ARCH="X86" ;;
x86_64) LLVM_NATIVE_ARCH="X86" ;;
*) LLVM_NATIVE_ARCH="$llvm_cv_target_arch" ;;
esac
dnl Define a substitution, ARCH, for the target architecture
AC_SUBST(ARCH,$llvm_cv_target_arch)
@ -436,6 +443,28 @@ case "$enableval" in
esac
AC_SUBST(TARGETS_TO_BUILD,$TARGETS_TO_BUILD)
# Determine whether we are building LLVM support for the native architecture.
# If so, define LLVM_NATIVE_ARCH to that LLVM target.
for a_target in $TARGETS_TO_BUILD; do
if test "$a_target" = "$LLVM_NATIVE_ARCH"; then
AC_DEFINE_UNQUOTED(LLVM_NATIVE_ARCH,$LLVM_NATIVE_ARCH,
[LLVM architecture name for the native architecture, if available])
fi
done
# Build the LLVM_TARGET and LLVM_ASM_PRINTER macro uses for
# Targets.def and AsmPrinters.def.
LLVM_ENUM_TARGETS=""
LLVM_ENUM_ASM_PRINTERS=""
for target_to_build in $TARGETS_TO_BUILD; do
LLVM_ENUM_TARGETS="LLVM_TARGET($target_to_build) $LLVM_ENUM_TARGETS"
if test -f ${srcdir}/lib/Target/${target_to_build}/AsmPrinter/Makefile ; then
LLVM_ENUM_ASM_PRINTERS="LLVM_ASM_PRINTER($target_to_build) $LLVM_ENUM_ASM_PRINTERS";
fi
done
AC_SUBST(LLVM_ENUM_TARGETS)
AC_SUBST(LLVM_ENUM_ASM_PRINTERS)
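
LLVM_NATIVE_ARCH ends up as a preprocessor define naming the backend that matches the build host, so code can reach the native target's entry points by token pasting. The following is a hedged, self-contained sketch of that technique; the Initialize*Target stubs and macro names are invented for illustration and are not the actual LLVM helpers.

    #include <cstdio>

    #ifndef LLVM_NATIVE_ARCH
    #define LLVM_NATIVE_ARCH X86   // assume an x86 host for this sketch
    #endif

    static void InitializeX86Target()   { std::printf("initializing X86 backend\n"); }
    static void InitializeSparcTarget() { std::printf("initializing Sparc backend\n"); }

    // Two-level macro so LLVM_NATIVE_ARCH expands before token pasting.
    #define NATIVE_INIT_IMPL(Arch) Initialize##Arch##Target()
    #define NATIVE_INIT(Arch)      NATIVE_INIT_IMPL(Arch)

    int main() {
      NATIVE_INIT(LLVM_NATIVE_ARCH);   // expands to InitializeX86Target()
      (void)&InitializeSparcTarget;    // keep -Wall quiet about the unused stub
      return 0;
    }
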
dnl Prevent the CBackend from using printf("%a") for floating point so older
dnl C compilers that cannot deal with the 0x0p+0 hex floating point format
dnl can still compile the CBE's output
@ -796,6 +825,9 @@ if test "$ENABLE_THREADS" -eq 1 ; then
AC_SEARCH_LIBS(pthread_mutex_lock,pthread,
AC_DEFINE([HAVE_PTHREAD_MUTEX_LOCK],[1],
[Have pthread_mutex_lock]))
AC_SEARCH_LIBS(pthread_rwlock_init,pthread,
AC_DEFINE([HAVE_PTHREAD_RWLOCK_INIT],[1],
[Have pthread_rwlock_init]))
fi
dnl Allow extra x86-disassembler library
@ -919,6 +951,8 @@ AC_LINK_IFELSE(
volatile unsigned long val = 1;
__sync_synchronize();
__sync_val_compare_and_swap(&val, 1, 0);
__sync_add_and_fetch(&val, 1);
__sync_sub_and_fetch(&val, 1);
return 0;
}
]]),
@ -1108,6 +1142,8 @@ dnl you MUST also update Makefile.rules so that the variable FilesToConfig
dnl contains the same list of files as AC_CONFIG_HEADERS below. This ensures the
dnl files can be updated automatically when their *.in sources change.
AC_CONFIG_HEADERS([include/llvm/Config/config.h])
AC_CONFIG_FILES([include/llvm/Config/Targets.def])
AC_CONFIG_FILES([include/llvm/Config/AsmPrinters.def])
AC_CONFIG_HEADERS([include/llvm/Support/DataTypes.h])
AC_CONFIG_HEADERS([include/llvm/ADT/iterator.h])

View File

@ -78,6 +78,50 @@ include(GetTargetTriple)
get_target_triple(LLVM_HOSTTRIPLE)
message(STATUS "LLVM_HOSTTRIPLE: ${LLVM_HOSTTRIPLE}")
# Determine the native architecture.
# FIXME: this will have to change for cross-compiling.
string(REGEX MATCH "^[^-]*" LLVM_NATIVE_ARCH ${LLVM_HOSTTRIPLE})
if (LLVM_NATIVE_ARCH MATCHES "i[2-6]86")
set(LLVM_NATIVE_ARCH X86)
elseif (LLVM_NATIVE_ARCH STREQUAL amd64)
set(LLVM_NATIVE_ARCH X86)
elseif (LLVM_NATIVE_ARCH STREQUAL x86_64)
set(LLVM_NATIVE_ARCH X86)
elseif (LLVM_NATIVE_ARCH MATCHES "sparc")
set(LLVM_NATIVE_ARCH Sparc)
elseif (LLVM_NATIVE_ARCH MATCHES "powerpc")
set(LLVM_NATIVE_ARCH PowerPC)
elseif (LLVM_NATIVE_ARCH MATCHES "alpha")
set(LLVM_NATIVE_ARCH Alpha)
elseif (LLVM_NATIVE_ARCH MATCHES "ia64")
set(LLVM_NATIVE_ARCH IA64)
elseif (LLVM_NATIVE_ARCH MATCHES "arm")
set(LLVM_NATIVE_ARCH ARM)
elseif (LLVM_NATIVE_ARCH MATCHES "mips")
set(LLVM_NATIVE_ARCH Mips)
elseif (LLVM_NATIVE_ARCH MATCHES "pic16")
set(LLVM_NATIVE_ARCH "PIC16")
elseif (LLVM_NATIVE_ARCH MATCHES "xcore")
set(LLVM_NATIVE_ARCH XCore)
elseif (LLVM_NATIVE_ARCH MATCHES "msp430")
set(LLVM_NATIVE_ARCH MSP430)
else ()
message(STATUS
"Unknown architecture ${LLVM_NATIVE_ARCH}; lli will not JIT code")
set(LLVM_NATIVE_ARCH)
endif ()
if (LLVM_NATIVE_ARCH)
list(FIND LLVM_TARGETS_TO_BUILD ${LLVM_NATIVE_ARCH} NATIVE_ARCH_IDX)
if (NATIVE_ARCH_IDX EQUAL -1)
message(STATUS
"Native target ${LLVM_NATIVE_ARCH} is not selected; lli will not JIT code")
set(LLVM_NATIVE_ARCH)
else ()
message(STATUS "Native target architecture is ${LLVM_NATIVE_ARCH}")
endif ()
endif()
if( MINGW )
set(HAVE_LIBIMAGEHLP 1)
set(HAVE_LIBPSAPI 1)

configure (vendored)
View File

@ -841,6 +841,8 @@ ENABLE_DOXYGEN
ENABLE_THREADS
ENABLE_PIC
TARGETS_TO_BUILD
LLVM_ENUM_TARGETS
LLVM_ENUM_ASM_PRINTERS
ENABLE_CBE_PRINTF_A
EXTRA_OPTIONS
BINUTILS_INCDIR
@ -2401,6 +2403,13 @@ if test "$llvm_cv_target_arch" = "Unknown" ; then
echo "$as_me: WARNING: Configuring LLVM for an unknown target archicture" >&2;}
fi
# Determine the LLVM native architecture for the target
case "$llvm_cv_target_arch" in
x86) LLVM_NATIVE_ARCH="X86" ;;
x86_64) LLVM_NATIVE_ARCH="X86" ;;
*) LLVM_NATIVE_ARCH="$llvm_cv_target_arch" ;;
esac
ARCH=$llvm_cv_target_arch
@ -4959,6 +4968,31 @@ esac
TARGETS_TO_BUILD=$TARGETS_TO_BUILD
# Determine whether we are building LLVM support for the native architecture.
# If so, define LLVM_NATIVE_ARCH to that LLVM target.
for a_target in $TARGETS_TO_BUILD; do
if test "$a_target" = "$LLVM_NATIVE_ARCH"; then
cat >>confdefs.h <<_ACEOF
#define LLVM_NATIVE_ARCH $LLVM_NATIVE_ARCH
_ACEOF
fi
done
# Build the LLVM_TARGET and LLVM_ASM_PRINTER macro uses for
# Targets.def and AsmPrinters.def.
LLVM_ENUM_TARGETS=""
LLVM_ENUM_ASM_PRINTERS=""
for target_to_build in $TARGETS_TO_BUILD; do
LLVM_ENUM_TARGETS="LLVM_TARGET($target_to_build) $LLVM_ENUM_TARGETS"
if test -f ${srcdir}/lib/Target/${target_to_build}/AsmPrinter/Makefile ; then
LLVM_ENUM_ASM_PRINTERS="LLVM_ASM_PRINTER($target_to_build) $LLVM_ENUM_ASM_PRINTERS";
fi
done
# Check whether --enable-cbe-printf-a was given.
if test "${enable_cbe_printf_a+set}" = set; then
enableval=$enable_cbe_printf_a;
@ -10594,7 +10628,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<EOF
#line 10597 "configure"
#line 10631 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@ -12738,7 +12772,7 @@ ia64-*-hpux*)
;;
*-*-irix6*)
# Find out which ABI we are using.
echo '#line 12741 "configure"' > conftest.$ac_ext
echo '#line 12775 "configure"' > conftest.$ac_ext
if { (eval echo "$as_me:$LINENO: \"$ac_compile\"") >&5
(eval $ac_compile) 2>&5
ac_status=$?
@ -14456,11 +14490,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:14459: $lt_compile\"" >&5)
(eval echo "\"\$as_me:14493: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
echo "$as_me:14463: \$? = $ac_status" >&5
echo "$as_me:14497: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@ -14724,11 +14758,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:14727: $lt_compile\"" >&5)
(eval echo "\"\$as_me:14761: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
echo "$as_me:14731: \$? = $ac_status" >&5
echo "$as_me:14765: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@ -14828,11 +14862,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:14831: $lt_compile\"" >&5)
(eval echo "\"\$as_me:14865: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
echo "$as_me:14835: \$? = $ac_status" >&5
echo "$as_me:14869: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@ -17280,7 +17314,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<EOF
#line 17283 "configure"
#line 17317 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@ -17380,7 +17414,7 @@ else
lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
lt_status=$lt_dlunknown
cat > conftest.$ac_ext <<EOF
#line 17383 "configure"
#line 17417 "configure"
#include "confdefs.h"
#if HAVE_DLFCN_H
@ -19748,11 +19782,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:19751: $lt_compile\"" >&5)
(eval echo "\"\$as_me:19785: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
echo "$as_me:19755: \$? = $ac_status" >&5
echo "$as_me:19789: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@ -19852,11 +19886,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:19855: $lt_compile\"" >&5)
(eval echo "\"\$as_me:19889: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
echo "$as_me:19859: \$? = $ac_status" >&5
echo "$as_me:19893: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@ -21422,11 +21456,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:21425: $lt_compile\"" >&5)
(eval echo "\"\$as_me:21459: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
echo "$as_me:21429: \$? = $ac_status" >&5
echo "$as_me:21463: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@ -21526,11 +21560,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:21529: $lt_compile\"" >&5)
(eval echo "\"\$as_me:21563: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
echo "$as_me:21533: \$? = $ac_status" >&5
echo "$as_me:21567: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@ -23761,11 +23795,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:23764: $lt_compile\"" >&5)
(eval echo "\"\$as_me:23798: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
echo "$as_me:23768: \$? = $ac_status" >&5
echo "$as_me:23802: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@ -24029,11 +24063,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:24032: $lt_compile\"" >&5)
(eval echo "\"\$as_me:24066: $lt_compile\"" >&5)
(eval "$lt_compile" 2>conftest.err)
ac_status=$?
cat conftest.err >&5
echo "$as_me:24036: \$? = $ac_status" >&5
echo "$as_me:24070: \$? = $ac_status" >&5
if (exit $ac_status) && test -s "$ac_outfile"; then
# The compiler can only warn and ignore the option if not recognized
# So say no if there are warnings other than the usual output.
@ -24133,11 +24167,11 @@ else
-e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
-e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
-e 's:$: $lt_compiler_flag:'`
(eval echo "\"\$as_me:24136: $lt_compile\"" >&5)
(eval echo "\"\$as_me:24170: $lt_compile\"" >&5)
(eval "$lt_compile" 2>out/conftest.err)
ac_status=$?
cat out/conftest.err >&5
echo "$as_me:24140: \$? = $ac_status" >&5
echo "$as_me:24174: \$? = $ac_status" >&5
if (exit $ac_status) && test -s out/conftest2.$ac_objext
then
# The compiler can only warn and ignore the option if not recognized
@ -27894,6 +27928,109 @@ cat >>confdefs.h <<\_ACEOF
#define HAVE_PTHREAD_MUTEX_LOCK 1
_ACEOF
fi
{ echo "$as_me:$LINENO: checking for library containing pthread_rwlock_init" >&5
echo $ECHO_N "checking for library containing pthread_rwlock_init... $ECHO_C" >&6; }
if test "${ac_cv_search_pthread_rwlock_init+set}" = set; then
echo $ECHO_N "(cached) $ECHO_C" >&6
else
ac_func_search_save_LIBS=$LIBS
cat >conftest.$ac_ext <<_ACEOF
/* confdefs.h. */
_ACEOF
cat confdefs.h >>conftest.$ac_ext
cat >>conftest.$ac_ext <<_ACEOF
/* end confdefs.h. */
/* Override any GCC internal prototype to avoid an error.
Use char because int might match the return type of a GCC
builtin and then its argument prototype would still apply. */
#ifdef __cplusplus
extern "C"
#endif
char pthread_rwlock_init ();
int
main ()
{
return pthread_rwlock_init ();
;
return 0;
}
_ACEOF
for ac_lib in '' pthread; do
if test -z "$ac_lib"; then
ac_res="none required"
else
ac_res=-l$ac_lib
LIBS="-l$ac_lib $ac_func_search_save_LIBS"
fi
rm -f conftest.$ac_objext conftest$ac_exeext
if { (ac_try="$ac_link"
case "(($ac_try" in
*\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
*) ac_try_echo=$ac_try;;
esac
eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
(eval "$ac_link") 2>conftest.er1
ac_status=$?
grep -v '^ *+' conftest.er1 >conftest.err
rm -f conftest.er1
cat conftest.err >&5
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); } &&
{ ac_try='test -z "$ac_c_werror_flag" || test ! -s conftest.err'
{ (case "(($ac_try" in
*\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
*) ac_try_echo=$ac_try;;
esac
eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
(eval "$ac_try") 2>&5
ac_status=$?
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); }; } &&
{ ac_try='test -s conftest$ac_exeext'
{ (case "(($ac_try" in
*\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
*) ac_try_echo=$ac_try;;
esac
eval "echo \"\$as_me:$LINENO: $ac_try_echo\"") >&5
(eval "$ac_try") 2>&5
ac_status=$?
echo "$as_me:$LINENO: \$? = $ac_status" >&5
(exit $ac_status); }; }; then
ac_cv_search_pthread_rwlock_init=$ac_res
else
echo "$as_me: failed program was:" >&5
sed 's/^/| /' conftest.$ac_ext >&5
fi
rm -f core conftest.err conftest.$ac_objext \
conftest$ac_exeext
if test "${ac_cv_search_pthread_rwlock_init+set}" = set; then
break
fi
done
if test "${ac_cv_search_pthread_rwlock_init+set}" = set; then
:
else
ac_cv_search_pthread_rwlock_init=no
fi
rm conftest.$ac_ext
LIBS=$ac_func_search_save_LIBS
fi
{ echo "$as_me:$LINENO: result: $ac_cv_search_pthread_rwlock_init" >&5
echo "${ECHO_T}$ac_cv_search_pthread_rwlock_init" >&6; }
ac_res=$ac_cv_search_pthread_rwlock_init
if test "$ac_res" != no; then
test "$ac_res" = "none required" || LIBS="$ac_res $LIBS"
cat >>confdefs.h <<\_ACEOF
#define HAVE_PTHREAD_RWLOCK_INIT 1
_ACEOF
fi
fi
@ -33642,6 +33779,8 @@ int main() {
volatile unsigned long val = 1;
__sync_synchronize();
__sync_val_compare_and_swap(&val, 1, 0);
__sync_add_and_fetch(&val, 1);
__sync_sub_and_fetch(&val, 1);
return 0;
}
@ -34120,6 +34259,10 @@ fi
ac_config_headers="$ac_config_headers include/llvm/Config/config.h"
ac_config_files="$ac_config_files include/llvm/Config/Targets.def"
ac_config_files="$ac_config_files include/llvm/Config/AsmPrinters.def"
ac_config_headers="$ac_config_headers include/llvm/Support/DataTypes.h"
ac_config_headers="$ac_config_headers include/llvm/ADT/iterator.h"
@ -34746,6 +34889,8 @@ for ac_config_target in $ac_config_targets
do
case $ac_config_target in
"include/llvm/Config/config.h") CONFIG_HEADERS="$CONFIG_HEADERS include/llvm/Config/config.h" ;;
"include/llvm/Config/Targets.def") CONFIG_FILES="$CONFIG_FILES include/llvm/Config/Targets.def" ;;
"include/llvm/Config/AsmPrinters.def") CONFIG_FILES="$CONFIG_FILES include/llvm/Config/AsmPrinters.def" ;;
"include/llvm/Support/DataTypes.h") CONFIG_HEADERS="$CONFIG_HEADERS include/llvm/Support/DataTypes.h" ;;
"include/llvm/ADT/iterator.h") CONFIG_HEADERS="$CONFIG_HEADERS include/llvm/ADT/iterator.h" ;;
"Makefile.config") CONFIG_FILES="$CONFIG_FILES Makefile.config" ;;
@ -34914,6 +35059,8 @@ ENABLE_DOXYGEN!$ENABLE_DOXYGEN$ac_delim
ENABLE_THREADS!$ENABLE_THREADS$ac_delim
ENABLE_PIC!$ENABLE_PIC$ac_delim
TARGETS_TO_BUILD!$TARGETS_TO_BUILD$ac_delim
LLVM_ENUM_TARGETS!$LLVM_ENUM_TARGETS$ac_delim
LLVM_ENUM_ASM_PRINTERS!$LLVM_ENUM_ASM_PRINTERS$ac_delim
ENABLE_CBE_PRINTF_A!$ENABLE_CBE_PRINTF_A$ac_delim
EXTRA_OPTIONS!$EXTRA_OPTIONS$ac_delim
BINUTILS_INCDIR!$BINUTILS_INCDIR$ac_delim
@ -34924,8 +35071,6 @@ NM!$NM$ac_delim
ifGNUmake!$ifGNUmake$ac_delim
LN_S!$LN_S$ac_delim
CMP!$CMP$ac_delim
CP!$CP$ac_delim
DATE!$DATE$ac_delim
_ACEOF
if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 97; then
@ -34967,6 +35112,8 @@ _ACEOF
ac_delim='%!_!# '
for ac_last_try in false false false false false :; do
cat >conf$$subs.sed <<_ACEOF
CP!$CP$ac_delim
DATE!$DATE$ac_delim
FIND!$FIND$ac_delim
MKDIR!$MKDIR$ac_delim
MV!$MV$ac_delim
@ -35048,7 +35195,7 @@ LIBOBJS!$LIBOBJS$ac_delim
LTLIBOBJS!$LTLIBOBJS$ac_delim
_ACEOF
if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 79; then
if test `sed -n "s/.*$ac_delim\$/X/p" conf$$subs.sed | grep -c X` = 81; then
break
elif $ac_last_try; then
{ { echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5

View File

@ -1773,6 +1773,8 @@ define fastcc i32 @tailcaller(i32 %in1, i32 %in2) {
<li><b>i386-pc-mingw32msvc</b> &mdash; MingW crosscompiler on Linux</li>
<li><b>i686-apple-darwin*</b> &mdash; Apple Darwin on X86</li>
<li><b>x86_64-unknown-linux-gnu</b> &mdash; Linux</li>
</ul>
</div>
@ -2116,7 +2118,7 @@ MOVSX32rm16 -&gt; movsx, 32-bit register, 16-bit memory
<a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
<a href="http://llvm.org">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2009-05-13 23:33:08 +0200 (Wed, 13 May 2009) $
Last modified: $Date: 2009-06-15 12:17:44 +0000 (Mon, 15 Jun 2009) $
</address>
</body>

View File

@ -1116,9 +1116,9 @@ command-line parser sees <b><tt>cl::init</tt></b>, it knows where to put the
initial value. (You will get an error at runtime if you don't put them in
the right order.)</li>
<li><a name="cl::location">The <b><tt>cl::location</tt></b></a> attribute where to
store the value for a parsed command line option if using external storage. See
the section on <a href="#storage">Internal vs External Storage</a> for more
<li><a name="cl::location">The <b><tt>cl::location</tt></b></a> attribute where
to store the value for a parsed command line option if using external storage.
See the section on <a href="#storage">Internal vs External Storage</a> for more
information.</li>
<li><a name="cl::aliasopt">The <b><tt>cl::aliasopt</tt></b></a> attribute
@ -1146,6 +1146,11 @@ specify macro options where the option name doesn't equal the enum name. For
this macro, the first argument is the enum value, the second is the flag name,
and the third is the description.</li>
</ol>
You will get a compile time error if you try to use cl::values with a parser
that does not support it.</li>
<li><a name="cl::multi_val">The <b><tt>cl::multi_val</tt></b></a>
attribute specifies that this option takes multiple values
(example: <tt>-sectalign segname sectname sectvalue</tt>). This
@ -1156,12 +1161,6 @@ types). It is allowed to use all of the usual modifiers on
multi-valued options (besides <tt>cl::ValueDisallowed</tt>,
obviously).</li>
</ol>
You will get a compile time error if you try to use cl::values with a parser
that does not support it.</li>
</ul>
</div>
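
To make the cl::location attribute discussed in the list above concrete, here is a small sketch of a boolean option backed by external storage. The option name, description, and flag variable are made up for the example, but the cl::opt<bool, true> plus cl::location pattern is the one the section describes.

    // Sketch: a command-line flag whose parsed value lands in an external
    // global instead of inside the cl::opt object itself.
    #include "llvm/Support/CommandLine.h"
    using namespace llvm;

    bool DebugFlag;  // external storage owned by the application

    static cl::opt<bool, true>              // 'true' selects external storage
    Debug("debug-demo",
          cl::desc("Enable debugging output (illustrative option)"),
          cl::Hidden,
          cl::location(DebugFlag));

    int main(int argc, char **argv) {
      cl::ParseCommandLineOptions(argc, argv);
      return DebugFlag ? 1 : 0;
    }
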
@ -1973,7 +1972,7 @@ tutorial.</p>
<a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
<a href="http://llvm.org">LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2009-04-08 05:43:51 +0200 (Wed, 08 Apr 2009) $
Last modified: $Date: 2009-06-17 03:09:39 +0000 (Wed, 17 Jun 2009) $
</address>
</body>

View File

@ -21,21 +21,22 @@ The ReST source lives in the directory 'tools/llvmc/doc'. -->
<li><a class="reference internal" href="#compiling-with-llvmc" id="id5">Compiling with LLVMC</a></li>
<li><a class="reference internal" href="#predefined-options" id="id6">Predefined options</a></li>
<li><a class="reference internal" href="#compiling-llvmc-plugins" id="id7">Compiling LLVMC plugins</a></li>
<li><a class="reference internal" href="#customizing-llvmc-the-compilation-graph" id="id8">Customizing LLVMC: the compilation graph</a></li>
<li><a class="reference internal" href="#describing-options" id="id9">Describing options</a><ul>
<li><a class="reference internal" href="#external-options" id="id10">External options</a></li>
<li><a class="reference internal" href="#compiling-standalone-llvmc-based-drivers" id="id8">Compiling standalone LLVMC-based drivers</a></li>
<li><a class="reference internal" href="#customizing-llvmc-the-compilation-graph" id="id9">Customizing LLVMC: the compilation graph</a></li>
<li><a class="reference internal" href="#describing-options" id="id10">Describing options</a><ul>
<li><a class="reference internal" href="#external-options" id="id11">External options</a></li>
</ul>
</li>
<li><a class="reference internal" href="#conditional-evaluation" id="id11">Conditional evaluation</a></li>
<li><a class="reference internal" href="#writing-a-tool-description" id="id12">Writing a tool description</a><ul>
<li><a class="reference internal" href="#actions" id="id13">Actions</a></li>
<li><a class="reference internal" href="#conditional-evaluation" id="id12">Conditional evaluation</a></li>
<li><a class="reference internal" href="#writing-a-tool-description" id="id13">Writing a tool description</a><ul>
<li><a class="reference internal" href="#actions" id="id14">Actions</a></li>
</ul>
</li>
<li><a class="reference internal" href="#language-map" id="id14">Language map</a></li>
<li><a class="reference internal" href="#more-advanced-topics" id="id15">More advanced topics</a><ul>
<li><a class="reference internal" href="#hooks-and-environment-variables" id="id16">Hooks and environment variables</a></li>
<li><a class="reference internal" href="#how-plugins-are-loaded" id="id17">How plugins are loaded</a></li>
<li><a class="reference internal" href="#debugging" id="id18">Debugging</a></li>
<li><a class="reference internal" href="#language-map" id="id15">Language map</a></li>
<li><a class="reference internal" href="#more-advanced-topics" id="id16">More advanced topics</a><ul>
<li><a class="reference internal" href="#hooks-and-environment-variables" id="id17">Hooks and environment variables</a></li>
<li><a class="reference internal" href="#how-plugins-are-loaded" id="id18">How plugins are loaded</a></li>
<li><a class="reference internal" href="#debugging" id="id19">Debugging</a></li>
</ul>
</li>
</ul>
@ -56,7 +57,7 @@ abstract graph. The structure of this graph is completely determined
by plugins, which can be either statically or dynamically linked. This
makes it possible to easily adapt LLVMC for other purposes - for
example, as a build tool for game resources.</p>
<p>Because LLVMC employs <a class="reference external" href="http://llvm.cs.uiuc.edu/docs/TableGenFundamentals.html">TableGen</a> as its configuration language, you
<p>Because LLVMC employs <a class="reference external" href="http://llvm.org/docs/TableGenFundamentals.html">TableGen</a> as its configuration language, you
need to be familiar with it to customize LLVMC.</p>
</div>
<div class="section" id="compiling-with-llvmc">
@ -70,12 +71,12 @@ $ llvmc -O3 -Wall hello.cpp
$ ./a.out
hello
</pre>
<p>One nice feature of LLVMC is that one doesn't have to distinguish
between different compilers for different languages (think <tt class="docutils literal"><span class="pre">g++</span></tt> and
<tt class="docutils literal"><span class="pre">gcc</span></tt>) - the right toolchain is chosen automatically based on input
language names (which are, in turn, determined from file
extensions). If you want to force files ending with &quot;.c&quot; to compile as
C++, use the <tt class="docutils literal"><span class="pre">-x</span></tt> option, just like you would do it with <tt class="docutils literal"><span class="pre">gcc</span></tt>:</p>
<p>One nice feature of LLVMC is that one doesn't have to distinguish between
different compilers for different languages (think <tt class="docutils literal"><span class="pre">g++</span></tt> vs. <tt class="docutils literal"><span class="pre">gcc</span></tt>) - the
right toolchain is chosen automatically based on input language names (which
are, in turn, determined from file extensions). If you want to force files
ending with &quot;.c&quot; to compile as C++, use the <tt class="docutils literal"><span class="pre">-x</span></tt> option, just like you would
do it with <tt class="docutils literal"><span class="pre">gcc</span></tt>:</p>
<pre class="literal-block">
$ # hello.c is really a C++ file
$ llvmc -x c++ hello.c
@ -110,16 +111,17 @@ until the next -x option.</li>
<li><tt class="docutils literal"><span class="pre">-v</span></tt> - Enable verbose mode, i.e. print out all executed commands.</li>
<li><tt class="docutils literal"><span class="pre">--check-graph</span></tt> - Check the compilation for common errors like mismatched
output/input language names, multiple default edges and cycles. Because of
plugins, these checks can't be performed at compile-time. Exit with code zero if
no errors were found, and return the number of found errors otherwise. Hidden
option, useful for debugging LLVMC plugins.</li>
plugins, these checks can't be performed at compile-time. Exit with code zero
if no errors were found, and return the number of found errors
otherwise. Hidden option, useful for debugging LLVMC plugins.</li>
<li><tt class="docutils literal"><span class="pre">--view-graph</span></tt> - Show a graphical representation of the compilation graph
and exit. Requires that you have <tt class="docutils literal"><span class="pre">dot</span></tt> and <tt class="docutils literal"><span class="pre">gv</span></tt> programs installed. Hidden
option, useful for debugging LLVMC plugins.</li>
<li><tt class="docutils literal"><span class="pre">--write-graph</span></tt> - Write a <tt class="docutils literal"><span class="pre">compilation-graph.dot</span></tt> file in the current
directory with the compilation graph description in Graphviz format (identical
to the file used by the <tt class="docutils literal"><span class="pre">--view-graph</span></tt> option). The <tt class="docutils literal"><span class="pre">-o</span></tt> option can be used
to set the output file name. Hidden option, useful for debugging LLVMC plugins.</li>
to the file used by the <tt class="docutils literal"><span class="pre">--view-graph</span></tt> option). The <tt class="docutils literal"><span class="pre">-o</span></tt> option can be
used to set the output file name. Hidden option, useful for debugging LLVMC
plugins.</li>
<li><tt class="docutils literal"><span class="pre">--save-temps</span></tt> - Write temporary files to the current directory
and do not delete them on exit. Hidden option, useful for debugging.</li>
<li><tt class="docutils literal"><span class="pre">--help</span></tt>, <tt class="docutils literal"><span class="pre">--help-hidden</span></tt>, <tt class="docutils literal"><span class="pre">--version</span></tt> - These options have
@ -154,33 +156,58 @@ generic:</p>
<pre class="literal-block">
$ mv Simple.td MyPlugin.td
</pre>
<p>Note that the plugin source directory must be placed under
<tt class="docutils literal"><span class="pre">$LLVMC_DIR/plugins</span></tt> to make use of the existing build
infrastructure. To build a version of the LLVMC executable called
<tt class="docutils literal"><span class="pre">mydriver</span></tt> with your plugin compiled in, use the following command:</p>
<pre class="literal-block">
$ cd $LLVMC_DIR
$ make BUILTIN_PLUGINS=MyPlugin DRIVER_NAME=mydriver
</pre>
<p>To build your plugin as a dynamic library, just <tt class="docutils literal"><span class="pre">cd</span></tt> to its source
directory and run <tt class="docutils literal"><span class="pre">make</span></tt>. The resulting file will be called
<tt class="docutils literal"><span class="pre">LLVMC$(LLVMC_PLUGIN).$(DLL_EXTENSION)</span></tt> (in our case,
<tt class="docutils literal"><span class="pre">LLVMCMyPlugin.so</span></tt>). This library can be then loaded in with the
<tt class="docutils literal"><span class="pre">plugin_llvmc_$(LLVMC_PLUGIN).$(DLL_EXTENSION)</span></tt> (in our case,
<tt class="docutils literal"><span class="pre">plugin_llvmc_MyPlugin.so</span></tt>). This library can be then loaded in with the
<tt class="docutils literal"><span class="pre">-load</span></tt> option. Example:</p>
<pre class="literal-block">
$ cd $LLVMC_DIR/plugins/Simple
$ make
$ llvmc -load $LLVM_DIR/Release/lib/LLVMCSimple.so
$ llvmc -load $LLVM_DIR/Release/lib/plugin_llvmc_Simple.so
</pre>
</div>
<div class="section" id="compiling-standalone-llvmc-based-drivers">
<h1><a class="toc-backref" href="#id8">Compiling standalone LLVMC-based drivers</a></h1>
<p>By default, the <tt class="docutils literal"><span class="pre">llvmc</span></tt> executable consists of a driver core plus several
statically linked plugins (<tt class="docutils literal"><span class="pre">Base</span></tt> and <tt class="docutils literal"><span class="pre">Clang</span></tt> at the moment). You can
produce a standalone LLVMC-based driver executable by linking the core with your
own plugins. The recommended way to do this is by starting with the provided
<tt class="docutils literal"><span class="pre">Skeleton</span></tt> example (<tt class="docutils literal"><span class="pre">$LLVMC_DIR/example/Skeleton</span></tt>):</p>
<pre class="literal-block">
$ cd $LLVMC_DIR/example/
$ cp -r Skeleton mydriver
$ cd mydriver
$ vim Makefile
[...]
$ make
</pre>
<p>If you're compiling LLVM with different source and object directories, then you
must perform the following additional steps before running <tt class="docutils literal"><span class="pre">make</span></tt>:</p>
<pre class="literal-block">
# LLVMC_SRC_DIR = $LLVM_SRC_DIR/tools/llvmc/
# LLVMC_OBJ_DIR = $LLVM_OBJ_DIR/tools/llvmc/
$ cp $LLVMC_SRC_DIR/example/mydriver/Makefile \
$LLVMC_OBJ_DIR/example/mydriver/
$ cd $LLVMC_OBJ_DIR/example/mydriver
$ make
</pre>
<p>Another way to do the same thing is by using the following command:</p>
<pre class="literal-block">
$ cd $LLVMC_DIR
$ make LLVMC_BUILTIN_PLUGINS=MyPlugin LLVMC_BASED_DRIVER_NAME=mydriver
</pre>
<p>This works with both srcdir == objdir and srcdir != objdir, but assumes that the
plugin source directory was placed under <tt class="docutils literal"><span class="pre">$LLVMC_DIR/plugins</span></tt>.</p>
<p>Sometimes, you will want a 'bare-bones' version of LLVMC that has no
built-in plugins. It can be compiled with the following command:</p>
<pre class="literal-block">
$ cd $LLVMC_DIR
$ make BUILTIN_PLUGINS=&quot;&quot;
$ make LLVMC_BUILTIN_PLUGINS=&quot;&quot;
</pre>
</div>
<div class="section" id="customizing-llvmc-the-compilation-graph">
<h1><a class="toc-backref" href="#id8">Customizing LLVMC: the compilation graph</a></h1>
<h1><a class="toc-backref" href="#id9">Customizing LLVMC: the compilation graph</a></h1>
<p>Each TableGen configuration file should include the common
definitions:</p>
<pre class="literal-block">
@ -248,7 +275,7 @@ debugging), run <tt class="docutils literal"><span class="pre">llvmc</span> <spa
<tt class="docutils literal"><span class="pre">gsview</span></tt> installed for this to work properly.</p>
</div>
<div class="section" id="describing-options">
<h1><a class="toc-backref" href="#id9">Describing options</a></h1>
<h1><a class="toc-backref" href="#id10">Describing options</a></h1>
<p>Command-line options that the plugin supports are defined by using an
<tt class="docutils literal"><span class="pre">OptionList</span></tt>:</p>
<pre class="literal-block">
@ -317,7 +344,7 @@ the <tt class="docutils literal"><span class="pre">one_or_more</span></tt> and <
</li>
</ul>
<div class="section" id="external-options">
<h2><a class="toc-backref" href="#id10">External options</a></h2>
<h2><a class="toc-backref" href="#id11">External options</a></h2>
<p>Sometimes, when linking several plugins together, one plugin needs to
access options defined in some other plugin. Because of the way
options are implemented, such options must be marked as
@ -332,7 +359,7 @@ for. Example:</p>
</div>
</div>
<div class="section" id="conditional-evaluation">
<span id="case"></span><h1><a class="toc-backref" href="#id11">Conditional evaluation</a></h1>
<span id="case"></span><h1><a class="toc-backref" href="#id12">Conditional evaluation</a></h1>
<p>The 'case' construct is the main means by which programmability is
achieved in LLVMC. It can be used to calculate edge weights, program
actions and modify the shell commands to be executed. The 'case'
@ -412,7 +439,7 @@ one of its arguments returns true. Example: <tt class="docutils literal"><span c
</ul>
</div>
<div class="section" id="writing-a-tool-description">
<h1><a class="toc-backref" href="#id12">Writing a tool description</a></h1>
<h1><a class="toc-backref" href="#id13">Writing a tool description</a></h1>
<p>As was said earlier, nodes in the compilation graph represent tools,
which are described separately. A tool definition looks like this
(taken from the <tt class="docutils literal"><span class="pre">include/llvm/CompilerDriver/Tools.td</span></tt> file):</p>
@ -454,7 +481,7 @@ below).</li>
</li>
</ul>
<div class="section" id="actions">
<h2><a class="toc-backref" href="#id13">Actions</a></h2>
<h2><a class="toc-backref" href="#id14">Actions</a></h2>
<p>A tool often needs to react to command-line options, and this is
precisely what the <tt class="docutils literal"><span class="pre">actions</span></tt> property is for. The next example
illustrates this feature:</p>
@ -515,7 +542,7 @@ Example: <tt class="docutils literal"><span class="pre">(unpack_values</span> <s
</div>
</div>
<div class="section" id="language-map">
<h1><a class="toc-backref" href="#id14">Language map</a></h1>
<h1><a class="toc-backref" href="#id15">Language map</a></h1>
<p>If you are adding support for a new language to LLVMC, you'll need to
modify the language map, which defines mappings from file extensions
to language names. It is used to choose the proper toolchain(s) for a
@ -538,9 +565,9 @@ multiple output languages, for nodes &quot;inside&quot; the graph the input and
output languages should match. This is enforced at compile-time.</p>
</div>
<div class="section" id="more-advanced-topics">
<h1><a class="toc-backref" href="#id15">More advanced topics</a></h1>
<h1><a class="toc-backref" href="#id16">More advanced topics</a></h1>
<div class="section" id="hooks-and-environment-variables">
<span id="hooks"></span><h2><a class="toc-backref" href="#id16">Hooks and environment variables</a></h2>
<span id="hooks"></span><h2><a class="toc-backref" href="#id17">Hooks and environment variables</a></h2>
<p>Normally, LLVMC executes programs from the system <tt class="docutils literal"><span class="pre">PATH</span></tt>. Sometimes,
this is not sufficient: for example, we may want to specify tool paths
or names in the configuration file. This can be easily achieved via
@ -573,7 +600,7 @@ the <tt class="docutils literal"><span class="pre">case</span></tt> expression (
</pre>
</div>
<div class="section" id="how-plugins-are-loaded">
<span id="priorities"></span><h2><a class="toc-backref" href="#id17">How plugins are loaded</a></h2>
<span id="priorities"></span><h2><a class="toc-backref" href="#id18">How plugins are loaded</a></h2>
<p>It is possible for LLVMC plugins to depend on each other. For example,
one can create edges between nodes defined in some other plugin. To
make this work, however, that plugin should be loaded first. To
@ -589,7 +616,7 @@ with 0. Therefore, the plugin with the highest priority value will be
loaded last.</p>
</div>
<div class="section" id="debugging">
<h2><a class="toc-backref" href="#id18">Debugging</a></h2>
<h2><a class="toc-backref" href="#id19">Debugging</a></h2>
<p>When writing LLVMC plugins, it can be useful to get a visual view of
the resulting compilation graph. This can be achieved via the command
line option <tt class="docutils literal"><span class="pre">--view-graph</span></tt>. This command assumes that <a class="reference external" href="http://www.graphviz.org/">Graphviz</a> and
@ -615,7 +642,7 @@ errors as its status code.</p>
<a href="mailto:foldr@codedgers.com">Mikhail Glushenkov</a><br />
<a href="http://llvm.org">LLVM Compiler Infrastructure</a><br />
Last modified: $Date: 2009-05-06 03:41:47 +0200 (Wed, 06 May 2009) $
Last modified: $Date: 2009-06-17 02:56:48 +0000 (Wed, 17 Jun 2009) $
</address></div>
</div>
</div>

View File

@ -48,23 +48,28 @@ command-line LLVMC usage, refer to the <tt class="docutils literal"><span class=
</div>
<div class="section" id="using-llvmc-to-generate-toolchain-drivers">
<h1><a class="toc-backref" href="#id3">Using LLVMC to generate toolchain drivers</a></h1>
<p>LLVMC plugins are written mostly using <a class="reference external" href="http://llvm.cs.uiuc.edu/docs/TableGenFundamentals.html">TableGen</a>, so you need to
<p>LLVMC plugins are written mostly using <a class="reference external" href="http://llvm.org/docs/TableGenFundamentals.html">TableGen</a>, so you need to
be familiar with it to get anything done.</p>
<p>Start by compiling <tt class="docutils literal"><span class="pre">plugins/Simple/Simple.td</span></tt>, which is a primitive
wrapper for <tt class="docutils literal"><span class="pre">gcc</span></tt>:</p>
<p>Start by compiling <tt class="docutils literal"><span class="pre">example/Simple</span></tt>, which is a primitive wrapper for
<tt class="docutils literal"><span class="pre">gcc</span></tt>:</p>
<pre class="literal-block">
$ cd $LLVM_DIR/tools/llvmc
$ make DRIVER_NAME=mygcc BUILTIN_PLUGINS=Simple
$ cp -r example/Simple plugins/Simple
# NB: A less verbose way to compile standalone LLVMC-based drivers is
# described in the reference manual.
$ make LLVMC_BASED_DRIVER_NAME=mygcc LLVMC_BUILTIN_PLUGINS=Simple
$ cat &gt; hello.c
[...]
$ mygcc hello.c
$ ./hello.out
Hello
</pre>
<p>Here we link our plugin with the LLVMC core statically to form an
executable file called <tt class="docutils literal"><span class="pre">mygcc</span></tt>. It is also possible to build our
plugin as a standalone dynamic library; this is described in the
reference manual.</p>
<p>Here we link our plugin with the LLVMC core statically to form an executable
file called <tt class="docutils literal"><span class="pre">mygcc</span></tt>. It is also possible to build our plugin as a dynamic
library to be loaded by the <tt class="docutils literal"><span class="pre">llvmc</span></tt> executable (or any other LLVMC-based
standalone driver); this is described in the reference manual.</p>
<p>Contents of the file <tt class="docutils literal"><span class="pre">Simple.td</span></tt> look like this:</p>
<pre class="literal-block">
// Include common definitions

View File

@ -108,7 +108,7 @@
<li>Patches should be made with this command:
<div class="doc_code">
<pre>
svn diff -x -u
svn diff
</pre>
</div>
or with the utility <tt>utils/mkpatch</tt>, which makes it easy to read
@ -592,7 +592,7 @@ Changes</a></div>
Written by the
<a href="mailto:llvm-oversight@cs.uiuc.edu">LLVM Oversight Group</a><br>
<a href="http://llvm.org">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2009-04-05 14:38:44 +0200 (Sun, 05 Apr 2009) $
Last modified: $Date: 2009-06-15 04:18:54 +0000 (Mon, 15 Jun 2009) $
</address>
</body>
</html>

View File

@ -1106,8 +1106,9 @@ an <tt>ssp</tt> attribute, then the resulting function will have
an <tt>sspreq</tt> attribute.</dd>
<dt><tt>noredzone</tt></dt>
<dd>This attribute indicates that the code generator should not enforce red zone
mandated by target specific ABI.</dd>
<dd>This attribute indicates that the code generator should not use a
red zone, even if the target-specific ABI normally permits it.
</dd>
<dt><tt>noimplicitfloat</tt></dt>
<dd>This attributes disables implicit floating point instructions.</dd>
@ -3531,9 +3532,10 @@ address space (address space zero).</p>
bytes of memory from the operating system and returns a pointer of the
appropriate type to the program. If "NumElements" is specified, it is the
number of elements allocated, otherwise "NumElements" is defaulted to be one.
If a constant alignment is specified, the value result of the allocation is guaranteed to
be aligned to at least that boundary. If not specified, or if zero, the target can
choose to align the allocation on any convenient boundary.</p>
If a constant alignment is specified, the value result of the allocation is
guaranteed to be aligned to at least that boundary. If not specified, or if
zero, the target can choose to align the allocation on any convenient boundary
compatible with the type.</p>
<p>'<tt>type</tt>' must be a sized type.</p>
@ -3624,9 +3626,10 @@ space (address space zero).</p>
bytes of memory on the runtime stack, returning a pointer of the
appropriate type to the program. If "NumElements" is specified, it is the
number of elements allocated, otherwise "NumElements" is defaulted to be one.
If a constant alignment is specified, the value result of the allocation is guaranteed
to be aligned to at least that boundary. If not specified, or if zero, the target
can choose to align the allocation on any convenient boundary.</p>
If a constant alignment is specified, the value result of the allocation is
guaranteed to be aligned to at least that boundary. If not specified, or if
zero, the target can choose to align the allocation on any convenient boundary
compatible with the type.</p>
<p>'<tt>type</tt>' may be any sized type.</p>
@ -6428,9 +6431,6 @@ on any integer bit width.</p>
<h5>Overview:</h5>
<p><i><b>Warning:</b> '<tt>llvm.umul.with.overflow</tt>' is badly broken. It is
actively being fixed, but it should not currently be used!</i></p>
<p>The '<tt>llvm.umul.with.overflow</tt>' family of intrinsic functions perform
a unsigned multiplication of the two arguments, and indicate whether an overflow
occurred during the unsigned multiplication.</p>
@ -7221,7 +7221,7 @@ declare void @llvm.stackprotector( i8* &lt;guard&gt;, i8** &lt;slot&gt; )
<a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
<a href="http://llvm.org">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2009-06-12 21:45:19 +0200 (Fri, 12 Jun 2009) $
Last modified: $Date: 2009-06-20 13:26:06 +0000 (Sat, 20 Jun 2009) $
</address>
</body>

View File

@ -232,17 +232,16 @@
Normally, the makefile system will build all the software into a single
<tt>libname.o</tt> (pre-linked) object. This means the library is not
searchable and that the distinction between compilation units has been
dissolved. Optionally, you can ask for a shared library (.so), archive library
(.a) or to not have the default (relinked) library built. For example:</p>
dissolved. Optionally, you can ask for a shared library (.so) or archive
library (.a) built. Archive libraries are the default. For example:</p>
<pre><tt>
LIBRARYNAME = mylib
SHARED_LIBRARY = 1
ARCHIVE_LIBRARY = 1
DONT_BUILD_RELINKED = 1
</tt></pre>
<p>says to build a library named "mylib" with both a shared library
(<tt>mylib.so</tt>) and an archive library (<tt>mylib.a</tt>) version but
not to build the relinked object (<tt>mylib.o</tt>). The contents of all the
(<tt>mylib.so</tt>) and an archive library (<tt>mylib.a</tt>) version. The
contents of all the
libraries produced will be the same, they are just constructed differently.
Note that you normally do not need to specify the sources involved. The LLVM
Makefile system will infer the source files from the contents of the source
@ -307,8 +306,6 @@
on.</li>
<li>The <a href="#LINK_LIBS_IN_SHARED">LINK_LIBS_IN_SHARED</a> variable
is turned on.</li>
<li>The <a href="#DONT_BUILD_RELINKED">DONT_BUILD_RELINKED</a> variable
is turned on.</li>
</ol>
<p>A loadable module is loaded by LLVM via the facilities of libtool's libltdl
library which is part of <tt>lib/System</tt> implementation.</p>
@ -637,11 +634,6 @@
<dd>If set to any value, causes the makefiles to <b>not</b> automatically
generate dependencies when running the compiler. Use of this feature is
discouraged and it may be removed at a later date.</dd>
<dt><a name="DONT_BUILD_RELINKED"><tt>DONT_BUILD_RELINKED</tt></a></dt>
<dd>If set to any value, causes a relinked library (.o) not to be built. By
default, libraries are built as re-linked since most LLVM libraries are
needed in their entirety and re-linked libraries will be linked more quickly
than equivalent archive libraries.</dd>
<dt><a name="ENABLE_OPTIMIZED"><tt>ENABLE_OPTIMIZED</tt></a></dt>
<dd>If set to any value, causes the build to generate optimized objects,
libraries and executables. This alters the flags specified to the compilers
@ -960,7 +952,6 @@
DestArchiveLib
DestBitcodeLib
DestModule
DestRelinkedLib
DestSharedLib
DestTool
DistAlways
@ -1004,7 +995,6 @@
ProjUsedLibs
Ranlib
RecursiveTargets
Relink
SrcMakefiles
Strip
StripWarnMsg
@ -1026,7 +1016,7 @@
<a href="mailto:rspencer@x10sys.com">Reid Spencer</a><br>
<a href="http://llvm.org">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2009-04-26 00:08:52 +0200 (Sun, 26 Apr 2009) $
Last modified: $Date: 2009-06-16 23:00:42 +0000 (Tue, 16 Jun 2009) $
</address>
</body>
</html>

View File

@ -2,6 +2,7 @@
"http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-type" content="text/html;charset=UTF-8">
<title>LLVM Programmer's Manual</title>
<link rel="stylesheet" href="llvm.css" type="text/css">
</head>
@ -129,6 +130,15 @@ with another <tt>Value</tt></a> </li>
</ul>
</li>
<li><a href="#threading">Threads and LLVM</a>
<ul>
<li><a href="#startmultithreaded">Entering and Exiting Multithreaded Mode
</a></li>
<li><a href="#shutdown">Ending execution with <tt>llvm_shutdown()</tt></a></li>
<li><a href="#managedstatic">Lazy initialization with <tt>ManagedStatic</tt></a></li>
</ul>
</li>
<li><a href="#advanced">Advanced Topics</a>
<ul>
<li><a href="#TypeResolve">LLVM Type Resolution</a>
@ -176,8 +186,9 @@ with another <tt>Value</tt></a> </li>
<p>Written by <a href="mailto:sabre@nondot.org">Chris Lattner</a>,
<a href="mailto:dhurjati@cs.uiuc.edu">Dinakar Dhurjati</a>,
<a href="mailto:ggreif@gmail.com">Gabor Greif</a>,
<a href="mailto:jstanley@cs.uiuc.edu">Joel Stanley</a> and
<a href="mailto:rspencer@x10sys.com">Reid Spencer</a></p>
<a href="mailto:jstanley@cs.uiuc.edu">Joel Stanley</a>,
<a href="mailto:rspencer@x10sys.com">Reid Spencer</a> and
<a href="mailto:owen@apple.com">Owen Anderson</a></p>
</div>
<!-- *********************************************************************** -->
@ -2118,7 +2129,7 @@ FunctionType *ft = TypeBuilder&lt;types::i&lt;8&gt;(types::i&lt;32&gt;*), true&g
<div class="doc_code">
<pre>
std::vector<const Type*> params;
std::vector&lt;const Type*&gt; params;
params.push_back(PointerType::getUnqual(Type::Int32Ty));
FunctionType *ft = FunctionType::get(Type::Int8Ty, params, false);
</pre>
@ -2129,6 +2140,123 @@ comment</a> for more details.</p>
</div>
<!-- *********************************************************************** -->
<div class="doc_section">
<a name="threading">Threads and LLVM</a>
</div>
<!-- *********************************************************************** -->
<div class="doc_text">
<p>
This section describes the interaction of the LLVM APIs with multithreading,
both on the part of client applications, and in the JIT, in the hosted
application.
</p>
<p>
Note that LLVM's support for multithreading is still relatively young. Up
through version 2.5, the execution of threaded hosted applications was
supported, but not threaded client access to the APIs. While this use case is
now supported, clients <em>must</em> adhere to the guidelines specified below to
ensure proper operation in multithreaded mode.
</p>
<p>
Note that, on Unix-like platforms, LLVM requires the presence of GCC's atomic
intrinsics in order to support threaded operation. If you need a
multithreading-capable LLVM on a platform without a suitably modern system
compiler, consider compiling LLVM and LLVM-GCC in single-threaded mode, and
using the resultant compiler to build a copy of LLVM with multithreading
support.
</p>
</div>
<!-- ======================================================================= -->
<div class="doc_subsection">
<a name="startmultithreaded">Entering and Exiting Multithreaded Mode</a>
</div>
<div class="doc_text">
<p>
In order to properly protect its internal data structures while avoiding
excessive locking overhead in the single-threaded case, LLVM must initialize
certain data structures necessary to provide guards around its internals. To do
so, the client program must invoke <tt>llvm_start_multithreaded()</tt> before
making any concurrent LLVM API calls. To subsequently tear down these
structures, use the <tt>llvm_stop_multithreaded()</tt> call. You can also use
the <tt>llvm_is_multithreaded()</tt> call to check the status of multithreaded
mode.
</p>
<p>
Note that both of these calls must be made <em>in isolation</em>. That is to
say that no other LLVM API calls may be executing at any time during the
execution of <tt>llvm_start_multithreaded()</tt> or
<tt>llvm_stop_multithreaded()</tt>. It is the client's responsibility to
enforce this isolation.
</p>
<p>
The return value of <tt>llvm_start_multithreaded()</tt> indicates the success or
failure of the initialization. Failure typically indicates that your copy of
LLVM was built without multithreading support, usually because GCC atomic
intrinsics were not found in your system compiler. In this case, the LLVM API
will not be safe for concurrent calls. However, it <em>will</em> be safe for
hosting threaded applications in the JIT, though care must be taken to ensure
that side exits and the like do not accidentally result in concurrent LLVM API
calls.
</p>
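<p>
As a rough sketch (the per-thread work function is hypothetical, and the
include path assumes that the new <tt>llvm/System/Threading.h</tt> header
introduced in this revision declares these entry points), a client might
bracket its concurrent use of the APIs like this:
</p>

<div class="doc_code">
<pre>
#include "llvm/System/Threading.h"
using namespace llvm;

void buildModuleOnThread();  // hypothetical per-thread work using LLVM APIs

int main() {
  // Must be called in isolation, before any concurrent LLVM API calls.
  if (!llvm_start_multithreaded())
    return 1;  // this build of LLVM is not safe for concurrent API calls

  // ... spawn threads that run buildModuleOnThread(), then join them ...

  // Also must be called in isolation, after all other threads have finished.
  llvm_stop_multithreaded();
  return 0;
}
</pre>
</div>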
</div>
<!-- ======================================================================= -->
<div class="doc_subsection">
<a name="shutdown">Ending Execution with <tt>llvm_shutdown()</tt></a>
</div>
<div class="doc_text">
<p>
When you are done using the LLVM APIs, you should call <tt>llvm_shutdown()</tt>
to deallocate memory used for internal structures. This will also invoke
<tt>llvm_stop_multithreaded()</tt> if LLVM is operating in multithreaded mode.
As such, <tt>llvm_shutdown()</tt> requires the same isolation guarantees as
<tt>llvm_stop_multithreaded()</tt>.
</p>
<p>
Note that, if you use scope-based shutdown, you can use the
<tt>llvm_shutdown_obj</tt> class, which calls <tt>llvm_shutdown()</tt> in its
destructor.
</p>
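<p>
For example, a minimal sketch of a tool's <tt>main()</tt> using scope-based
shutdown:
</p>

<div class="doc_code">
<pre>
#include "llvm/Support/ManagedStatic.h"
using namespace llvm;

int main(int argc, char **argv) {
  llvm_shutdown_obj Y;  // calls llvm_shutdown() when it goes out of scope
  // ... use the LLVM APIs ...
  return 0;
}
</pre>
</div>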
</div>
<!-- ======================================================================= -->
<div class="doc_subsection">
<a name="managedstatic">Lazy Initialization with <tt>ManagedStatic</tt></a>
</div>
<div class="doc_text">
<p>
<tt>ManagedStatic</tt> is a utility class in LLVM used to implement static
initialization of static resources, such as the global type tables. Before the
invocation of <tt>llvm_shutdown()</tt>, it implements a simple lazy
initialization scheme. Once <tt>llvm_start_multithreaded()</tt> returns,
however, it uses double-checked locking to implement thread-safe lazy
initialization.
</p>
<p>
Note that, because no other threads are allowed to issue LLVM API calls before
<tt>llvm_start_multithreaded()</tt> returns, it is possible to have
<tt>ManagedStatic</tt>s of <tt>llvm::sys::Mutex</tt> objects.
</p>
<p>
The <tt>llvm_acquire_global_lock()</tt> and <tt>llvm_release_global_lock()</tt>
APIs provide access to the global lock used to implement the double-checked
locking for lazy initialization. These should only be used internally to LLVM,
and only if you know what you're doing!
</p>
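<p>
A short sketch of the idiom (the guarded object here is an arbitrary example,
not an actual LLVM global):
</p>

<div class="doc_code">
<pre>
#include "llvm/Support/ManagedStatic.h"
#include &lt;vector&gt;
using namespace llvm;

static ManagedStatic&lt;std::vector&lt;int&gt; &gt; SharedTable;

void recordValue(int V) {
  // The vector is constructed lazily on first access; in multithreaded mode
  // that construction is protected by double-checked locking.
  SharedTable-&gt;push_back(V);
}
</pre>
</div>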
</div>
<!-- *********************************************************************** -->
<div class="doc_section">
<a name="advanced">Advanced Topics</a>
@ -3430,7 +3558,7 @@ never change at runtime).</p>
<p><tt>#include "<a
href="/doxygen/BasicBlock_8h-source.html">llvm/BasicBlock.h</a>"</tt><br>
doxygen info: <a href="/doxygen/structllvm_1_1BasicBlock.html">BasicBlock
doxygen info: <a href="/doxygen/classllvm_1_1BasicBlock.html">BasicBlock
Class</a><br>
Superclass: <a href="#Value"><tt>Value</tt></a></p>
@ -3536,7 +3664,7 @@ arguments. An argument has a pointer to the parent Function.</p>
<a href="mailto:dhurjati@cs.uiuc.edu">Dinakar Dhurjati</a> and
<a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
<a href="http://llvm.org">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2009-05-01 22:40:51 +0200 (Fri, 01 May 2009) $
Last modified: $Date: 2009-06-17 21:12:26 +0000 (Wed, 17 Jun 2009) $
</address>
</body>

View File

@ -1537,7 +1537,7 @@ need some way to free analysis results when they are no longer useful. The
<p>If you are writing an analysis or any other pass that retains a significant
amount of state (for use by another pass which "requires" your pass and uses the
<a href="#getAnalysis">getAnalysis</a> method) you should implement
<tt>releaseMEmory</tt> to, well, release the memory allocated to maintain this
<tt>releaseMemory</tt> to, well, release the memory allocated to maintain this
internal state. This method is called after the <tt>run*</tt> method for the
class, before the next call of <tt>run*</tt> in your pass.</p>
@ -1821,7 +1821,7 @@ Despite that, we have kept the LLVM passes SMP ready, and you should too.</p>
<a href="mailto:sabre@nondot.org">Chris Lattner</a><br>
<a href="http://llvm.org">The LLVM Compiler Infrastructure</a><br>
Last modified: $Date: 2009-02-18 06:09:16 +0100 (Wed, 18 Feb 2009) $
Last modified: $Date: 2009-06-15 18:22:49 +0000 (Mon, 15 Jun 2009) $
</address>
</body>

View File

@ -34,6 +34,7 @@
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Target/TargetSelect.h"
#include <fstream>
#include <iostream>
using namespace llvm;
@ -135,6 +136,8 @@ int main(int argc, char **argv) {
//Write it out
if (JIT) {
InitializeNativeTarget();
std::cout << "------- Running JIT -------\n";
ExistingModuleProvider *mp = new ExistingModuleProvider(mod);
ExecutionEngine *ee = ExecutionEngine::create(mp, false);

View File

@ -42,11 +42,15 @@
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/ExecutionEngine/Interpreter.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetSelect.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
int main() {
InitializeNativeTarget();
// Create some module to put our function into it.
Module *M = new Module("test");

View File

@ -5,6 +5,7 @@
#include "llvm/PassManager.h"
#include "llvm/Analysis/Verifier.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetSelect.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Support/IRBuilder.h"
#include <cstdio>
@ -1081,6 +1082,8 @@ double printd(double X) {
//===----------------------------------------------------------------------===//
int main() {
InitializeNativeTarget();
// Install standard binary operators.
// 1 is lowest precedence.
BinopPrecedence['='] = 2;

View File

@ -26,6 +26,7 @@
#include "llvm/ExecutionEngine/JIT.h"
#include "llvm/ExecutionEngine/Interpreter.h"
#include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetSelect.h"
#include <iostream>
using namespace llvm;
@ -229,8 +230,9 @@ void* callFunc( void* param )
return (void*)(intptr_t)gv.IntVal.getZExtValue();
}
int main()
{
int main() {
InitializeNativeTarget();
// Create some module to put our function into it.
Module *M = new Module("test");

View File

@ -18,6 +18,7 @@
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <set>
#include <stack>
#include <vector>
@ -39,9 +40,9 @@ public:
};
template<class GraphT,
class SetType = std::set<typename GraphTraits<GraphT>::NodeType*>,
bool ExtStorage = false,
class GT = GraphTraits<GraphT> >
class SetType = llvm::SmallPtrSet<typename GraphTraits<GraphT>::NodeType*, 8>,
bool ExtStorage = false,
class GT = GraphTraits<GraphT> >
class po_iterator : public forward_iterator<typename GT::NodeType, ptrdiff_t>,
public po_iterator_storage<SetType, ExtStorage> {
typedef forward_iterator<typename GT::NodeType, ptrdiff_t> super;

View File

@ -49,6 +49,7 @@ public:
enum OSType {
UnknownOS,
AuroraUX,
Darwin,
DragonFly,
FreeBSD,

View File

@ -35,9 +35,9 @@ class IVStrideUse : public CallbackVH, public ilist_node<IVStrideUse> {
public:
IVStrideUse(IVUsersOfOneStride *parent,
const SCEVHandle &offset,
Instruction* U, Value *O, bool issigned)
Instruction* U, Value *O)
: CallbackVH(U), Parent(parent), Offset(offset),
OperandValToReplace(O), IsSigned(issigned),
OperandValToReplace(O),
IsUseOfPostIncrementedValue(false) {
}
@ -57,8 +57,7 @@ public:
/// getOffset - Return the offset to add to a theoretical induction
/// variable that starts at zero and counts up by the stride to compute
/// the value for the use. This always has the same type as the stride,
/// which may need to be casted to match the type of the use.
/// the value for the use. This always has the same type as the stride.
SCEVHandle getOffset() const { return Offset; }
/// setOffset - Assign a new offset to this use.
@ -78,13 +77,6 @@ public:
OperandValToReplace = Op;
}
/// isSigned - The stride (and thus also the Offset) of this use may be in
/// a narrower type than the use itself (OperandValToReplace->getType()).
/// When this is the case, isSigned() indicates whether the IV expression
/// should be signed-extended instead of zero-extended to fit the type of
/// the use.
bool isSigned() const { return IsSigned; }
/// isUseOfPostIncrementedValue - True if this should use the
/// post-incremented version of this IV, not the preincremented version.
/// This can only be set in special cases, such as the terminating setcc
@ -110,10 +102,6 @@ private:
/// that this IVStrideUse is representing.
WeakVH OperandValToReplace;
/// IsSigned - Determines whether the replacement value is sign or
/// zero extended to the type of the use.
bool IsSigned;
/// IsUseOfPostIncrementedValue - True if this should use the
/// post-incremented version of this IV, not the preincremented version.
bool IsUseOfPostIncrementedValue;
@ -170,9 +158,8 @@ public:
/// initial value and the operand that uses the IV.
ilist<IVStrideUse> Users;
void addUser(const SCEVHandle &Offset,Instruction *User, Value *Operand,
bool isSigned) {
Users.push_back(new IVStrideUse(this, Offset, User, Operand, isSigned));
void addUser(const SCEVHandle &Offset, Instruction *User, Value *Operand) {
Users.push_back(new IVStrideUse(this, Offset, User, Operand));
}
};

View File

@ -25,6 +25,7 @@
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/ADT/DenseMap.h"
#include <iosfwd>
namespace llvm {
@ -34,6 +35,7 @@ namespace llvm {
class SCEVHandle;
class ScalarEvolution;
class TargetData;
template<> struct DenseMapInfo<SCEVHandle>;
/// SCEV - This class represents an analyzed expression in the program. These
/// are reference-counted opaque objects that the client is not allowed to
@ -44,18 +46,22 @@ namespace llvm {
mutable unsigned RefCount;
friend class SCEVHandle;
friend class DenseMapInfo<SCEVHandle>;
void addRef() const { ++RefCount; }
void dropRef() const {
if (--RefCount == 0)
delete this;
}
const ScalarEvolution* parent;
SCEV(const SCEV &); // DO NOT IMPLEMENT
void operator=(const SCEV &); // DO NOT IMPLEMENT
protected:
virtual ~SCEV();
public:
explicit SCEV(unsigned SCEVTy) : SCEVType(SCEVTy), RefCount(0) {}
explicit SCEV(unsigned SCEVTy, const ScalarEvolution* p) :
SCEVType(SCEVTy), RefCount(0), parent(p) {}
unsigned getSCEVType() const { return SCEVType; }
@ -123,7 +129,7 @@ namespace llvm {
/// None of the standard SCEV operations are valid on this class, it is just a
/// marker.
struct SCEVCouldNotCompute : public SCEV {
SCEVCouldNotCompute();
SCEVCouldNotCompute(const ScalarEvolution* p);
~SCEVCouldNotCompute();
// None of these methods are valid for this object.
@ -197,6 +203,31 @@ namespace llvm {
template<> struct simplify_type<SCEVHandle>
: public simplify_type<const SCEVHandle> {};
// Specialize DenseMapInfo for SCEVHandle so that SCEVHandle may be used
// as a key in DenseMaps.
template<>
struct DenseMapInfo<SCEVHandle> {
static inline SCEVHandle getEmptyKey() {
static SCEVCouldNotCompute Empty(0);
if (Empty.RefCount == 0)
Empty.addRef();
return &Empty;
}
static inline SCEVHandle getTombstoneKey() {
static SCEVCouldNotCompute Tombstone(0);
if (Tombstone.RefCount == 0)
Tombstone.addRef();
return &Tombstone;
}
static unsigned getHashValue(const SCEVHandle &Val) {
return DenseMapInfo<const SCEV *>::getHashValue(Val);
}
static bool isEqual(const SCEVHandle &LHS, const SCEVHandle &RHS) {
return LHS == RHS;
}
static bool isPod() { return false; }
};
/// ScalarEvolution - This class is the main scalar evolution driver. Because
/// client code (intentionally) can't do much with the SCEV objects directly,
/// they must ask this class for services.
@ -301,6 +332,13 @@ namespace llvm {
const SCEVHandle &SymName,
const SCEVHandle &NewVal);
/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed. Return
/// CouldNotCompute if an intermediate computation overflows.
SCEVHandle getBECount(const SCEVHandle &Start,
const SCEVHandle &End,
const SCEVHandle &Step);
/// getBackedgeTakenInfo - Return the BackedgeTakenInfo for the given
/// loop, lazily computing new values if the loop hasn't been analyzed
/// yet.
@ -310,6 +348,31 @@ namespace llvm {
/// loop will iterate.
BackedgeTakenInfo ComputeBackedgeTakenCount(const Loop *L);
/// ComputeBackedgeTakenCountFromExit - Compute the number of times the
/// backedge of the specified loop will execute if it exits via the
/// specified block.
BackedgeTakenInfo ComputeBackedgeTakenCountFromExit(const Loop *L,
BasicBlock *ExitingBlock);
/// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of ExitCond, TBB, and FBB.
BackedgeTakenInfo
ComputeBackedgeTakenCountFromExitCond(const Loop *L,
Value *ExitCond,
BasicBlock *TBB,
BasicBlock *FBB);
/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of
/// times the backedge of the specified loop will execute if its exit
/// condition were a conditional branch of the ICmpInst ExitCond, TBB,
/// and FBB.
BackedgeTakenInfo
ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
ICmpInst *ExitCond,
BasicBlock *TBB,
BasicBlock *FBB);
/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition
/// of 'icmp op load X, cst', try to see if we can compute the trip count.
SCEVHandle
@ -390,28 +453,29 @@ namespace llvm {
SCEVHandle getConstant(ConstantInt *V);
SCEVHandle getConstant(const APInt& Val);
SCEVHandle getConstant(const Type *Ty, uint64_t V, bool isSigned = false);
SCEVHandle getTruncateExpr(const SCEVHandle &Op, const Type *Ty);
SCEVHandle getZeroExtendExpr(const SCEVHandle &Op, const Type *Ty);
SCEVHandle getSignExtendExpr(const SCEVHandle &Op, const Type *Ty);
SCEVHandle getAnyExtendExpr(const SCEVHandle &Op, const Type *Ty);
SCEVHandle getAddExpr(std::vector<SCEVHandle> &Ops);
SCEVHandle getAddExpr(SmallVectorImpl<SCEVHandle> &Ops);
SCEVHandle getAddExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) {
std::vector<SCEVHandle> Ops;
SmallVector<SCEVHandle, 2> Ops;
Ops.push_back(LHS);
Ops.push_back(RHS);
return getAddExpr(Ops);
}
SCEVHandle getAddExpr(const SCEVHandle &Op0, const SCEVHandle &Op1,
const SCEVHandle &Op2) {
std::vector<SCEVHandle> Ops;
SmallVector<SCEVHandle, 3> Ops;
Ops.push_back(Op0);
Ops.push_back(Op1);
Ops.push_back(Op2);
return getAddExpr(Ops);
}
SCEVHandle getMulExpr(std::vector<SCEVHandle> &Ops);
SCEVHandle getMulExpr(SmallVectorImpl<SCEVHandle> &Ops);
SCEVHandle getMulExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) {
std::vector<SCEVHandle> Ops;
SmallVector<SCEVHandle, 2> Ops;
Ops.push_back(LHS);
Ops.push_back(RHS);
return getMulExpr(Ops);
@ -419,17 +483,19 @@ namespace llvm {
SCEVHandle getUDivExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
SCEVHandle getAddRecExpr(const SCEVHandle &Start, const SCEVHandle &Step,
const Loop *L);
SCEVHandle getAddRecExpr(std::vector<SCEVHandle> &Operands,
SCEVHandle getAddRecExpr(SmallVectorImpl<SCEVHandle> &Operands,
const Loop *L);
SCEVHandle getAddRecExpr(const std::vector<SCEVHandle> &Operands,
SCEVHandle getAddRecExpr(const SmallVectorImpl<SCEVHandle> &Operands,
const Loop *L) {
std::vector<SCEVHandle> NewOp(Operands);
SmallVector<SCEVHandle, 4> NewOp(Operands.begin(), Operands.end());
return getAddRecExpr(NewOp, L);
}
SCEVHandle getSMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
SCEVHandle getSMaxExpr(std::vector<SCEVHandle> Operands);
SCEVHandle getSMaxExpr(SmallVectorImpl<SCEVHandle> &Operands);
SCEVHandle getUMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
SCEVHandle getUMaxExpr(std::vector<SCEVHandle> Operands);
SCEVHandle getUMaxExpr(SmallVectorImpl<SCEVHandle> &Operands);
SCEVHandle getSMinExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
SCEVHandle getUMinExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
SCEVHandle getUnknown(Value *V);
SCEVHandle getCouldNotCompute();
@ -481,6 +547,12 @@ namespace llvm {
/// specified signed integer value and return a SCEV for the constant.
SCEVHandle getIntegerSCEV(int Val, const Type *Ty);
/// getUMaxFromMismatchedTypes - Promote the operands to the wider of
/// the types using zero-extension, and then perform a umax operation
/// with them.
SCEVHandle getUMaxFromMismatchedTypes(const SCEVHandle &LHS,
const SCEVHandle &RHS);
/// hasSCEV - Return true if the SCEV for this value has already been
/// computed.
bool hasSCEV(Value *V) const;
@ -539,6 +611,20 @@ namespace llvm {
/// is deleted.
void forgetLoopBackedgeTakenCount(const Loop *L);
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
/// guaranteed to end in (at every loop iteration). It is, at the same time,
/// the minimum number of times S is divisible by 2. For example, given {4,+,8}
/// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
uint32_t GetMinTrailingZeros(const SCEVHandle &S);
/// GetMinLeadingZeros - Determine the minimum number of zero bits that S is
/// guaranteed to begin with (at every loop iteration).
uint32_t GetMinLeadingZeros(const SCEVHandle &S);
/// GetMinSignBits - Determine the minimum number of sign bits that S is
/// guaranteed to begin with.
uint32_t GetMinSignBits(const SCEVHandle &S);
virtual bool runOnFunction(Function &F);
virtual void releaseMemory();
virtual void getAnalysisUsage(AnalysisUsage &AU) const;

View File

@ -36,7 +36,8 @@ namespace llvm {
friend class ScalarEvolution;
ConstantInt *V;
explicit SCEVConstant(ConstantInt *v) : SCEV(scConstant), V(v) {}
explicit SCEVConstant(ConstantInt *v, const ScalarEvolution* p) :
SCEV(scConstant, p), V(v) {}
virtual ~SCEVConstant();
public:
@ -79,7 +80,8 @@ namespace llvm {
SCEVHandle Op;
const Type *Ty;
SCEVCastExpr(unsigned SCEVTy, const SCEVHandle &op, const Type *ty);
SCEVCastExpr(unsigned SCEVTy, const SCEVHandle &op, const Type *ty,
const ScalarEvolution* p);
virtual ~SCEVCastExpr();
public:
@ -112,7 +114,8 @@ namespace llvm {
class SCEVTruncateExpr : public SCEVCastExpr {
friend class ScalarEvolution;
SCEVTruncateExpr(const SCEVHandle &op, const Type *ty);
SCEVTruncateExpr(const SCEVHandle &op, const Type *ty,
const ScalarEvolution* p);
virtual ~SCEVTruncateExpr();
public:
@ -141,7 +144,8 @@ namespace llvm {
class SCEVZeroExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
SCEVZeroExtendExpr(const SCEVHandle &op, const Type *ty);
SCEVZeroExtendExpr(const SCEVHandle &op, const Type *ty,
const ScalarEvolution* p);
virtual ~SCEVZeroExtendExpr();
public:
@ -170,7 +174,8 @@ namespace llvm {
class SCEVSignExtendExpr : public SCEVCastExpr {
friend class ScalarEvolution;
SCEVSignExtendExpr(const SCEVHandle &op, const Type *ty);
SCEVSignExtendExpr(const SCEVHandle &op, const Type *ty,
const ScalarEvolution* p);
virtual ~SCEVSignExtendExpr();
public:
@ -199,10 +204,11 @@ namespace llvm {
///
class SCEVNAryExpr : public SCEV {
protected:
std::vector<SCEVHandle> Operands;
SmallVector<SCEVHandle, 8> Operands;
SCEVNAryExpr(enum SCEVTypes T, const std::vector<SCEVHandle> &ops)
: SCEV(T), Operands(ops) {}
SCEVNAryExpr(enum SCEVTypes T, const SmallVectorImpl<SCEVHandle> &ops,
const ScalarEvolution* p)
: SCEV(T, p), Operands(ops.begin(), ops.end()) {}
virtual ~SCEVNAryExpr() {}
public:
@ -212,8 +218,8 @@ namespace llvm {
return Operands[i];
}
const std::vector<SCEVHandle> &getOperands() const { return Operands; }
typedef std::vector<SCEVHandle>::const_iterator op_iterator;
const SmallVectorImpl<SCEVHandle> &getOperands() const { return Operands; }
typedef SmallVectorImpl<SCEVHandle>::const_iterator op_iterator;
op_iterator op_begin() const { return Operands.begin(); }
op_iterator op_end() const { return Operands.end(); }
@ -259,8 +265,10 @@ namespace llvm {
///
class SCEVCommutativeExpr : public SCEVNAryExpr {
protected:
SCEVCommutativeExpr(enum SCEVTypes T, const std::vector<SCEVHandle> &ops)
: SCEVNAryExpr(T, ops) {}
SCEVCommutativeExpr(enum SCEVTypes T,
const SmallVectorImpl<SCEVHandle> &ops,
const ScalarEvolution* p)
: SCEVNAryExpr(T, ops, p) {}
~SCEVCommutativeExpr();
public:
@ -289,8 +297,9 @@ namespace llvm {
class SCEVAddExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
explicit SCEVAddExpr(const std::vector<SCEVHandle> &ops)
: SCEVCommutativeExpr(scAddExpr, ops) {
explicit SCEVAddExpr(const SmallVectorImpl<SCEVHandle> &ops,
const ScalarEvolution* p)
: SCEVCommutativeExpr(scAddExpr, ops, p) {
}
public:
@ -309,8 +318,9 @@ namespace llvm {
class SCEVMulExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
explicit SCEVMulExpr(const std::vector<SCEVHandle> &ops)
: SCEVCommutativeExpr(scMulExpr, ops) {
explicit SCEVMulExpr(const SmallVectorImpl<SCEVHandle> &ops,
const ScalarEvolution* p)
: SCEVCommutativeExpr(scMulExpr, ops, p) {
}
public:
@ -331,8 +341,9 @@ namespace llvm {
friend class ScalarEvolution;
SCEVHandle LHS, RHS;
SCEVUDivExpr(const SCEVHandle &lhs, const SCEVHandle &rhs)
: SCEV(scUDivExpr), LHS(lhs), RHS(rhs) {}
SCEVUDivExpr(const SCEVHandle &lhs, const SCEVHandle &rhs,
const ScalarEvolution* p)
: SCEV(scUDivExpr, p), LHS(lhs), RHS(rhs) {}
virtual ~SCEVUDivExpr();
public:
@ -387,8 +398,9 @@ namespace llvm {
const Loop *L;
SCEVAddRecExpr(const std::vector<SCEVHandle> &ops, const Loop *l)
: SCEVNAryExpr(scAddRecExpr, ops), L(l) {
SCEVAddRecExpr(const SmallVectorImpl<SCEVHandle> &ops, const Loop *l,
const ScalarEvolution* p)
: SCEVNAryExpr(scAddRecExpr, ops, p), L(l) {
for (size_t i = 0, e = Operands.size(); i != e; ++i)
assert(Operands[i]->isLoopInvariant(l) &&
"Operands of AddRec must be loop-invariant!");
@ -404,7 +416,7 @@ namespace llvm {
/// of degree N, it returns a chrec of degree N-1.
SCEVHandle getStepRecurrence(ScalarEvolution &SE) const {
if (isAffine()) return getOperand(1);
return SE.getAddRecExpr(std::vector<SCEVHandle>(op_begin()+1,op_end()),
return SE.getAddRecExpr(SmallVector<SCEVHandle, 3>(op_begin()+1,op_end()),
getLoop());
}
@ -463,8 +475,9 @@ namespace llvm {
class SCEVSMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
explicit SCEVSMaxExpr(const std::vector<SCEVHandle> &ops)
: SCEVCommutativeExpr(scSMaxExpr, ops) {
explicit SCEVSMaxExpr(const SmallVectorImpl<SCEVHandle> &ops,
const ScalarEvolution* p)
: SCEVCommutativeExpr(scSMaxExpr, ops, p) {
}
public:
@ -484,8 +497,9 @@ namespace llvm {
class SCEVUMaxExpr : public SCEVCommutativeExpr {
friend class ScalarEvolution;
explicit SCEVUMaxExpr(const std::vector<SCEVHandle> &ops)
: SCEVCommutativeExpr(scUMaxExpr, ops) {
explicit SCEVUMaxExpr(const SmallVectorImpl<SCEVHandle> &ops,
const ScalarEvolution* p)
: SCEVCommutativeExpr(scUMaxExpr, ops, p) {
}
public:
@ -508,7 +522,8 @@ namespace llvm {
friend class ScalarEvolution;
Value *V;
explicit SCEVUnknown(Value *v) : SCEV(scUnknown), V(v) {}
explicit SCEVUnknown(Value *v, const ScalarEvolution* p) :
SCEV(scUnknown, p), V(v) {}
protected:
~SCEVUnknown();

View File

@ -57,7 +57,18 @@ namespace CallingConv {
/// X86_FastCall - 'fast' analog of X86_StdCall. Passes first two arguments
/// in ECX:EDX registers, others - via stack. Callee is responsible for
/// stack cleaning.
X86_FastCall = 65
X86_FastCall = 65,
/// ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete,
/// but still used on some targets).
ARM_APCS = 66,
/// ARM_AAPCS - ARM Architecture Procedure Calling Standard calling
/// convention (aka EABI). Soft float variant.
ARM_AAPCS = 67,
/// ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
ARM_AAPCS_VFP = 68
};
} // End CallingConv namespace

View File

@ -16,7 +16,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/CodeGen/DebugLoc.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
namespace llvm {

View File

@ -29,29 +29,112 @@
namespace llvm {
class MachineInstr;
class MachineRegisterInfo;
class TargetRegisterInfo;
struct LiveInterval;
/// VNInfo - If the value number definition is undefined (e.g. phi
/// merge point), it contains ~0u,x. If the value number is not in use, it
/// contains ~1u,x to indicate that the value # is not used.
/// def - Instruction # of the definition.
/// - or reg # of the definition if it's a stack slot liveinterval.
/// copy - Copy iff val# is defined by a copy; zero otherwise.
/// hasPHIKill - One or more of the kills are PHI nodes.
/// redefByEC - Re-defined by early clobber somewhere during the live range.
/// kills - Instruction # of the kills.
struct VNInfo {
/// VNInfo - Value Number Information.
/// This class holds information about a machine level value, including
/// definition and use points.
///
/// Care must be taken in interpreting the def index of the value. The
/// following rules apply:
///
/// If the isDefAccurate() method returns false then def does not contain the
/// index of the defining MachineInstr, or even (necessarily) to a
/// MachineInstr at all. In general such a def index is not meaningful
/// and should not be used. The exception is that, for values originally
/// defined by PHI instructions, after PHI elimination def will contain the
/// index of the MBB in which the PHI originally existed. This can be used
/// to insert code (spills or copies) which deals with the value, which will
/// be live in to the block.
class VNInfo {
private:
enum {
HAS_PHI_KILL = 1,
REDEF_BY_EC = 1 << 1,
IS_PHI_DEF = 1 << 2,
IS_UNUSED = 1 << 3,
IS_DEF_ACCURATE = 1 << 4
};
unsigned char flags;
public:
/// The ID number of this value.
unsigned id;
/// The index of the defining instruction (if isDefAccurate() returns true).
unsigned def;
MachineInstr *copy;
bool hasPHIKill : 1;
bool redefByEC : 1;
SmallVector<unsigned, 4> kills;
VNInfo()
: id(~1U), def(~1U), copy(0), hasPHIKill(false), redefByEC(false) {}
: flags(IS_UNUSED), id(~1U), def(0), copy(0) {}
/// VNInfo constructor.
/// d is presumed to point to the actual defining instr. If it doesn't
/// setIsDefAccurate(false) should be called after construction.
VNInfo(unsigned i, unsigned d, MachineInstr *c)
: id(i), def(d), copy(c), hasPHIKill(false), redefByEC(false) {}
: flags(IS_DEF_ACCURATE), id(i), def(d), copy(c) {}
/// VNInfo constructor, copies values from orig, except for the value number.
VNInfo(unsigned i, const VNInfo &orig)
: flags(orig.flags), id(i), def(orig.def), copy(orig.copy),
kills(orig.kills) {}
/// Used for copying value number info.
unsigned getFlags() const { return flags; }
void setFlags(unsigned flags) { this->flags = flags; }
/// Returns true if one or more kills are PHI nodes.
bool hasPHIKill() const { return flags & HAS_PHI_KILL; }
void setHasPHIKill(bool hasKill) {
if (hasKill)
flags |= HAS_PHI_KILL;
else
flags &= ~HAS_PHI_KILL;
}
/// Returns true if this value is re-defined by an early clobber somewhere
/// during the live range.
bool hasRedefByEC() const { return flags & REDEF_BY_EC; }
void setHasRedefByEC(bool hasRedef) {
if (hasRedef)
flags |= REDEF_BY_EC;
else
flags &= ~REDEF_BY_EC;
}
/// Returns true if this value is defined by a PHI instruction (or was,
/// PHI instructions may have been eliminated).
bool isPHIDef() const { return flags & IS_PHI_DEF; }
void setIsPHIDef(bool phiDef) {
if (phiDef)
flags |= IS_PHI_DEF;
else
flags &= ~IS_PHI_DEF;
}
/// Returns true if this value is unused.
bool isUnused() const { return flags & IS_UNUSED; }
void setIsUnused(bool unused) {
if (unused)
flags |= IS_UNUSED;
else
flags &= ~IS_UNUSED;
}
/// Returns true if the def is accurate.
bool isDefAccurate() const { return flags & IS_DEF_ACCURATE; }
void setIsDefAccurate(bool defAccurate) {
if (defAccurate)
flags |= IS_DEF_ACCURATE;
else
flags &= ~IS_DEF_ACCURATE;
}
};
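// For illustration only (a sketch, not part of this header): client code is
// expected to consult these flags before trusting the def index. The helper
// name below is hypothetical.
inline void inspectVNInfoDef(const VNInfo *VNI) {
  if (VNI->isDefAccurate()) {
    // VNI->def is the index of the defining MachineInstr.
  } else if (VNI->isPHIDef()) {
    // After PHI elimination, VNI->def holds the index of the MBB in which
    // the PHI originally lived; code handling the value can be placed there.
  } else {
    // The def index is not meaningful and should not be used.
  }
}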
/// LiveRange structure - This represents a simple register range in the
@ -108,7 +191,6 @@ namespace llvm {
unsigned reg; // the register or stack slot of this interval
// if the top bits is set, it represents a stack slot.
float weight; // weight of this interval
unsigned short preference; // preferred register for this interval
Ranges ranges; // the ranges in which this register is live
VNInfoList valnos; // value#'s
@ -134,7 +216,7 @@ namespace llvm {
};
LiveInterval(unsigned Reg, float Weight, bool IsSS = false)
: reg(Reg), weight(Weight), preference(0) {
: reg(Reg), weight(Weight) {
if (IsSS)
reg = reg | (1U << (sizeof(unsigned)*CHAR_BIT-1));
}
@ -210,15 +292,17 @@ namespace llvm {
void copyValNumInfo(VNInfo *DstValNo, const VNInfo *SrcValNo) {
DstValNo->def = SrcValNo->def;
DstValNo->copy = SrcValNo->copy;
DstValNo->hasPHIKill = SrcValNo->hasPHIKill;
DstValNo->redefByEC = SrcValNo->redefByEC;
DstValNo->setFlags(SrcValNo->getFlags());
DstValNo->kills = SrcValNo->kills;
}
/// getNextValue - Create a new value number and return it. MIIdx specifies
/// the instruction that defines the value number.
VNInfo *getNextValue(unsigned MIIdx, MachineInstr *CopyMI,
BumpPtrAllocator &VNInfoAllocator) {
bool isDefAccurate, BumpPtrAllocator &VNInfoAllocator) {
assert(MIIdx != ~0u && MIIdx != ~1u &&
"PHI def / unused flags should now be passed explicitly.");
#ifdef __GNUC__
unsigned Alignment = (unsigned)__alignof__(VNInfo);
#else
@ -229,6 +313,26 @@ namespace llvm {
static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo),
Alignment));
new (VNI) VNInfo((unsigned)valnos.size(), MIIdx, CopyMI);
VNI->setIsDefAccurate(isDefAccurate);
valnos.push_back(VNI);
return VNI;
}
/// Create a copy of the given value. The new value will be identical except
/// for the Value number.
VNInfo *createValueCopy(const VNInfo *orig, BumpPtrAllocator &VNInfoAllocator) {
#ifdef __GNUC__
unsigned Alignment = (unsigned)__alignof__(VNInfo);
#else
// FIXME: ugly.
unsigned Alignment = 8;
#endif
VNInfo *VNI =
static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo),
Alignment));
new (VNI) VNInfo((unsigned)valnos.size(), *orig);
valnos.push_back(VNI);
return VNI;
}
@ -339,7 +443,8 @@ namespace llvm {
/// Copy - Copy the specified live interval. This copies all the fields
/// except for the register of the interval.
void Copy(const LiveInterval &RHS, BumpPtrAllocator &VNInfoAllocator);
void Copy(const LiveInterval &RHS, MachineRegisterInfo *MRI,
BumpPtrAllocator &VNInfoAllocator);
bool empty() const { return ranges.empty(); }
@ -416,7 +521,8 @@ namespace llvm {
/// the intervals are not joinable, this aborts.
void join(LiveInterval &Other, const int *ValNoAssignments,
const int *RHSValNoAssignments,
SmallVector<VNInfo*, 16> &NewVNInfo);
SmallVector<VNInfo*, 16> &NewVNInfo,
MachineRegisterInfo *MRI);
/// isInOneLiveRange - Return true if the range specified is entirely in the
/// a single LiveRange of the live interval.

View File

@ -19,7 +19,7 @@
#define LLVM_CODEGEN_MACHINEFUNCTION_H
#include "llvm/ADT/ilist.h"
#include "llvm/CodeGen/DebugLoc.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/Support/Annotation.h"
#include "llvm/Support/Allocator.h"

View File

@ -22,7 +22,7 @@
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/Target/TargetInstrDesc.h"
#include "llvm/CodeGen/DebugLoc.h"
#include "llvm/Support/DebugLoc.h"
#include <list>
#include <vector>
@ -104,7 +104,7 @@ public:
/// getDebugLoc - Returns the debug location id of this MachineInstr.
///
const DebugLoc getDebugLoc() const { return debugLoc; }
DebugLoc getDebugLoc() const { return debugLoc; }
/// getDesc - Returns the target instruction descriptor of this
/// MachineInstr.

View File

@ -33,15 +33,15 @@ class raw_ostream;
class MachineOperand {
public:
enum MachineOperandType {
MO_Register, ///< Register operand.
MO_Immediate, ///< Immediate operand
MO_FPImmediate, ///< Floating-point immediate operand
MO_MachineBasicBlock, ///< MachineBasicBlock reference
MO_FrameIndex, ///< Abstract Stack Frame Index
MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
MO_ExternalSymbol, ///< Name of external global symbol
MO_GlobalAddress ///< Address of a global value
MO_Register, ///< Register operand.
MO_Immediate, ///< Immediate operand
MO_FPImmediate, ///< Floating-point immediate operand
MO_MachineBasicBlock, ///< MachineBasicBlock reference
MO_FrameIndex, ///< Abstract Stack Frame Index
MO_ConstantPoolIndex, ///< Address of indexed Constant in Constant Pool
MO_JumpTableIndex, ///< Address of indexed Jump Table for switch
MO_ExternalSymbol, ///< Name of external global symbol
MO_GlobalAddress ///< Address of a global value
};
private:

View File

@ -37,6 +37,15 @@ class MachineRegisterInfo {
/// virtual registers. For each target register class, it keeps a list of
/// virtual registers belonging to the class.
std::vector<std::vector<unsigned> > RegClass2VRegMap;
/// RegAllocHints - This vector records register allocation hints for virtual
/// registers. For each virtual register, it keeps a register and hint type
/// pair making up the allocation hint. Hint type is target specific except
/// for the value 0 which means the second value of the pair is the preferred
/// register for allocation. For example, if the hint is <0, 1024>, it means
/// the allocator should prefer the physical register allocated to the virtual
/// register of the hint.
std::vector<std::pair<unsigned, unsigned> > RegAllocHints;
/// PhysRegUseDefLists - This is an array of the head of the use/def list for
/// physical registers.
@ -170,7 +179,25 @@ public:
std::vector<unsigned> &getRegClassVirtRegs(const TargetRegisterClass *RC) {
return RegClass2VRegMap[RC->getID()];
}
/// setRegAllocationHint - Specify a register allocation hint for the
/// specified virtual register.
void setRegAllocationHint(unsigned Reg, unsigned Type, unsigned PrefReg) {
Reg -= TargetRegisterInfo::FirstVirtualRegister;
assert(Reg < VRegInfo.size() && "Invalid vreg!");
RegAllocHints[Reg].first = Type;
RegAllocHints[Reg].second = PrefReg;
}
/// getRegAllocationHint - Return the register allocation hint for the
/// specified virtual register.
std::pair<unsigned, unsigned>
getRegAllocationHint(unsigned Reg) const {
Reg -= TargetRegisterInfo::FirstVirtualRegister;
assert(Reg < VRegInfo.size() && "Invalid vreg!");
return RegAllocHints[Reg];
}
//===--------------------------------------------------------------------===//
// Physical Register Use Info
//===--------------------------------------------------------------------===//
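// For illustration only (a sketch shown out of line; the function name and
// register numbers are placeholders): a pass that wants the allocator to
// prefer a particular physical register for a virtual register records a
// hint of type 0 and can read it back later.
inline void hintPreferredReg(MachineRegisterInfo &MRI,
                             unsigned VReg, unsigned PhysReg) {
  MRI.setRegAllocationHint(VReg, /*Type=*/0, PhysReg);
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VReg);
  (void)Hint; // Hint.first == 0, Hint.second == PhysReg
}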

View File

@ -153,6 +153,8 @@ namespace RTLIB {
FPROUND_PPCF128_F32,
FPROUND_F80_F64,
FPROUND_PPCF128_F64,
FPTOSINT_F32_I8,
FPTOSINT_F32_I16,
FPTOSINT_F32_I32,
FPTOSINT_F32_I64,
FPTOSINT_F32_I128,
@ -165,6 +167,8 @@ namespace RTLIB {
FPTOSINT_PPCF128_I32,
FPTOSINT_PPCF128_I64,
FPTOSINT_PPCF128_I128,
FPTOUINT_F32_I8,
FPTOUINT_F32_I16,
FPTOUINT_F32_I32,
FPTOUINT_F32_I64,
FPTOUINT_F32_I128,

View File

@ -31,7 +31,7 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/CodeGen/DebugLoc.h"
#include "llvm/Support/DebugLoc.h"
#include <cassert>
#include <climits>

View File

@ -0,0 +1,29 @@
//===- llvm/Config/AsmPrinters.def - LLVM Assembly Printers -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file enumerates all of the assembly-language printers
// supported by this build of LLVM. Clients of this file should define
// the LLVM_ASM_PRINTER macro to be a function-like macro with a
// single parameter (the name of the target whose assembly can be
// generated); including this file will then enumerate all of the
// targets with assembly printers.
//
// The set of targets supported by LLVM is generated at configuration
// time, at which point this header is generated. Do not modify this
// header directly.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ASM_PRINTER
# error Please define the macro LLVM_ASM_PRINTER(TargetName)
#endif
@LLVM_ENUM_ASM_PRINTERS@
#undef LLVM_ASM_PRINTER

View File

@ -0,0 +1,28 @@
//===- llvm/Config/Targets.def - LLVM Target Architectures ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file enumerates all of the target architectures supported by
// this build of LLVM. Clients of this file should define the
// LLVM_TARGET macro to be a function-like macro with a single
// parameter (the name of the target); including this file will then
// enumerate all of the targets.
//
// The set of targets supported by LLVM is generated at configuration
// time, at which point this header is generated. Do not modify this
// header directly.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET
# error Please define the macro LLVM_TARGET(TargetName)
#endif
@LLVM_ENUM_TARGETS@
#undef LLVM_TARGET
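For illustration, a client of this generated header defines the macro and then
includes the file; each include stamps out one line per configured target. The
InitializeXyzTarget functions below are hypothetical placeholders, and the
same pattern applies to the AsmPrinters.def file above with LLVM_ASM_PRINTER.

#define LLVM_TARGET(TargetName) void Initialize##TargetName##Target();
#include "llvm/Config/Targets.def"  // declares one function per target

inline void InitializeAllTargets() {
#define LLVM_TARGET(TargetName) Initialize##TargetName##Target();
#include "llvm/Config/Targets.def"  // calls each declared function
}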

View File

@ -580,3 +580,6 @@
/* Define to a function implementing strdup */
#cmakedefine strdup ${strdup}
/* Native LLVM architecture */
#cmakedefine LLVM_NATIVE_ARCH ${LLVM_NATIVE_ARCH}

View File

@ -285,6 +285,9 @@
/* Have pthread_mutex_lock */
#undef HAVE_PTHREAD_MUTEX_LOCK
/* Have pthread_rwlock_init */
#undef HAVE_PTHREAD_RWLOCK_INIT
/* Define to 1 if srand48/lrand48/drand48 exist in <stdlib.h> */
#undef HAVE_RAND48
@ -473,6 +476,9 @@
/* Build multithreading support into LLVM */
#undef LLVM_MULTITHREADED
/* LLVM architecture name for the native architecture, if available */
#undef LLVM_NATIVE_ARCH
/* Define if this is Unixish platform */
#undef LLVM_ON_UNIX

View File

@ -102,19 +102,28 @@ public:
return CreateTrueFalseVals(false);
}
/// Return a ConstantInt with the specified value for the specified type. The
/// value V will be canonicalized to an unsigned APInt. Accessing it with
/// either getSExtValue() or getZExtValue() will yield a correctly sized and
/// signed value for the type Ty.
/// Return a ConstantInt with the specified integer value for the specified
/// type. If the type is wider than 64 bits, the value will be zero-extended
/// to fit the type, unless isSigned is true, in which case the value will
/// be interpreted as a 64-bit signed integer and sign-extended to fit
/// the type.
/// @brief Get a ConstantInt for a specific value.
static ConstantInt *get(const Type *Ty, uint64_t V, bool isSigned = false);
static ConstantInt *get(const IntegerType *Ty,
uint64_t V, bool isSigned = false);
/// If Ty is a vector type, return a Constant with a splat of the given
/// value. Otherwise return a ConstantInt for the given value.
static Constant *get(const Type *Ty, uint64_t V, bool isSigned = false);
/// Return a ConstantInt with the specified value for the specified type. The
/// value V will be canonicalized to an unsigned APInt. Accessing it with
/// either getSExtValue() or getZExtValue() will yield a correctly sized and
/// signed value for the type Ty.
/// @brief Get a ConstantInt for a specific signed value.
static ConstantInt *getSigned(const Type *Ty, int64_t V) {
static ConstantInt *getSigned(const IntegerType *Ty, int64_t V) {
return get(Ty, V, true);
}
static Constant *getSigned(const Type *Ty, int64_t V) {
return get(Ty, V, true);
}
@ -122,6 +131,10 @@ public:
/// type is the integer type that corresponds to the bit width of the value.
static ConstantInt *get(const APInt &V);
/// If Ty is a vector type, return a Constant with a splat of the given
/// value. Otherwise return a ConstantInt for the given value.
static Constant *get(const Type *Ty, const APInt &V);
/// getType - Specialize the getType() method to always return an IntegerType,
/// which reduces the amount of casting needed in parts of the compiler.
///
@ -248,10 +261,11 @@ public:
/// get() - Static factory methods - Return objects of the specified value
static ConstantFP *get(const APFloat &V);
/// get() - This returns a constant fp for the specified value in the
/// specified type. This should only be used for simple constant values like
/// 2.0/1.0 etc, that are known-valid both as double and as the target format.
static ConstantFP *get(const Type *Ty, double V);
/// get() - This returns a ConstantFP, or a vector containing a splat of a
/// ConstantFP, for the specified value in the specified type. This should
/// only be used for simple constant values like 2.0/1.0 etc, that are
/// known-valid both as host double and as the target format.
static Constant *get(const Type *Ty, double V);
/// isValueValidForType - return true if Ty is big enough to represent V.
static bool isValueValidForType(const Type *Ty, const APFloat& V);

View File

@ -50,6 +50,10 @@ protected:
///
void dropAllTypeUses();
/// unlockedRefineAbstractTypeTo - Internal version of refineAbstractTypeTo
/// that performs no locking. Only used for internal recursion.
void unlockedRefineAbstractTypeTo(const Type *NewType);
public:
//===--------------------------------------------------------------------===//

include/llvm/MC/MCInst.h (new file, 125 lines)
View File

@ -0,0 +1,125 @@
//===-- llvm/MC/MCInst.h - MCInst class -------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the MCInst and MCOperand classes, which
// is the basic representation used to represent low-level machine code
// instructions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_MC_MCINST_H
#define LLVM_MC_MCINST_H
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/Support/DebugLoc.h"
namespace llvm {
/// MCOperand - Instances of this class represent operands of the MCInst class.
/// This is a simple discriminated union.
class MCOperand {
enum MachineOperandType {
kInvalid, ///< Uninitialized.
kRegister, ///< Register operand.
kImmediate, ///< Immediate operand.
kMBBLabel ///< Basic block label.
};
unsigned char Kind;
union {
unsigned RegVal;
int64_t ImmVal;
struct {
unsigned FunctionNo;
unsigned BlockNo;
} MBBLabel;
};
public:
MCOperand() : Kind(kInvalid) {}
MCOperand(const MCOperand &RHS) { *this = RHS; }
bool isReg() const { return Kind == kRegister; }
bool isImm() const { return Kind == kImmediate; }
bool isMBBLabel() const { return Kind == kMBBLabel; }
/// getReg - Returns the register number.
unsigned getReg() const {
assert(isReg() && "This is not a register operand!");
return RegVal;
}
/// setReg - Set the register number.
void setReg(unsigned Reg) {
assert(isReg() && "This is not a register operand!");
RegVal = Reg;
}
int64_t getImm() const {
assert(isImm() && "This is not an immediate");
return ImmVal;
}
void setImm(int64_t Val) {
assert(isImm() && "This is not an immediate");
ImmVal = Val;
}
unsigned getMBBLabelFunction() const {
assert(isMBBLabel() && "Wrong accessor");
return MBBLabel.FunctionNo;
}
unsigned getMBBLabelBlock() const {
assert(isMBBLabel() && "Wrong accessor");
return MBBLabel.BlockNo;
}
void MakeReg(unsigned Reg) {
Kind = kRegister;
RegVal = Reg;
}
void MakeImm(int64_t Val) {
Kind = kImmediate;
ImmVal = Val;
}
void MakeMBBLabel(unsigned Fn, unsigned MBB) {
Kind = kMBBLabel;
MBBLabel.FunctionNo = Fn;
MBBLabel.BlockNo = MBB;
}
};
/// MCInst - Instances of this class represent a single low-level machine
/// instruction.
class MCInst {
unsigned Opcode;
SmallVector<MCOperand, 8> Operands;
public:
MCInst() : Opcode(~0U) {}
void setOpcode(unsigned Op) { Opcode = Op; }
unsigned getOpcode() const { return Opcode; }
DebugLoc getDebugLoc() const { return DebugLoc(); }
const MCOperand &getOperand(unsigned i) const { return Operands[i]; }
MCOperand &getOperand(unsigned i) { return Operands[i]; }
void addOperand(const MCOperand &Op) {
Operands.push_back(Op);
}
};
} // end namespace llvm
#endif
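A usage sketch (for illustration; the opcode and register numbers are
arbitrary placeholders rather than real target encodings):

#include "llvm/MC/MCInst.h"
using namespace llvm;

MCInst makeExampleInst() {
  MCInst Inst;
  Inst.setOpcode(1);   // some target-specific opcode number

  MCOperand Reg;
  Reg.MakeReg(2);      // some target-specific register number
  Inst.addOperand(Reg);

  MCOperand Imm;
  Imm.MakeImm(42);
  Inst.addOperand(Imm);
  return Inst;
}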

View File

@ -539,7 +539,7 @@ template<>
class parser<bool> : public basic_parser<bool> {
const char *ArgStr;
public:
// parse - Return true on error.
bool parse(Option &O, const char *ArgName, const std::string &Arg, bool &Val);
@ -1105,7 +1105,7 @@ public:
}
};
// multi_arg - Modifier to set the number of additional values.
// multi_val - Modifier to set the number of additional values.
struct multi_val {
unsigned AdditionalVals;
explicit multi_val(unsigned N) : AdditionalVals(N) {}

View File

@ -1,4 +1,4 @@
//===---- llvm/CodeGen/DebugLoc.h - Debug Location Information --*- C++ -*-===//
//===---- llvm/DebugLoc.h - Debug Location Information ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -7,13 +7,13 @@
//
//===----------------------------------------------------------------------===//
//
// This file defines a number of light weight data structures used by the code
// generator to describe and track debug location information.
// This file defines a number of light weight data structures used
// to describe and track debug location information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_DEBUGLOC_H
#define LLVM_CODEGEN_DEBUGLOC_H
#ifndef LLVM_DEBUGLOC_H
#define LLVM_DEBUGLOC_H
#include "llvm/ADT/DenseMap.h"
#include <vector>
@ -98,4 +98,4 @@ namespace llvm {
} // end namespace llvm
#endif /* LLVM_CODEGEN_DEBUGLOC_H */
#endif /* LLVM_DEBUGLOC_H */

View File

@ -17,6 +17,7 @@
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Function.h"
#include "llvm/Support/ConstantFolder.h"
@ -202,7 +203,7 @@ public:
Value *CreateFMul(Value *LHS, Value *RHS, const char *Name = "") {
if (Constant *LC = dyn_cast<Constant>(LHS))
if (Constant *RC = dyn_cast<Constant>(RHS))
return Folder.CreateMul(LC, RC);
return Folder.CreateFMul(LC, RC);
return Insert(BinaryOperator::CreateFMul(LHS, RHS), Name);
}
Value *CreateUDiv(Value *LHS, Value *RHS, const char *Name = "") {
@ -291,6 +292,11 @@ public:
return Folder.CreateNeg(VC);
return Insert(BinaryOperator::CreateNeg(V), Name);
}
Value *CreateFNeg(Value *V, const char *Name = "") {
if (Constant *VC = dyn_cast<Constant>(V))
return Folder.CreateFNeg(VC);
return Insert(BinaryOperator::CreateFNeg(V), Name);
}
Value *CreateNot(Value *V, const char *Name = "") {
if (Constant *VC = dyn_cast<Constant>(V))
return Folder.CreateNot(VC);

View File

@ -15,6 +15,7 @@
#define LLVM_SUPPORT_MANAGED_STATIC_H
#include "llvm/System/Atomic.h"
#include "llvm/System/Threading.h"
namespace llvm {
@ -60,28 +61,28 @@ public:
// Accessors.
C &operator*() {
void* tmp = Ptr;
sys::MemoryFence();
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>);
return *static_cast<C*>(Ptr);
}
C *operator->() {
void* tmp = Ptr;
sys::MemoryFence();
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>);
return static_cast<C*>(Ptr);
}
const C &operator*() const {
void* tmp = Ptr;
sys::MemoryFence();
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>);
return *static_cast<C*>(Ptr);
}
const C *operator->() const {
void* tmp = Ptr;
sys::MemoryFence();
if (llvm_is_multithreaded()) sys::MemoryFence();
if (!tmp) RegisterManagedStatic(object_creator<C>, object_deleter<C>);
return static_cast<C*>(Ptr);
@ -94,13 +95,6 @@ public:
void Register() { RegisterManagedStatic(0, CleanupFn); }
};
/// llvm_start_multithreaded - Allocate and initialize structures needed to
/// make LLVM safe for multithreading. The return value indicates whether
/// multithreaded initialization succeeded. LLVM will still be operational
/// on "failed" return, but will not be safe to run multithreaded.
bool llvm_start_multithreaded();
/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
void llvm_shutdown();

View File

@ -1,4 +1,4 @@
//===- TGSourceMgr.h - Manager for Source Buffers & Diagnostics -*- C++ -*-===//
//===- SourceMgr.h - Manager for Source Buffers & Diagnostics ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@ -7,12 +7,14 @@
//
//===----------------------------------------------------------------------===//
//
// This file declares the TGSourceMgr class.
// This file declares the SourceMgr class. This class is used as a simple
// substrate for diagnostics, #include handling, and other low level things for
// simple parsers.
//
//===----------------------------------------------------------------------===//
#ifndef TGSOURCEMGR_H
#define TGSOURCEMGR_H
#ifndef SUPPORT_SOURCEMGR_H
#define SUPPORT_SOURCEMGR_H
#include <string>
#include <vector>
@ -20,46 +22,54 @@
namespace llvm {
class MemoryBuffer;
class TGSourceMgr;
class SourceMgr;
class TGLoc {
class SMLoc {
const char *Ptr;
public:
TGLoc() : Ptr(0) {}
TGLoc(const TGLoc &RHS) : Ptr(RHS.Ptr) {}
SMLoc() : Ptr(0) {}
SMLoc(const SMLoc &RHS) : Ptr(RHS.Ptr) {}
bool operator==(const TGLoc &RHS) const { return RHS.Ptr == Ptr; }
bool operator!=(const TGLoc &RHS) const { return RHS.Ptr != Ptr; }
bool operator==(const SMLoc &RHS) const { return RHS.Ptr == Ptr; }
bool operator!=(const SMLoc &RHS) const { return RHS.Ptr != Ptr; }
const char *getPointer() const { return Ptr; }
static TGLoc getFromPointer(const char *Ptr) {
TGLoc L;
static SMLoc getFromPointer(const char *Ptr) {
SMLoc L;
L.Ptr = Ptr;
return L;
}
};
/// TGSourceMgr - This owns the files read by tblgen, handles include stacks,
/// SourceMgr - This owns the files read by tblgen, handles include stacks,
/// and handles printing of diagnostics.
class TGSourceMgr {
class SourceMgr {
struct SrcBuffer {
/// Buffer - The memory buffer for the file.
MemoryBuffer *Buffer;
/// IncludeLoc - This is the location of the parent include, or null if at
/// the top level.
TGLoc IncludeLoc;
SMLoc IncludeLoc;
};
/// Buffers - This is all of the buffers that we are reading from.
std::vector<SrcBuffer> Buffers;
TGSourceMgr(const TGSourceMgr&); // DO NOT IMPLEMENT
void operator=(const TGSourceMgr&); // DO NOT IMPLEMENT
// IncludeDirectories - This is the list of directories we should search for
// include files in.
std::vector<std::string> IncludeDirectories;
SourceMgr(const SourceMgr&); // DO NOT IMPLEMENT
void operator=(const SourceMgr&); // DO NOT IMPLEMENT
public:
TGSourceMgr() {}
~TGSourceMgr();
SourceMgr() {}
~SourceMgr();
void setIncludeDirs(const std::vector<std::string> &Dirs) {
IncludeDirectories = Dirs;
}
const SrcBuffer &getBufferInfo(unsigned i) const {
assert(i < Buffers.size() && "Invalid Buffer ID!");
@ -71,12 +81,12 @@ public:
return Buffers[i].Buffer;
}
TGLoc getParentIncludeLoc(unsigned i) const {
SMLoc getParentIncludeLoc(unsigned i) const {
assert(i < Buffers.size() && "Invalid Buffer ID!");
return Buffers[i].IncludeLoc;
}
unsigned AddNewSourceBuffer(MemoryBuffer *F, TGLoc IncludeLoc) {
unsigned AddNewSourceBuffer(MemoryBuffer *F, SMLoc IncludeLoc) {
SrcBuffer NB;
NB.Buffer = F;
NB.IncludeLoc = IncludeLoc;
@ -84,21 +94,25 @@ public:
return Buffers.size()-1;
}
/// AddIncludeFile - Search for a file with the specified name in the current
/// directory or in one of the IncludeDirs. If no file is found, this returns
/// ~0, otherwise it returns the buffer ID of the stacked file.
unsigned AddIncludeFile(const std::string &Filename, SMLoc IncludeLoc);
/// FindBufferContainingLoc - Return the ID of the buffer containing the
/// specified location, returning -1 if not found.
int FindBufferContainingLoc(TGLoc Loc) const;
int FindBufferContainingLoc(SMLoc Loc) const;
/// FindLineNumber - Find the line number for the specified location in the
/// specified file. This is not a fast method.
unsigned FindLineNumber(TGLoc Loc, int BufferID = -1) const;
unsigned FindLineNumber(SMLoc Loc, int BufferID = -1) const;
/// PrintError - Emit an error message about the specified location with the
/// PrintMessage - Emit a message about the specified location with the
/// specified string.
void PrintError(TGLoc ErrorLoc, const std::string &Msg) const;
void PrintMessage(SMLoc Loc, const std::string &Msg) const;
private:
void PrintIncludeStack(TGLoc IncludeLoc) const;
void PrintIncludeStack(SMLoc IncludeLoc) const;
};
} // end llvm namespace
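A usage sketch (for illustration; the header path and the diagnostic text are
assumptions):

#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SourceMgr.h"
using namespace llvm;

void reportAtBufferStart(SourceMgr &SrcMgr, MemoryBuffer *Buf) {
  // Register the buffer; an SMLoc() include location means "top level".
  SrcMgr.AddNewSourceBuffer(Buf, SMLoc());
  SMLoc Loc = SMLoc::getFromPointer(Buf->getBufferStart());
  SrcMgr.PrintMessage(Loc, "example diagnostic at the start of the buffer");
}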

View File

@ -24,6 +24,8 @@ namespace llvm {
cas_flag CompareAndSwap(volatile cas_flag* ptr,
cas_flag new_value,
cas_flag old_value);
cas_flag AtomicIncrement(volatile cas_flag* ptr);
cas_flag AtomicDecrement(volatile cas_flag* ptr);
}
}

View File

@ -14,12 +14,15 @@
#ifndef LLVM_SYSTEM_MUTEX_H
#define LLVM_SYSTEM_MUTEX_H
#include "llvm/System/Threading.h"
#include <cassert>
namespace llvm
{
namespace sys
{
/// @brief Platform agnostic Mutex class.
class Mutex
class MutexImpl
{
/// @name Constructors
/// @{
@ -30,11 +33,11 @@ namespace llvm
/// also more likely to deadlock (same thread can't acquire more than
/// once).
/// @brief Default Constructor.
explicit Mutex(bool recursive = true);
explicit MutexImpl(bool recursive = true);
/// Releases and removes the lock
/// @brief Destructor
~Mutex();
~MutexImpl();
/// @}
/// @name Methods
@ -66,18 +69,81 @@ namespace llvm
/// @name Platform Dependent Data
/// @{
private:
#ifdef ENABLE_THREADS
void* data_; ///< We don't know what the data will be
#endif
/// @}
/// @name Do Not Implement
/// @{
private:
Mutex(const Mutex & original);
void operator=(const Mutex &);
MutexImpl(const MutexImpl & original);
void operator=(const MutexImpl &);
/// @}
};
/// SmartMutex - A mutex with a compile time constant parameter that
/// indicates whether this mutex should become a no-op when we're not
/// running in multithreaded mode.
template<bool mt_only>
class SmartMutex : public MutexImpl {
unsigned acquired;
bool recursive;
public:
explicit SmartMutex(bool rec = true) :
MutexImpl(rec), acquired(0), recursive(rec) { }
bool acquire() {
if (!mt_only || llvm_is_multithreaded())
return MutexImpl::acquire();
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides some sanity checks in single threaded mode.
assert((recursive || acquired == 0) && "Lock already acquired!!");
++acquired;
return true;
}
bool release() {
if (!mt_only || llvm_is_multithreaded())
return MutexImpl::release();
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides some sanity checks in single threaded mode.
assert(((recursive && acquired) || (acquired == 1)) &&
"Lock not acquired before release!");
--acquired;
return true;
}
bool tryacquire() {
if (!mt_only || llvm_is_multithreaded())
return MutexImpl::tryacquire();
return true;
}
private:
SmartMutex(const SmartMutex<mt_only> & original);
void operator=(const SmartMutex<mt_only> &);
};
/// Mutex - A standard, always enforced mutex.
typedef SmartMutex<false> Mutex;
template<bool mt_only>
class SmartScopedLock {
SmartMutex<mt_only>* mtx;
public:
SmartScopedLock(SmartMutex<mt_only>* m) : mtx(m) {
mtx->acquire();
}
~SmartScopedLock() {
mtx->release();
}
};
typedef SmartScopedLock<false> ScopedLock;
}
}
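A usage sketch of the new smart mutex wrappers (the lock name and the guarded
function are placeholders):

#include "llvm/System/Mutex.h"

static llvm::sys::SmartMutex<true> SymbolTableLock;

void addSymbol() {
  // Acquired here, released when Guard goes out of scope. When LLVM is not
  // running in multithreaded mode this reduces to cheap bookkeeping.
  llvm::sys::SmartScopedLock<true> Guard(&SymbolTableLock);
  // ... touch the shared state ...
}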

View File

@ -309,6 +309,11 @@ namespace sys {
/// @brief Determine if the path is absolute.
bool isAbsolute() const;
/// This function determines if the path name is absolute, as opposed to
/// relative.
/// @brief Determine if the path is absolute.
static bool isAbsolute(const char *NameStart, unsigned NameLen);
/// This function opens the file associated with the path name provided by
/// the Path object and reads its magic number. If the magic number at the
/// start of the file matches \p magic, true is returned. In all other

View File

@ -0,0 +1,175 @@
//===- RWMutex.h - Reader/Writer Mutual Exclusion Lock ----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file declares the llvm::sys::RWMutex class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SYSTEM_RWMUTEX_H
#define LLVM_SYSTEM_RWMUTEX_H
#include "llvm/System/Threading.h"
#include <cassert>
namespace llvm
{
namespace sys
{
/// @brief Platform agnostic RWMutex class.
class RWMutexImpl
{
/// @name Constructors
/// @{
public:
/// Initializes the lock but doesn't acquire it.
/// @brief Default Constructor.
explicit RWMutexImpl();
/// Releases and removes the lock
/// @brief Destructor
~RWMutexImpl();
/// @}
/// @name Methods
/// @{
public:
/// Attempts to unconditionally acquire the lock in reader mode. If the
/// lock is held by a writer, this method will wait until it can acquire
/// the lock.
/// @returns false if any kind of error occurs, true otherwise.
/// @brief Unconditionally acquire the lock in reader mode.
bool reader_acquire();
/// Attempts to release the lock in reader mode.
/// @returns false if any kind of error occurs, true otherwise.
/// @brief Unconditionally release the lock in reader mode.
bool reader_release();
/// Attempts to unconditionally acquire the lock in writer mode. If the
/// lock is held by any readers, this method will wait until it can
/// acquire the lock.
/// @returns false if any kind of error occurs, true otherwise.
/// @brief Unconditionally acquire the lock in writer mode.
bool writer_acquire();
/// Attempts to release the lock in writer mode.
/// @returns false if any kind of error occurs, true otherwise.
/// @brief Unconditionally release the lock in write mode.
bool writer_release();
//@}
/// @name Platform Dependent Data
/// @{
private:
void* data_; ///< We don't know what the data will be
/// @}
/// @name Do Not Implement
/// @{
private:
RWMutexImpl(const RWMutexImpl & original);
void operator=(const RWMutexImpl &);
/// @}
};
/// SmartRWMutex - An R/W mutex with a compile time constant parameter that
/// indicates whether this mutex should become a no-op when we're not
/// running in multithreaded mode.
template<bool mt_only>
class SmartRWMutex : public RWMutexImpl {
unsigned readers, writers;
public:
explicit SmartRWMutex() : RWMutexImpl(), readers(0), writers(0) { }
bool reader_acquire() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::reader_acquire();
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides sanity checks in single-threaded mode.
++readers;
return true;
}
bool reader_release() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::reader_release();
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides sanity checks in single-threaded mode.
assert(readers > 0 && "Reader lock not acquired before release!");
--readers;
return true;
}
bool writer_acquire() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::writer_acquire();
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides sanity checks in single-threaded mode.
assert(writers == 0 && "Writer lock already acquired!");
++writers;
return true;
}
bool writer_release() {
if (!mt_only || llvm_is_multithreaded())
return RWMutexImpl::writer_release();
// Single-threaded debugging code. This would be racy in multithreaded
// mode, but provides sanity checks in single-threaded mode.
assert(writers == 1 && "Writer lock not acquired before release!");
--writers;
return true;
}
private:
SmartRWMutex(const SmartRWMutex<mt_only> & original);
void operator=(const SmartRWMutex<mt_only> &);
};
typedef SmartRWMutex<false> RWMutex;
/// ScopedReader - RAII acquisition of a reader lock
template<bool mt_only>
struct SmartScopedReader {
SmartRWMutex<mt_only>* mutex;
explicit SmartScopedReader(SmartRWMutex<mt_only>* m) {
mutex = m;
mutex->reader_acquire();
}
~SmartScopedReader() {
mutex->reader_release();
}
};
typedef SmartScopedReader<false> ScopedReader;
/// ScopedWriter - RAII acquisition of a writer lock
template<bool mt_only>
struct SmartScopedWriter {
SmartRWMutex<mt_only>* mutex;
explicit SmartScopedWriter(SmartRWMutex<mt_only>* m) {
mutex = m;
mutex->writer_acquire();
}
~SmartScopedWriter() {
mutex->writer_release();
}
};
typedef SmartScopedWriter<false> ScopedWriter;
}
}
#endif
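As a rough sketch of how the reader/writer wrappers are meant to be used (the shared map and helper names are invented for illustration, assuming the header is included as llvm/System/RWMutex.h):
#include "llvm/System/RWMutex.h"
#include <map>

static llvm::sys::SmartRWMutex<true> MapLock;
static std::map<int, int> SharedMap;

int lookupValue(int Key) {
  llvm::sys::SmartScopedReader<true> R(&MapLock); // many readers may hold this at once
  std::map<int, int>::const_iterator I = SharedMap.find(Key);
  return I == SharedMap.end() ? 0 : I->second;
}

void updateValue(int Key, int Val) {
  llvm::sys::SmartScopedWriter<true> W(&MapLock); // writers get exclusive access
  SharedMap[Key] = Val;
}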

View File

@ -0,0 +1,45 @@
//===-- llvm/System/Threading.h - Control multithreading mode --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines llvm_start_multithreaded() and friends.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_SYSTEM_THREADING_H
#define LLVM_SYSTEM_THREADING_H
namespace llvm {
/// llvm_start_multithreaded - Allocate and initialize structures needed to
/// make LLVM safe for multithreading. The return value indicates whether
/// multithreaded initialization succeeded. LLVM will still be operational
/// on "failed" return, and will still be safe for hosting threading
/// applications in the JIT, but will not be safe for concurrent calls to the
/// LLVM APIs.
/// THIS MUST EXECUTE IN ISOLATION FROM ALL OTHER LLVM API CALLS.
bool llvm_start_multithreaded();
/// llvm_stop_multithreaded - Deallocate structures necessary to make LLVM
/// safe for multithreading.
/// THIS MUST EXECUTE IN ISOLATION FROM ALL OTHER LLVM API CALLS.
void llvm_stop_multithreaded();
/// llvm_is_multithreaded - Check whether LLVM is executing in thread-safe
/// mode or not.
bool llvm_is_multithreaded();
/// llvm_acquire_global_lock - Acquire the global lock. This is a no-op if called
/// before llvm_start_multithreaded().
void llvm_acquire_global_lock();
/// llvm_release_global_lock - Release the global lock. This is a no-op if called
/// before llvm_start_multithreaded().
void llvm_release_global_lock();
}
#endif
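A minimal sketch of the intended call order for these entry points (the actual JIT work is elided; this is illustrative, not part of the commit):
#include "llvm/System/Threading.h"

int main() {
  // Must run in isolation, before any other LLVM API call or thread creation.
  bool ThreadsOK = llvm::llvm_start_multithreaded();
  // ... build modules and run the JIT, from multiple threads if ThreadsOK ...
  if (llvm::llvm_is_multithreaded())
    llvm::llvm_stop_multithreaded(); // likewise must run in isolation
  return ThreadsOK ? 0 : 1;
}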

View File

@ -23,7 +23,7 @@ namespace llvm {
class Type;
class Mangler;
struct DarwinTargetAsmInfo: public TargetAsmInfo {
struct DarwinTargetAsmInfo : public TargetAsmInfo {
const Section* TextCoalSection;
const Section* ConstTextCoalSection;
const Section* ConstDataCoalSection;

View File

@ -274,6 +274,7 @@ def unknown;
class Operand<ValueType ty> {
ValueType Type = ty;
string PrintMethod = "printOperand";
string AsmOperandLowerMethod = ?;
dag MIOperandInfo = (ops);
}

View File

@ -130,7 +130,6 @@ namespace llvm {
private:
mutable StringMap<Section> Sections;
mutable SectionFlags::FlagsStringsMapType FlagsStrings;
void fillDefaultValues();
protected:
/// TM - The current TargetMachine.
const TargetMachine &TM;
@ -278,6 +277,10 @@ namespace llvm {
/// use '\1' as the first character.
const char *StringConstantPrefix; // Defaults to ".str"
/// AllowQuotesInName - This is true if the assembler allows for complex
/// symbol names to be surrounded in quotes. This defaults to false.
bool AllowQuotesInName;
//===--- Data Emission Directives -------------------------------------===//
/// ZeroDirective - this should be set to the directive used to get some
@ -308,8 +311,7 @@ namespace llvm {
/// directives for various sizes and non-default address spaces.
virtual const char *getASDirective(unsigned size,
unsigned AS) const {
assert (AS > 0
&& "Dont know the directives for default addr space");
assert(AS > 0 && "Dont know the directives for default addr space");
return NULL;
}
@ -472,10 +474,6 @@ namespace llvm {
/// encode inline subroutine information.
bool DwarfUsesInlineInfoSection; // Defaults to false.
/// SupportsMacInfo - true if the Dwarf output supports macro information
///
bool SupportsMacInfoSection; // Defaults to true
/// NonLocalEHFrameLabel - If set, the EH_frame label needs to be non-local.
///
bool NonLocalEHFrameLabel; // Defaults to false.
@ -536,9 +534,9 @@ namespace llvm {
///
const char *DwarfRangesSection; // Defaults to ".debug_ranges".
/// DwarfMacInfoSection - Section directive for Dwarf info.
/// DwarfMacroInfoSection - Section directive for DWARF macro info.
///
const char *DwarfMacInfoSection; // Defaults to ".debug_macinfo".
const char *DwarfMacroInfoSection; // Defaults to ".debug_macinfo".
/// DwarfEHFrameSection - Section directive for Exception frames.
///
@ -749,6 +747,9 @@ namespace llvm {
const char *getStringConstantPrefix() const {
return StringConstantPrefix;
}
bool doesAllowQuotesInName() const {
return AllowQuotesInName;
}
const char *getZeroDirective() const {
return ZeroDirective;
}
@ -866,9 +867,6 @@ namespace llvm {
bool doesDwarfUsesInlineInfoSection() const {
return DwarfUsesInlineInfoSection;
}
bool doesSupportMacInfoSection() const {
return SupportsMacInfoSection;
}
bool doesRequireNonLocalEHFrameLabel() const {
return NonLocalEHFrameLabel;
}
@ -914,8 +912,8 @@ namespace llvm {
const char *getDwarfRangesSection() const {
return DwarfRangesSection;
}
const char *getDwarfMacInfoSection() const {
return DwarfMacInfoSection;
const char *getDwarfMacroInfoSection() const {
return DwarfMacroInfoSection;
}
const char *getDwarfEHFrameSection() const {
return DwarfEHFrameSection;

View File

@ -30,7 +30,7 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/DebugLoc.h"
#include "llvm/Support/DebugLoc.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>

View File

@ -78,11 +78,13 @@ namespace CodeGenOpt {
};
}
// Possible float ABI settings. Used with FloatABIType in TargetOptions.h.
namespace FloatABI {
enum ABIType {
Default,
Soft,
Hard
Default, // Target-specific (either soft or hard depending on triple, etc.).
Soft, // Soft float.
Hard // Hard float.
};
}
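To illustrate how a backend might consume this enum, here is a hedged sketch; resolveFloatABI and HasVFP are invented names, and real targets derive this choice from the triple and subtarget features:
#include "llvm/Target/TargetMachine.h"

llvm::FloatABI::ABIType resolveFloatABI(llvm::FloatABI::ABIType Requested,
                                        bool HasVFP) {
  if (Requested != llvm::FloatABI::Default)
    return Requested;                 // honor an explicit -float-abi choice
  return HasVFP ? llvm::FloatABI::Hard : llvm::FloatABI::Soft;
}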

View File

@ -519,6 +519,36 @@ public:
return NULL;
}
/// getAllocationOrder - Returns the register allocation order for a specified
/// register class in the form of a pair of TargetRegisterClass iterators.
virtual std::pair<TargetRegisterClass::iterator,TargetRegisterClass::iterator>
getAllocationOrder(const TargetRegisterClass *RC,
unsigned HintType, unsigned HintReg,
const MachineFunction &MF) const {
return std::make_pair(RC->allocation_order_begin(MF),
RC->allocation_order_end(MF));
}
/// ResolveRegAllocHint - Resolves the specified register allocation hint
/// to a physical register. Returns the physical register if it is successful.
virtual unsigned ResolveRegAllocHint(unsigned Type, unsigned Reg,
const MachineFunction &MF) const {
if (Type == 0 && Reg && isPhysicalRegister(Reg))
return Reg;
return 0;
}
/// UpdateRegAllocHint - A callback to allow the target a chance to update
/// register allocation hints when a register is "changed" (e.g. coalesced)
/// to another register. For example, on ARM some virtual registers should
/// target register pairs; if one half of the pair is coalesced to another
/// register, the allocation hint of the other half should be changed to
/// point to the new register.
virtual void UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
MachineFunction &MF) const {
// Do nothing.
}
/// targetHandlesStackFrameRounding - Returns true if the target is
/// responsible for rounding up the stack frame (probably at emitPrologue
/// time).
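A hedged sketch of how an allocator could consult the new hint machinery; preferredPhysReg is an invented helper, and only getRegAllocationHint and ResolveRegAllocHint come from the interfaces added in this change:
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <utility>

unsigned preferredPhysReg(unsigned VirtReg,
                          llvm::MachineRegisterInfo &MRI,
                          const llvm::TargetRegisterInfo &TRI,
                          const llvm::MachineFunction &MF) {
  // Read the (Type, Reg) hint recorded for the virtual register and let the
  // target resolve it to a physical register; 0 means "no usable preference".
  std::pair<unsigned, unsigned> Hint = MRI.getRegAllocationHint(VirtReg);
  return TRI.ResolveRegAllocHint(Hint.first, Hint.second, MF);
}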

View File

@ -0,0 +1,65 @@
//===- TargetSelect.h - Target Selection & Registration -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file provides utilities to make sure that certain classes of targets are
// linked into the main application executable, and initialize them as
// appropriate.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETSELECT_H
#define LLVM_TARGET_TARGETSELECT_H
#include "llvm/Config/config.h"
namespace llvm {
// Declare all of the target-initialization functions that are available.
#define LLVM_TARGET(TargetName) void Initialize##TargetName##Target();
#include "llvm/Config/Targets.def"
// Declare all of the available assembly printer initialization functions.
#define LLVM_ASM_PRINTER(TargetName) void Initialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"
/// InitializeAllTargets - The main program should call this function if it
/// wants to link in all available targets that LLVM is configured to support.
inline void InitializeAllTargets() {
#define LLVM_TARGET(TargetName) llvm::Initialize##TargetName##Target();
#include "llvm/Config/Targets.def"
}
/// InitializeAllAsmPrinters - The main program should call this function if
/// it wants all asm printers that LLVM is configured to support. This will
/// cause them to be linked into its executable.
inline void InitializeAllAsmPrinters() {
#define LLVM_ASM_PRINTER(TargetName) Initialize##TargetName##AsmPrinter();
#include "llvm/Config/AsmPrinters.def"
}
/// InitializeNativeTarget - The main program should call this function to
/// initialize the native target corresponding to the host. This is useful
/// for JIT applications to ensure that the target gets linked in correctly.
inline bool InitializeNativeTarget() {
// If we have a native target, initialize it to ensure it is linked in.
#ifdef LLVM_NATIVE_ARCH
#define DoInit2(TARG, MOD) llvm::Initialize ## TARG ## MOD()
#define DoInit(T, M) DoInit2(T, M)
DoInit(LLVM_NATIVE_ARCH, Target);
return false;
#undef DoInit
#undef DoInit2
#else
return true;
#endif
}
}
#endif
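A minimal sketch of a host program's start-up using these helpers (the ExecutionEngine setup that would normally follow is elided):
#include "llvm/Target/TargetSelect.h"

int main() {
  llvm::InitializeAllTargets();       // link in every configured backend
  llvm::InitializeAllAsmPrinters();   // and their assembly printers
  if (llvm::InitializeNativeTarget()) // returns true when no native target exists
    return 1;
  // ... create a Module, TargetMachine or ExecutionEngine here ...
  return 0;
}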

View File

@ -143,10 +143,10 @@ Pass *createLoopIndexSplitPass();
// this pass is:
//
// FROM CODE TO CODE
// %X = alloca int, uint 1 ret int 42
// store int 42, int *%X
// %Y = load int* %X
// ret int %Y
// %X = alloca i32, i32 1 ret i32 42
// store i32 42, i32 *%X
// %Y = load i32* %X
// ret i32 %Y
//
FunctionPass *createPromoteMemoryToRegisterPass();
extern const PassInfo *const PromoteMemoryToRegisterID;
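A hedged sketch of driving mem2reg with the legacy PassManager of this era; promoteAllocas is an illustrative helper, not an LLVM API:
#include "llvm/Module.h"
#include "llvm/PassManager.h"
#include "llvm/Transforms/Scalar.h"

void promoteAllocas(llvm::Module &M) {
  llvm::PassManager PM;
  PM.add(llvm::createPromoteMemoryToRegisterPass()); // mem2reg, as described above
  PM.run(M);
}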

View File

@ -30,6 +30,16 @@ struct DbgInfoIntrinsic;
template<typename T> class SmallVectorImpl;
//===----------------------------------------------------------------------===//
// Local analysis.
//
/// isSafeToLoadUnconditionally - Return true if we know that executing a load
/// from this value cannot trap. If it is not obviously safe to load from the
/// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed.
bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom);
//===----------------------------------------------------------------------===//
// Local constant propagation.
//
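Referring back to isSafeToLoadUnconditionally above, a hedged sketch of its intended use when speculating a load; speculateLoad is an invented helper, not part of this change:
#include "llvm/Instructions.h"
#include "llvm/Transforms/Utils/Local.h"

llvm::Value *speculateLoad(llvm::Value *Ptr, llvm::Instruction *InsertPt) {
  if (!llvm::isSafeToLoadUnconditionally(Ptr, InsertPt))
    return 0;                       // the load might trap; leave it where it is
  return new llvm::LoadInst(Ptr, "spec.load", InsertPt);
}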

View File

@ -14,6 +14,7 @@
#include "llvm/AbstractTypeUser.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/DataTypes.h"
#include "llvm/System/Atomic.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include <string>
@ -102,7 +103,7 @@ private:
/// has no AbstractTypeUsers, the type is deleted. This is only sensical for
/// derived types.
///
mutable unsigned RefCount;
mutable sys::cas_flag RefCount;
const Type *getForwardedTypeInternal() const;
@ -268,19 +269,16 @@ public:
/// primitive type.
///
unsigned getPrimitiveSizeInBits() const;
/// getScalarSizeInBits - If this is a vector type, return the
/// getPrimitiveSizeInBits value for the element type. Otherwise return the
/// getPrimitiveSizeInBits value for this type.
unsigned getScalarSizeInBits() const;
/// getFPMantissaWidth - Return the width of the mantissa of this type. This
/// is only valid on scalar floating point types. If the FP type does not
/// is only valid on floating point types. If the FP type does not
/// have a stable mantissa (e.g. ppc long double), this method returns -1.
int getFPMantissaWidth() const {
assert(isFloatingPoint() && "Not a floating point type!");
if (ID == FloatTyID) return 24;
if (ID == DoubleTyID) return 53;
if (ID == X86_FP80TyID) return 64;
if (ID == FP128TyID) return 113;
assert(ID == PPC_FP128TyID && "unknown fp type");
return -1;
}
int getFPMantissaWidth() const;
/// getForwardedType - Return the type that this type has been resolved to if
/// it has been resolved to anything. This is used to implement the
@ -296,6 +294,10 @@ public:
/// function.
const Type *getVAArgsPromotedType() const;
/// getScalarType - If this is a vector type, return the element type,
/// otherwise return this.
const Type *getScalarType() const;
//===--------------------------------------------------------------------===//
// Type Iteration support
//
@ -336,7 +338,7 @@ public:
void addRef() const {
assert(isAbstract() && "Cannot add a reference to a non-abstract type!");
++RefCount;
sys::AtomicIncrement(&RefCount);
}
void dropRef() const {
@ -345,17 +347,15 @@ public:
// If this is the last PATypeHolder using this object, and there are no
// PATypeHandles using it, the type is dead, delete it now.
if (--RefCount == 0 && AbstractTypeUsers.empty())
sys::cas_flag OldCount = sys::AtomicDecrement(&RefCount);
if (OldCount == 0 && AbstractTypeUsers.empty())
this->destroy();
}
/// addAbstractTypeUser - Notify an abstract type that there is a new user of
/// it. This function is called primarily by the PATypeHandle class.
///
void addAbstractTypeUser(AbstractTypeUser *U) const {
assert(isAbstract() && "addAbstractTypeUser: Current type not abstract!");
AbstractTypeUsers.push_back(U);
}
void addAbstractTypeUser(AbstractTypeUser *U) const;
/// removeAbstractTypeUser - Notify an abstract type that a user of the class
/// no longer has a handle to the type. This function is called primarily by

View File

@ -365,7 +365,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
if (TD && CE->getOpcode() == Instruction::IntToPtr) {
Constant *Input = CE->getOperand(0);
unsigned InWidth = Input->getType()->getPrimitiveSizeInBits();
unsigned InWidth = Input->getType()->getScalarSizeInBits();
if (TD->getPointerSizeInBits() < InWidth) {
Constant *Mask =
ConstantInt::get(APInt::getLowBitsSet(InWidth,
@ -384,7 +384,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0])) {
if (TD &&
TD->getPointerSizeInBits() <=
CE->getType()->getPrimitiveSizeInBits()) {
CE->getType()->getScalarSizeInBits()) {
if (CE->getOpcode() == Instruction::PtrToInt) {
Constant *Input = CE->getOperand(0);
Constant *C = FoldBitCast(Input, DestTy, *TD);

View File

@ -352,7 +352,7 @@ Constant *DIFactory::GetStringConstant(const std::string &String) {
const PointerType *DestTy = PointerType::getUnqual(Type::Int8Ty);
// If empty string then use a sbyte* null instead.
// If empty string then use a i8* null instead.
if (String.empty())
return Slot = ConstantPointerNull::get(DestTy);

View File

@ -82,11 +82,8 @@ static bool containsAddRecFromDifferentLoop(SCEVHandle S, Loop *L) {
/// outer loop of the current loop.
static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L, Loop *UseLoop,
SCEVHandle &Start, SCEVHandle &Stride,
bool &isSigned,
ScalarEvolution *SE, DominatorTree *DT) {
SCEVHandle TheAddRec = Start; // Initialize to zero.
bool isSExt = false;
bool isZExt = false;
// If the outer level is an AddExpr, the operands are all start values except
// for a nested AddRecExpr.
@ -101,13 +98,6 @@ static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L, Loop *UseLoop,
} else {
Start = SE->getAddExpr(Start, AE->getOperand(i));
}
} else if (const SCEVZeroExtendExpr *Z = dyn_cast<SCEVZeroExtendExpr>(SH)) {
TheAddRec = Z->getOperand();
isZExt = true;
} else if (const SCEVSignExtendExpr *S = dyn_cast<SCEVSignExtendExpr>(SH)) {
TheAddRec = S->getOperand();
isSExt = true;
} else if (isa<SCEVAddRecExpr>(SH)) {
TheAddRec = SH;
} else {
@ -120,9 +110,8 @@ static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L, Loop *UseLoop,
// Use getSCEVAtScope to attempt to simplify other loops out of
// the picture.
SCEVHandle AddRecStart = AddRec->getStart();
SCEVHandle BetterAddRecStart = SE->getSCEVAtScope(AddRecStart, UseLoop);
if (!isa<SCEVCouldNotCompute>(BetterAddRecStart))
AddRecStart = BetterAddRecStart;
AddRecStart = SE->getSCEVAtScope(AddRecStart, UseLoop);
SCEVHandle AddRecStride = AddRec->getStepRecurrence(*SE);
// FIXME: If Start contains an SCEVAddRecExpr from a different loop, other
// than an outer loop of the current loop, reject it. LSR has no concept of
@ -131,24 +120,20 @@ static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L, Loop *UseLoop,
if (containsAddRecFromDifferentLoop(AddRecStart, L))
return false;
if (isSExt || isZExt)
Start = SE->getTruncateExpr(Start, AddRec->getType());
Start = SE->getAddExpr(Start, AddRecStart);
if (!isa<SCEVConstant>(AddRec->getStepRecurrence(*SE))) {
// If stride is an instruction, make sure it dominates the loop preheader.
// Otherwise we could end up with a use before def situation.
// If stride is an instruction, make sure it dominates the loop preheader.
// Otherwise we could end up with a use before def situation.
if (!isa<SCEVConstant>(AddRecStride)) {
BasicBlock *Preheader = L->getLoopPreheader();
if (!AddRec->getStepRecurrence(*SE)->dominates(Preheader, DT))
if (!AddRecStride->dominates(Preheader, DT))
return false;
DOUT << "[" << L->getHeader()->getName()
<< "] Variable stride: " << *AddRec << "\n";
}
Stride = AddRec->getStepRecurrence(*SE);
isSigned = isSExt;
Stride = AddRecStride;
return true;
}
@ -218,9 +203,8 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
Loop *UseLoop = LI->getLoopFor(I->getParent());
SCEVHandle Start = SE->getIntegerSCEV(0, ISE->getType());
SCEVHandle Stride = Start;
bool isSigned = false; // Arbitrary initial value - pacifies compiler.
if (!getSCEVStartAndStride(ISE, L, UseLoop, Start, Stride, isSigned, SE, DT))
if (!getSCEVStartAndStride(ISE, L, UseLoop, Start, Stride, SE, DT))
return false; // Non-reducible symbolic expression, bail out.
SmallPtrSet<Instruction *, 4> UniqueUsers;
@ -271,11 +255,11 @@ bool IVUsers::AddUsersIfInteresting(Instruction *I) {
// The value used will be incremented by the stride more than we are
// expecting, so subtract this off.
SCEVHandle NewStart = SE->getMinusSCEV(Start, Stride);
StrideUses->addUser(NewStart, User, I, isSigned);
StrideUses->addUser(NewStart, User, I);
StrideUses->Users.back().setIsUseOfPostIncrementedValue(true);
DOUT << " USING POSTINC SCEV, START=" << *NewStart<< "\n";
} else {
StrideUses->addUser(Start, User, I, isSigned);
StrideUses->addUser(Start, User, I);
}
}
}
@ -312,7 +296,6 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
/// getReplacementExpr - Return a SCEV expression which computes the
/// value of the OperandValToReplace of the given IVStrideUse.
SCEVHandle IVUsers::getReplacementExpr(const IVStrideUse &U) const {
const Type *UseTy = U.getOperandValToReplace()->getType();
// Start with zero.
SCEVHandle RetVal = SE->getIntegerSCEV(0, U.getParent()->Stride->getType());
// Create the basic add recurrence.
@ -326,17 +309,9 @@ SCEVHandle IVUsers::getReplacementExpr(const IVStrideUse &U) const {
// Evaluate the expression out of the loop, if possible.
if (!L->contains(U.getUser()->getParent())) {
SCEVHandle ExitVal = SE->getSCEVAtScope(RetVal, L->getParentLoop());
if (!isa<SCEVCouldNotCompute>(ExitVal) && ExitVal->isLoopInvariant(L))
if (ExitVal->isLoopInvariant(L))
RetVal = ExitVal;
}
// Promote the result to the type of the use.
if (SE->getTypeSizeInBits(RetVal->getType()) !=
SE->getTypeSizeInBits(UseTy)) {
if (U.isSigned())
RetVal = SE->getSignExtendExpr(RetVal, UseTy);
else
RetVal = SE->getZeroExtendExpr(RetVal, UseTy);
}
return RetVal;
}

File diff suppressed because it is too large

View File

@ -182,7 +182,8 @@ static bool FactorOutConstant(SCEVHandle &S,
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S))
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
if (!C->getValue()->getValue().srem(Factor)) {
std::vector<SCEVHandle> NewMulOps(M->getOperands());
const SmallVectorImpl<SCEVHandle> &MOperands = M->getOperands();
SmallVector<SCEVHandle, 4> NewMulOps(MOperands.begin(), MOperands.end());
NewMulOps[0] =
SE.getConstant(C->getValue()->getValue().sdiv(Factor));
S = SE.getMulExpr(NewMulOps);
@ -239,7 +240,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin,
Value *V) {
const Type *ElTy = PTy->getElementType();
SmallVector<Value *, 4> GepIndices;
std::vector<SCEVHandle> Ops(op_begin, op_end);
SmallVector<SCEVHandle, 8> Ops(op_begin, op_end);
bool AnyNonZeroIndices = false;
// Descend down the pointer's type and attempt to convert the other
@ -250,8 +251,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin,
for (;;) {
APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
ElTy->isSized() ? SE.TD->getTypeAllocSize(ElTy) : 0);
std::vector<SCEVHandle> NewOps;
std::vector<SCEVHandle> ScaledOps;
SmallVector<SCEVHandle, 8> NewOps;
SmallVector<SCEVHandle, 8> ScaledOps;
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
// Split AddRecs up into parts as either of the parts may be usable
// without the other.
@ -297,9 +298,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin,
GepIndices.push_back(ConstantInt::get(Type::Int32Ty, ElIdx));
ElTy = STy->getTypeAtIndex(ElIdx);
Ops[0] =
SE.getConstant(ConstantInt::get(Ty,
FullOffset -
SL.getElementOffset(ElIdx)));
SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
AnyNonZeroIndices = true;
continue;
}
@ -365,7 +364,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
// comments on expandAddToGEP for details.
if (SE.TD)
if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
const std::vector<SCEVHandle> &Ops = S->getOperands();
const SmallVectorImpl<SCEVHandle> &Ops = S->getOperands();
return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1],
PTy, Ty, V);
}
@ -432,7 +431,7 @@ static void ExposePointerBase(SCEVHandle &Base, SCEVHandle &Rest,
}
if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
Base = A->getOperand(A->getNumOperands()-1);
std::vector<SCEVHandle> NewAddOps(A->op_begin(), A->op_end());
SmallVector<SCEVHandle, 8> NewAddOps(A->op_begin(), A->op_end());
NewAddOps.back() = Rest;
Rest = SE.getAddExpr(NewAddOps);
ExposePointerBase(Base, Rest, SE);
@ -473,7 +472,8 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
// {X,+,F} --> X + {0,+,F}
if (!S->getStart()->isZero()) {
std::vector<SCEVHandle> NewOps(S->getOperands());
const SmallVectorImpl<SCEVHandle> &SOperands = S->getOperands();
SmallVector<SCEVHandle, 4> NewOps(SOperands.begin(), SOperands.end());
NewOps[0] = SE.getIntegerSCEV(0, Ty);
SCEVHandle Rest = SE.getAddRecExpr(NewOps, L);

View File

@ -52,11 +52,12 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = Mask.getBitWidth();
assert((V->getType()->isInteger() || isa<PointerType>(V->getType())) &&
assert((V->getType()->isIntOrIntVector() || isa<PointerType>(V->getType())) &&
"Not integer or pointer type!");
assert((!TD || TD->getTypeSizeInBits(V->getType()) == BitWidth) &&
(!isa<IntegerType>(V->getType()) ||
V->getType()->getPrimitiveSizeInBits() == BitWidth) &&
assert((!TD ||
TD->getTypeSizeInBits(V->getType()->getScalarType()) == BitWidth) &&
(!V->getType()->isIntOrIntVector() ||
V->getType()->getScalarSizeInBits() == BitWidth) &&
KnownZero.getBitWidth() == BitWidth &&
KnownOne.getBitWidth() == BitWidth &&
"V, Mask, KnownOne and KnownZero should have same BitWidth");
@ -67,12 +68,26 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
KnownZero = ~KnownOne & Mask;
return;
}
// Null is all-zeros.
if (isa<ConstantPointerNull>(V)) {
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) ||
isa<ConstantAggregateZero>(V)) {
KnownOne.clear();
KnownZero = Mask;
return;
}
// Handle a constant vector by taking the intersection of the known bits of
// each element.
if (ConstantVector *CV = dyn_cast<ConstantVector>(V)) {
KnownZero.set(); KnownOne.set();
for (unsigned i = 0, e = CV->getNumOperands(); i != e; ++i) {
APInt KnownZero2(BitWidth, 0), KnownOne2(BitWidth, 0);
ComputeMaskedBits(CV->getOperand(i), Mask, KnownZero2, KnownOne2,
TD, Depth);
KnownZero &= KnownZero2;
KnownOne &= KnownOne2;
}
return;
}
// The address of an aligned GlobalValue has trailing zeros.
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
unsigned Align = GV->getAlignment();
@ -218,7 +233,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
const Type *SrcTy = I->getOperand(0)->getType();
unsigned SrcBitWidth = TD ?
TD->getTypeSizeInBits(SrcTy) :
SrcTy->getPrimitiveSizeInBits();
SrcTy->getScalarSizeInBits();
APInt MaskIn(Mask);
MaskIn.zextOrTrunc(SrcBitWidth);
KnownZero.zextOrTrunc(SrcBitWidth);
@ -480,7 +495,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
// Handle array index arithmetic.
const Type *IndexedTy = GTI.getIndexedType();
if (!IndexedTy->isSized()) return;
unsigned GEPOpiBits = Index->getType()->getPrimitiveSizeInBits();
unsigned GEPOpiBits = Index->getType()->getScalarSizeInBits();
uint64_t TypeSize = TD ? TD->getTypeAllocSize(IndexedTy) : 1;
LocalMask = APInt::getAllOnesValue(GEPOpiBits);
LocalKnownZero = LocalKnownOne = APInt(GEPOpiBits, 0);
@ -609,8 +624,8 @@ bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
/// 'Op' must have a scalar integer type.
///
unsigned llvm::ComputeNumSignBits(Value *V, TargetData *TD, unsigned Depth) {
const IntegerType *Ty = cast<IntegerType>(V->getType());
unsigned TyBits = Ty->getBitWidth();
const Type *Ty = V->getType();
unsigned TyBits = Ty->getScalarSizeInBits();
unsigned Tmp, Tmp2;
unsigned FirstAnswer = 1;

View File

@ -526,6 +526,10 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(coldcc);
KEYWORD(x86_stdcallcc);
KEYWORD(x86_fastcallcc);
KEYWORD(arm_apcscc);
KEYWORD(arm_aapcscc);
KEYWORD(arm_aapcs_vfpcc);
KEYWORD(cc);
KEYWORD(c);

View File

@ -808,8 +808,11 @@ bool LLParser::ParseOptionalVisibility(unsigned &Res) {
/// ::= 'coldcc'
/// ::= 'x86_stdcallcc'
/// ::= 'x86_fastcallcc'
/// ::= 'arm_apcscc'
/// ::= 'arm_aapcscc'
/// ::= 'arm_aapcs_vfpcc'
/// ::= 'cc' UINT
///
///
bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
switch (Lex.getKind()) {
default: CC = CallingConv::C; return false;
@ -818,6 +821,9 @@ bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
case lltok::kw_coldcc: CC = CallingConv::Cold; break;
case lltok::kw_x86_stdcallcc: CC = CallingConv::X86_StdCall; break;
case lltok::kw_x86_fastcallcc: CC = CallingConv::X86_FastCall; break;
case lltok::kw_arm_apcscc: CC = CallingConv::ARM_APCS; break;
case lltok::kw_arm_aapcscc: CC = CallingConv::ARM_AAPCS; break;
case lltok::kw_arm_aapcs_vfpcc:CC = CallingConv::ARM_AAPCS_VFP; break;
case lltok::kw_cc: Lex.Lex(); return ParseUInt32(CC);
}
Lex.Lex();
@ -1743,7 +1749,7 @@ bool LLParser::ParseValID(ValID &ID) {
Lex.Lex();
if (ParseToken(lltok::lparen, "expected '(' after constantexpr cast") ||
ParseGlobalTypeAndValue(SrcVal) ||
ParseToken(lltok::kw_to, "expected 'to' int constantexpr cast") ||
ParseToken(lltok::kw_to, "expected 'to' in constantexpr cast") ||
ParseType(DestTy) ||
ParseToken(lltok::rparen, "expected ')' at end of constantexpr cast"))
return true;
@ -3145,7 +3151,7 @@ bool LLParser::ParseFree(Instruction *&Inst, PerFunctionState &PFS) {
}
/// ParseLoad
/// ::= 'volatile'? 'load' TypeAndValue (',' 'align' uint)?
/// ::= 'volatile'? 'load' TypeAndValue (',' 'align' i32)?
bool LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
bool isVolatile) {
Value *Val; LocTy Loc;
@ -3163,7 +3169,7 @@ bool LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseStore
/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' uint)?
/// ::= 'volatile'? 'store' TypeAndValue ',' TypeAndValue (',' 'align' i32)?
bool LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
bool isVolatile) {
Value *Val, *Ptr; LocTy Loc, PtrLoc;
@ -3186,7 +3192,7 @@ bool LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS,
}
/// ParseGetResult
/// ::= 'getresult' TypeAndValue ',' uint
/// ::= 'getresult' TypeAndValue ',' i32
/// FIXME: Remove support for getresult in LLVM 3.0
bool LLParser::ParseGetResult(Instruction *&Inst, PerFunctionState &PFS) {
Value *Val; LocTy ValLoc, EltLoc;

View File

@ -60,7 +60,9 @@ namespace lltok {
kw_gc,
kw_c,
kw_cc, kw_ccc, kw_fastcc, kw_coldcc, kw_x86_stdcallcc, kw_x86_fastcallcc,
kw_cc, kw_ccc, kw_fastcc, kw_coldcc,
kw_x86_stdcallcc, kw_x86_fastcallcc,
kw_arm_apcscc, kw_arm_aapcscc, kw_arm_aapcs_vfpcc,
kw_signext,
kw_zeroext,

View File

@ -2040,14 +2040,13 @@ void BitcodeReader::dematerializeFunction(Function *F) {
Module *BitcodeReader::materializeModule(std::string *ErrInfo) {
for (DenseMap<Function*, std::pair<uint64_t, unsigned> >::iterator I =
DeferredFunctionInfo.begin(), E = DeferredFunctionInfo.end(); I != E;
++I) {
Function *F = I->first;
// Iterate over the module, deserializing any functions that are still on
// disk.
for (Module::iterator F = TheModule->begin(), E = TheModule->end();
F != E; ++F)
if (F->hasNotBeenReadFromBitcode() &&
materializeFunction(F, ErrInfo))
return 0;
}
// Upgrade any intrinsic calls that slipped through (should not happen!) and
// delete the old functions to clean up. We can't do this unless the entire
@ -2123,7 +2122,7 @@ Module *llvm::ParseBitcodeFile(MemoryBuffer *Buffer, std::string *ErrMsg){
// is run.
if (M)
M = R->releaseModule(ErrMsg);
delete R;
return M;
}

View File

@ -152,6 +152,9 @@ void AsmPrinter::getAnalysisUsage(AnalysisUsage &AU) const {
bool AsmPrinter::doInitialization(Module &M) {
Mang = new Mangler(M, TAI->getGlobalPrefix(), TAI->getPrivateGlobalPrefix());
if (TAI->doesAllowQuotesInName())
Mang->setUseQuotes(true);
GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
assert(MI && "AsmPrinter didn't require GCModuleInfo?");
@ -174,9 +177,17 @@ bool AsmPrinter::doInitialization(Module &M) {
SwitchToDataSection(""); // Reset back to no section.
MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>();
if (MMI) MMI->AnalyzeModule(M);
DW = getAnalysisIfAvailable<DwarfWriter>();
if (TAI->doesSupportDebugInformation()
|| TAI->doesSupportExceptionHandling()) {
MachineModuleInfo *MMI = getAnalysisIfAvailable<MachineModuleInfo>();
if (MMI) {
MMI->AnalyzeModule(M);
DW = getAnalysisIfAvailable<DwarfWriter>();
if (DW)
DW->BeginModule(&M, MMI, O, this, TAI);
}
}
return false;
}
@ -347,8 +358,9 @@ void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI,
const char* JumpTableDataSection = TAI->getJumpTableDataSection();
const Function *F = MF.getFunction();
unsigned SectionFlags = TAI->SectionFlagsForGlobal(F);
bool JTInDiffSection = false;
if ((IsPic && !(LoweringInfo && LoweringInfo->usesGlobalOffsetTable())) ||
!JumpTableDataSection ||
!JumpTableDataSection ||
SectionFlags & SectionFlags::Linkonce) {
// In PIC mode, we need to emit the jump table to the same section as the
// function body itself, otherwise the label differences won't make sense.
@ -357,6 +369,7 @@ void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI,
SwitchToSection(TAI->SectionForGlobal(F));
} else {
SwitchToDataSection(JumpTableDataSection);
JTInDiffSection = true;
}
EmitAlignment(Log2_32(MJTI->getAlignment()));
@ -380,8 +393,10 @@ void AsmPrinter::EmitJumpTableInfo(MachineJumpTableInfo *MJTI,
// before each jump table. The first label is never referenced, but tells
// the assembler and linker the extents of the jump table object. The
// second label is actually referenced by the code.
if (const char *JTLabelPrefix = TAI->getJumpTableSpecialLabelPrefix())
O << JTLabelPrefix << "JTI" << getFunctionNumber() << '_' << i << ":\n";
if (JTInDiffSection) {
if (const char *JTLabelPrefix = TAI->getJumpTableSpecialLabelPrefix())
O << JTLabelPrefix << "JTI" << getFunctionNumber() << '_' << i << ":\n";
}
O << TAI->getPrivateGlobalPrefix() << "JTI" << getFunctionNumber()
<< '_' << i << ":\n";
@ -502,7 +517,7 @@ const GlobalValue * AsmPrinter::findGlobalValue(const Constant *CV) {
void AsmPrinter::EmitLLVMUsedList(Constant *List) {
const char *Directive = TAI->getUsedDirective();
// Should be an array of 'sbyte*'.
// Should be an array of 'i8*'.
ConstantArray *InitList = dyn_cast<ConstantArray>(List);
if (InitList == 0) return;

View File

@ -1757,6 +1757,9 @@ unsigned DwarfDebug::RecordInlinedFnStart(DISubprogram &SP, DICompileUnit CU,
if (TimePassesIsEnabled)
DebugTimer->startTimer();
CompileUnit *Unit = MainCU;
if (!Unit)
Unit = &FindCompileUnit(SP.getCompileUnit());
GlobalVariable *GV = SP.getGV();
DenseMap<const GlobalVariable *, DbgScope *>::iterator
II = AbstractInstanceRootMap.find(GV);
@ -1767,7 +1770,6 @@ unsigned DwarfDebug::RecordInlinedFnStart(DISubprogram &SP, DICompileUnit CU,
DbgScope *Scope = new DbgScope(NULL, DIDescriptor(GV));
// Get the compile unit context.
CompileUnit *Unit = &FindCompileUnit(SP.getCompileUnit());
DIE *SPDie = Unit->getDieMapSlotFor(GV);
if (!SPDie)
SPDie = CreateSubprogramDIE(Unit, SP, false, true);
@ -1789,7 +1791,6 @@ unsigned DwarfDebug::RecordInlinedFnStart(DISubprogram &SP, DICompileUnit CU,
// Create a concrete inlined instance for this inlined function.
DbgConcreteScope *ConcreteScope = new DbgConcreteScope(DIDescriptor(GV));
DIE *ScopeDie = new DIE(dwarf::DW_TAG_inlined_subroutine);
CompileUnit *Unit = &FindCompileUnit(SP.getCompileUnit());
ScopeDie->setAbstractCompileUnit(Unit);
DIE *Origin = Unit->getDieMapSlotFor(GV);
@ -1850,7 +1851,14 @@ unsigned DwarfDebug::RecordInlinedFnEnd(DISubprogram &SP) {
}
SmallVector<DbgScope *, 8> &Scopes = I->second;
assert(!Scopes.empty() && "We should have at least one debug scope!");
if (Scopes.empty()) {
// Returned ID is 0 if this is unbalanced "end of inlined
// scope". This could happen if optimizer eats dbg intrinsics
// or "beginning of inlined scope" is not recoginized due to
// missing location info. In such cases, ignore this region.end.
return 0;
}
DbgScope *Scope = Scopes.back(); Scopes.pop_back();
unsigned ID = MMI->NextLabelID();
MMI->RecordUsedDbgLabel(ID);
@ -1987,8 +1995,8 @@ void DwarfDebug::EmitInitial() {
Asm->SwitchToDataSection(TAI->getDwarfARangesSection());
EmitLabel("section_aranges", 0);
if (TAI->doesSupportMacInfoSection()) {
Asm->SwitchToDataSection(TAI->getDwarfMacInfoSection());
if (const char *LineInfoDirective = TAI->getDwarfMacroInfoSection()) {
Asm->SwitchToDataSection(LineInfoDirective);
EmitLabel("section_macinfo", 0);
}
@ -2534,9 +2542,9 @@ void DwarfDebug::EmitDebugRanges() {
/// EmitDebugMacInfo - Emit visible names into a debug macinfo section.
///
void DwarfDebug::EmitDebugMacInfo() {
if (TAI->doesSupportMacInfoSection()) {
if (const char *LineInfoDirective = TAI->getDwarfMacroInfoSection()) {
// Start the dwarf macinfo section.
Asm->SwitchToDataSection(TAI->getDwarfMacInfoSection());
Asm->SwitchToDataSection(LineInfoDirective);
Asm->EOL();
}
}

View File

@ -190,7 +190,7 @@ void Dwarf::EmitFrameMoves(const char *BaseLabel, unsigned BaseLabelID,
Asm->EmitULEB128Bytes(Offset);
Asm->EOL("Offset");
} else {
assert(0 && "Machine move no supported yet.");
assert(0 && "Machine move not supported yet.");
}
} else if (Src.isReg() &&
Src.getReg() == MachineLocation::VirtualFP) {
@ -200,7 +200,7 @@ void Dwarf::EmitFrameMoves(const char *BaseLabel, unsigned BaseLabelID,
Asm->EmitULEB128Bytes(RI->getDwarfRegNum(Dst.getReg(), isEH));
Asm->EOL("Register");
} else {
assert(0 && "Machine move no supported yet.");
assert(0 && "Machine move not supported yet.");
}
} else {
unsigned Reg = RI->getDwarfRegNum(Src.getReg(), isEH);

View File

@ -547,7 +547,11 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
// fallthrough.
if (!BBI.FalseBB)
BBI.FalseBB = findFalseBlock(BBI.BB, BBI.TrueBB);
assert(BBI.FalseBB && "Expected to find the fallthrough block!");
if (!BBI.FalseBB) {
// Malformed bcc? True and false blocks are the same?
BBI.IsUnpredicable = true;
return;
}
}
// Then scan all the instructions.
@ -663,6 +667,13 @@ IfConverter::BBInfo &IfConverter::AnalyzeBlock(MachineBasicBlock *BB,
return BBI;
}
// Do not ifcvt if true and false fallthrough blocks are the same.
if (!BBI.FalseBB) {
BBI.IsBeingAnalyzed = false;
BBI.IsAnalyzed = true;
return BBI;
}
BBInfo &TrueBBI = AnalyzeBlock(BBI.TrueBB, Tokens);
BBInfo &FalseBBI = AnalyzeBlock(BBI.FalseBB, Tokens);

View File

@ -32,10 +32,12 @@ void LazyLiveness::computeBackedgeChain(MachineFunction& mf,
calculated.set(preorder[MBB]);
for (SparseBitVector<128>::iterator I = tmp.begin(); I != tmp.end(); ++I) {
assert(rev_preorder.size() > *I && "Unknown block!");
MachineBasicBlock* SrcMBB = rev_preorder[*I];
for (MachineBasicBlock::succ_iterator SI = SrcMBB->succ_begin();
SI != SrcMBB->succ_end(); ++SI) {
for (MachineBasicBlock::succ_iterator SI = SrcMBB->succ_begin(),
SE = SrcMBB->succ_end(); SI != SE; ++SI) {
MachineBasicBlock* TgtMBB = *SI;
if (backedges.count(std::make_pair(SrcMBB, TgtMBB)) &&
@ -44,7 +46,8 @@ void LazyLiveness::computeBackedgeChain(MachineFunction& mf,
computeBackedgeChain(mf, TgtMBB);
tv[MBB].set(preorder[TgtMBB]);
tv[MBB] |= tv[TgtMBB];
SparseBitVector<128> right = tv[TgtMBB];
tv[MBB] |= right;
}
}
@ -60,6 +63,12 @@ bool LazyLiveness::runOnMachineFunction(MachineFunction &mf) {
backedge_target.clear();
calculated.clear();
preorder.clear();
rev_preorder.clear();
rv.resize(mf.size());
tv.resize(mf.size());
preorder.resize(mf.size());
rev_preorder.reserve(mf.size());
MRI = &mf.getRegInfo();
MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
@ -106,8 +115,8 @@ bool LazyLiveness::runOnMachineFunction(MachineFunction &mf) {
for (MachineBasicBlock::succ_iterator SI = (*POI)->succ_begin(),
SE = (*POI)->succ_end(); SI != SE; ++SI)
if (!backedges.count(std::make_pair(*POI, *SI)) && tv.count(*SI)) {
SparseBitVector<128>& PBV = tv[*POI];
PBV = tv[*SI];
SparseBitVector<128> right = tv[*SI];
tv[*POI] |= right;
}
for (po_iterator<MachineBasicBlock*> POI = po_begin(&*mf.begin()),

View File

@ -19,6 +19,7 @@
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/STLExtras.h"
@ -305,9 +306,9 @@ void LiveInterval::removeRange(unsigned Start, unsigned End,
VNInfo *VNI = valnos.back();
valnos.pop_back();
VNI->~VNInfo();
} while (!valnos.empty() && valnos.back()->def == ~1U);
} while (!valnos.empty() && valnos.back()->isUnused());
} else {
ValNo->def = ~1U;
ValNo->setIsUnused(true);
}
}
}
@ -353,9 +354,9 @@ void LiveInterval::removeValNo(VNInfo *ValNo) {
VNInfo *VNI = valnos.back();
valnos.pop_back();
VNI->~VNInfo();
} while (!valnos.empty() && valnos.back()->def == ~1U);
} while (!valnos.empty() && valnos.back()->isUnused());
} else {
ValNo->def = ~1U;
ValNo->setIsUnused(true);
}
}
@ -371,9 +372,8 @@ void LiveInterval::scaleNumbering(unsigned factor) {
// Scale VNI info.
for (vni_iterator VNI = vni_begin(), VNIE = vni_end(); VNI != VNIE; ++VNI) {
VNInfo *vni = *VNI;
if (vni->def != ~0U && vni->def != ~1U) {
vni->def = InstrSlots::scale(vni->def, factor);
}
vni->def = InstrSlots::scale(vni->def, factor);
for (unsigned i = 0; i < vni->kills.size(); ++i) {
if (vni->kills[i] != 0)
@ -421,13 +421,13 @@ VNInfo *LiveInterval::findDefinedVNInfo(unsigned DefIdxOrReg) const {
return VNI;
}
/// join - Join two live intervals (this, and other) together. This applies
/// mappings to the value numbers in the LHS/RHS intervals as specified. If
/// the intervals are not joinable, this aborts.
void LiveInterval::join(LiveInterval &Other, const int *LHSValNoAssignments,
const int *RHSValNoAssignments,
SmallVector<VNInfo*, 16> &NewVNInfo) {
SmallVector<VNInfo*, 16> &NewVNInfo,
MachineRegisterInfo *MRI) {
// Determine if any of our live range values are mapped. This is uncommon, so
// we want to avoid the interval scan if not.
bool MustMapCurValNos = false;
@ -502,8 +502,18 @@ void LiveInterval::join(LiveInterval &Other, const int *LHSValNoAssignments,
}
weight += Other.weight;
if (Other.preference && !preference)
preference = Other.preference;
// Update regalloc hint if currently there isn't one.
if (TargetRegisterInfo::isVirtualRegister(reg) &&
TargetRegisterInfo::isVirtualRegister(Other.reg)) {
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(reg);
if (Hint.first == 0 && Hint.second == 0) {
std::pair<unsigned, unsigned> OtherHint =
MRI->getRegAllocationHint(Other.reg);
if (OtherHint.first || OtherHint.second)
MRI->setRegAllocationHint(reg, OtherHint.first, OtherHint.second);
}
}
}
/// MergeRangesInAsValue - Merge all of the intervals in RHS into this live
@ -582,9 +592,9 @@ void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS,
VNInfo *VNI = valnos.back();
valnos.pop_back();
VNI->~VNInfo();
} while (!valnos.empty() && valnos.back()->def == ~1U);
} while (!valnos.empty() && valnos.back()->isUnused());
} else {
V1->def = ~1U;
V1->setIsUnused(true);
}
}
}
@ -611,7 +621,7 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
else if (UnusedValNo)
ClobberValNo = UnusedValNo;
else {
UnusedValNo = ClobberValNo = getNextValue(~0U, 0, VNInfoAllocator);
UnusedValNo = ClobberValNo = getNextValue(0, 0, false, VNInfoAllocator);
ValNoMaps.insert(std::make_pair(I->valno, ClobberValNo));
}
@ -664,7 +674,7 @@ void LiveInterval::MergeInClobberRange(unsigned Start, unsigned End,
BumpPtrAllocator &VNInfoAllocator) {
// Find a value # to use for the clobber ranges. If there is already a value#
// for unknown values, use it.
VNInfo *ClobberValNo = getNextValue(~0U, 0, VNInfoAllocator);
VNInfo *ClobberValNo = getNextValue(0, 0, false, VNInfoAllocator);
iterator IP = begin();
IP = std::upper_bound(IP, end(), Start);
@ -747,24 +757,26 @@ VNInfo* LiveInterval::MergeValueNumberInto(VNInfo *V1, VNInfo *V2) {
VNInfo *VNI = valnos.back();
valnos.pop_back();
VNI->~VNInfo();
} while (valnos.back()->def == ~1U);
} while (valnos.back()->isUnused());
} else {
V1->def = ~1U;
V1->setIsUnused(true);
}
return V2;
}
void LiveInterval::Copy(const LiveInterval &RHS,
MachineRegisterInfo *MRI,
BumpPtrAllocator &VNInfoAllocator) {
ranges.clear();
valnos.clear();
preference = RHS.preference;
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(RHS.reg);
MRI->setRegAllocationHint(reg, Hint.first, Hint.second);
weight = RHS.weight;
for (unsigned i = 0, e = RHS.getNumValNums(); i != e; ++i) {
const VNInfo *VNI = RHS.getValNumInfo(i);
VNInfo *NewVNI = getNextValue(~0U, 0, VNInfoAllocator);
copyValNumInfo(NewVNI, VNI);
createValueCopy(VNI, VNInfoAllocator);
}
for (unsigned i = 0, e = RHS.ranges.size(); i != e; ++i) {
const LiveRange &LR = RHS.ranges[i];
@ -816,22 +828,22 @@ void LiveInterval::print(std::ostream &OS,
const VNInfo *vni = *i;
if (vnum) OS << " ";
OS << vnum << "@";
if (vni->def == ~1U) {
if (vni->isUnused()) {
OS << "x";
} else {
if (vni->def == ~0U)
if (!vni->isDefAccurate())
OS << "?";
else
OS << vni->def;
unsigned ee = vni->kills.size();
if (ee || vni->hasPHIKill) {
if (ee || vni->hasPHIKill()) {
OS << "-(";
for (unsigned j = 0; j != ee; ++j) {
OS << vni->kills[j];
if (j != ee-1)
OS << " ";
}
if (vni->hasPHIKill) {
if (vni->hasPHIKill()) {
if (ee)
OS << " ";
OS << "phi";

View File

@ -199,7 +199,7 @@ void LiveIntervals::computeNumbering() {
// Remap the VNInfo def index, which works the same as the
// start indices above. VN's with special sentinel defs
// don't need to be remapped.
if (vni->def != ~0U && vni->def != ~1U) {
if (vni->isDefAccurate() && !vni->isUnused()) {
unsigned index = vni->def / InstrSlots::NUM;
unsigned offset = vni->def % InstrSlots::NUM;
if (offset == InstrSlots::LOAD) {
@ -447,7 +447,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
CopyMI = mi;
// Earlyclobbers move back one.
ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
assert(ValNo->id == 0 && "First value in interval is not 0?");
@ -539,13 +539,15 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
// The new value number (#1) is defined by the instruction we claimed
// defined value #0.
VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
false, // update at *
VNInfoAllocator);
ValNo->setFlags(OldValNo->getFlags()); // * <- updating here
// Value#0 is now defined by the 2-addr instruction.
OldValNo->def = RedefIndex;
OldValNo->copy = 0;
if (MO.isEarlyClobber())
OldValNo->redefByEC = true;
OldValNo->setHasRedefByEC(true);
// Add the new live interval which replaces the range for the input copy.
LiveRange LR(DefIndex, RedefIndex, ValNo);
@ -577,12 +579,14 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
DOUT << " Removing [" << Start << "," << End << "] from: ";
interval.print(DOUT, tri_); DOUT << "\n";
interval.removeRange(Start, End);
VNI->hasPHIKill = true;
VNI->setHasPHIKill(true);
DOUT << " RESULT: "; interval.print(DOUT, tri_);
// Replace the interval with one of a NEW value number. Note that this
// value number isn't actually defined by an instruction, weird huh? :)
LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
LiveRange LR(Start, End,
interval.getNextValue(mbb->getNumber(), 0, false, VNInfoAllocator));
LR.valno->setIsPHIDef(true);
DOUT << " replace range with " << LR;
interval.addRange(LR);
interval.addKill(LR.valno, End);
@ -604,13 +608,13 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
CopyMI = mi;
ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);
unsigned killIndex = getMBBEndIdx(mbb) + 1;
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
interval.addKill(ValNo, killIndex);
ValNo->hasPHIKill = true;
ValNo->setHasPHIKill(true);
DOUT << " +" << LR;
}
}
@ -692,9 +696,9 @@ exit:
LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
bool Extend = OldLR != interval.end();
VNInfo *ValNo = Extend
? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
if (MO.isEarlyClobber() && Extend)
ValNo->redefByEC = true;
ValNo->setHasRedefByEC(true);
LiveRange LR(start, end, ValNo);
interval.addRange(LR);
interval.addKill(LR.valno, end);
@ -750,7 +754,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
DOUT << " killed";
end = getUseIndex(baseIndex) + 1;
SeenDefUse = true;
goto exit;
break;
} else if (mi->modifiesRegister(interval.reg, tri_)) {
// Another instruction redefines the register before it is ever read.
// Then the register is essentially dead at the instruction that defines
@ -759,7 +763,7 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
DOUT << " dead";
end = getDefIndex(start) + 1;
SeenDefUse = true;
goto exit;
break;
}
baseIndex += InstrSlots::NUM;
@ -771,7 +775,6 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
}
}
exit:
// Live-in register might not be used at all.
if (!SeenDefUse) {
if (isAlias) {
@ -783,7 +786,11 @@ exit:
}
}
LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
VNInfo *vni =
interval.getNextValue(MBB->getNumber(), 0, false, VNInfoAllocator);
vni->setIsPHIDef(true);
LiveRange LR(start, end, vni);
interval.addRange(LR);
interval.addKill(LR.valno, end);
DOUT << " +" << LR << '\n';
@ -896,7 +903,7 @@ LiveInterval* LiveIntervals::createInterval(unsigned reg) {
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
LiveInterval *NewLI = createInterval(li->reg);
NewLI->Copy(*li, getVNInfoAllocator());
NewLI->Copy(*li, mri_, getVNInfoAllocator());
return NewLI;
}
@ -1099,13 +1106,12 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
const VNInfo *VNI = *i;
unsigned DefIdx = VNI->def;
if (DefIdx == ~1U)
if (VNI->isUnused())
continue; // Dead val#.
// Is the def for the val# rematerializable?
if (DefIdx == ~0u)
if (!VNI->isDefAccurate())
return false;
MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
bool DefIsLoad = false;
if (!ReMatDefMI ||
!isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
@ -1450,7 +1456,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
if (HasUse) {
if (CreatedNewVReg) {
LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
nI.getNextValue(~0U, 0, VNInfoAllocator));
nI.getNextValue(0, 0, false, VNInfoAllocator));
DOUT << " +" << LR;
nI.addRange(LR);
} else {
@ -1464,7 +1470,7 @@ rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
}
if (HasDef) {
LiveRange LR(getDefIndex(index), getStoreIndex(index),
nI.getNextValue(~0U, 0, VNInfoAllocator));
nI.getNextValue(0, 0, false, VNInfoAllocator));
DOUT << " +" << LR;
nI.addRange(LR);
}
@ -1840,14 +1846,14 @@ addIntervalsForSpillsFast(const LiveInterval &li,
unsigned index = getInstructionIndex(MI);
if (HasUse) {
LiveRange LR(getLoadIndex(index), getUseIndex(index),
nI.getNextValue(~0U, 0, getVNInfoAllocator()));
nI.getNextValue(0, 0, false, getVNInfoAllocator()));
DOUT << " +" << LR;
nI.addRange(LR);
vrm.addRestorePoint(NewVReg, MI);
}
if (HasDef) {
LiveRange LR(getDefIndex(index), getStoreIndex(index),
nI.getNextValue(~0U, 0, getVNInfoAllocator()));
nI.getNextValue(0, 0, false, getVNInfoAllocator()));
DOUT << " +" << LR;
nI.addRange(LR);
vrm.addSpillPoint(NewVReg, true, MI);
@ -1961,12 +1967,11 @@ addIntervalsForSpills(const LiveInterval &li,
i != e; ++i) {
const VNInfo *VNI = *i;
unsigned VN = VNI->id;
unsigned DefIdx = VNI->def;
if (DefIdx == ~1U)
if (VNI->isUnused())
continue; // Dead val#.
// Is the def for the val# rematerializable?
MachineInstr *ReMatDefMI = (DefIdx == ~0u)
? 0 : getInstructionFromIndex(DefIdx);
MachineInstr *ReMatDefMI = VNI->isDefAccurate()
? getInstructionFromIndex(VNI->def) : 0;
bool dummy;
if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
// Remember how to remat the def of this val#.
@ -1977,7 +1982,7 @@ addIntervalsForSpills(const LiveInterval &li,
ReMatDefs[VN] = Clone;
bool CanDelete = true;
if (VNI->hasPHIKill) {
if (VNI->hasPHIKill()) {
// A kill is a phi node, not all of its uses can be rematerialized.
// It must not be deleted.
CanDelete = false;
@ -2287,8 +2292,8 @@ LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
LiveInterval& Interval = getOrCreateInterval(reg);
VNInfo* VN = Interval.getNextValue(
getInstructionIndex(startInst) + InstrSlots::DEF,
startInst, getVNInfoAllocator());
VN->hasPHIKill = true;
startInst, true, getVNInfoAllocator());
VN->setHasPHIKill(true);
VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
getMBBEndIdx(startInst->getParent()) + 1, VN);

View File

@ -359,12 +359,11 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
// That is, unless we are currently processing the last reference itself.
LastRefOrPartRef->addRegisterDead(Reg, TRI, true);
/* Partial uses. Mark register def dead and add implicit def of
sub-registers which are used.
FIXME: LiveIntervalAnalysis can't handle this yet!
EAX<dead> = op AL<imp-def>
That is, EAX def is dead but AL def extends past it.
Enable this after live interval analysis is fixed to improve codegen!
// Partial uses. Mark register def dead and add implicit def of
// sub-registers which are used.
// EAX<dead> = op AL<imp-def>
// That is, EAX def is dead but AL def extends past it.
// Enable this after live interval analysis is fixed to improve codegen!
else if (!PhysRegUse[Reg]) {
PhysRegDef[Reg]->addRegisterDead(Reg, TRI, true);
for (const unsigned *SubRegs = TRI->getSubRegisters(Reg);
@ -377,7 +376,7 @@ bool LiveVariables::HandlePhysRegKill(unsigned Reg, MachineInstr *MI) {
PartUses.erase(*SS);
}
}
} */
}
else
LastRefOrPartRef->addRegisterKilled(Reg, TRI, true);
return true;

View File

@ -16,6 +16,7 @@ using namespace llvm;
MachineRegisterInfo::MachineRegisterInfo(const TargetRegisterInfo &TRI) {
VRegInfo.reserve(256);
RegAllocHints.reserve(256);
RegClass2VRegMap.resize(TRI.getNumRegClasses()+1); // RC ID starts at 1.
UsedPhysRegs.resize(TRI.getNumRegs());
@ -64,6 +65,7 @@ MachineRegisterInfo::createVirtualRegister(const TargetRegisterClass *RegClass){
// Add a reg, but keep track of whether the vector reallocated or not.
void *ArrayBase = VRegInfo.empty() ? 0 : &VRegInfo[0];
VRegInfo.push_back(std::make_pair(RegClass, (MachineOperand*)0));
RegAllocHints.push_back(std::make_pair(0, 0));
if (!((&VRegInfo[0] == ArrayBase || VRegInfo.size() == 1)))
// The vector reallocated, handle this now.

View File

@ -343,7 +343,7 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator());
CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
return SS;
}
@ -637,8 +637,9 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
if (Phis.count(MBB)) return Phis[MBB];
unsigned StartIndex = LIs->getMBBStartIdx(MBB);
VNInfo *RetVNI = Phis[MBB] = LI->getNextValue(~0U, /*FIXME*/ 0,
LIs->getVNInfoAllocator());
VNInfo *RetVNI = Phis[MBB] =
LI->getNextValue(0, /*FIXME*/ 0, false, LIs->getVNInfoAllocator());
if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
// If there are no uses or defs between our starting point and the
@ -654,7 +655,7 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
IncomingVNs[*PI] = Incoming;
}
if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill) {
if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
VNInfo* OldVN = RetVNI;
VNInfo* NewVN = IncomingVNs.begin()->second;
VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
@ -678,7 +679,7 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
// VNInfo to represent the joined value.
for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
I->second->hasPHIKill = true;
I->second->setHasPHIKill(true);
unsigned KillIndex = LIs->getMBBEndIdx(I->first);
if (!LiveInterval::isKill(I->second, KillIndex))
LI->addKill(I->second, KillIndex);
@ -730,7 +731,9 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
unsigned DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = LiveIntervals::getDefIndex(DefIdx);
VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);
assert(DI->getOpcode() != TargetInstrInfo::PHI &&
"Following NewVN isPHIDef flag incorrect. Fix me!");
VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);
// If the def is a move, set the copy field.
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
@ -793,7 +796,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
// Bail out if we ever encounter a valno that has a PHI kill. We can't
// renumber these.
if (OldVN->hasPHIKill) return;
if (OldVN->hasPHIKill()) return;
VNsToCopy.push_back(OldVN);
@ -823,9 +826,7 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
VNInfo* OldVN = *OI;
// Copy the valno over
VNInfo* NewVN = NewLI.getNextValue(OldVN->def, OldVN->copy,
LIs->getVNInfoAllocator());
NewLI.copyValNumInfo(NewVN, OldVN);
VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);
// Remove the valno from the old interval
@ -873,7 +874,7 @@ bool PreAllocSplitting::Rematerialize(unsigned vreg, VNInfo* ValNo,
MachineBasicBlock::iterator KillPt = BarrierMBB->end();
unsigned KillIdx = 0;
if (ValNo->def == ~0U || DefMI->getParent() == BarrierMBB)
if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
else
KillPt = findNextEmptySlot(DefMI->getParent(), DefMI, KillIdx);
@ -942,7 +943,7 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator());
CurrSValNo = CurrSLI->getNextValue(0, 0, false, LSs->getVNInfoAllocator());
}
return FMI;
@ -1032,13 +1033,13 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
CurrLI->FindLiveRangeContaining(LIs->getUseIndex(BarrierIdx));
VNInfo *ValNo = LR->valno;
if (ValNo->def == ~1U) {
if (ValNo->isUnused()) {
// Defined by a dead def? How can this be?
assert(0 && "Val# is defined by a dead def?");
abort();
}
MachineInstr *DefMI = (ValNo->def != ~0U)
MachineInstr *DefMI = ValNo->isDefAccurate()
? LIs->getInstructionFromIndex(ValNo->def) : NULL;
// If this would create a new join point, do not split.
@ -1072,8 +1073,8 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
unsigned SpillIndex = 0;
MachineInstr *SpillMI = NULL;
int SS = -1;
if (ValNo->def == ~0U) {
// If it's defined by a phi, we must split just before the barrier.
if (!ValNo->isDefAccurate()) {
// If we don't know where the def is we must split just before the barrier.
if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
BarrierMBB, SS, RefsInMBB))) {
SpillIndex = LIs->getInstructionIndex(SpillMI);
@ -1254,17 +1255,16 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
// We don't currently try to handle definitions with PHI kills, because
// it would involve processing more than one VNInfo at once.
if (CurrVN->hasPHIKill) continue;
if (CurrVN->hasPHIKill()) continue;
// We also don't try to handle the results of PHI joins, since there's
// no defining instruction to analyze.
unsigned DefIdx = CurrVN->def;
if (DefIdx == ~0U || DefIdx == ~1U) continue;
if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;
// We're only interested in eliminating cruft introduced by the splitter,
// which is of the form load-use or load-use-store. First, check that the
// definition is a load, and remember what stack slot we loaded it from.
MachineInstr* DefMI = LIs->getInstructionFromIndex(DefIdx);
MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
int FrameIndex;
if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
@ -1383,7 +1383,7 @@ bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
if (DefMBB == BarrierMBB)
return false;
if (LR->valno->hasPHIKill)
if (LR->valno->hasPHIKill())
return false;
unsigned MBBEnd = LIs->getMBBEndIdx(BarrierMBB);

View File

@ -281,7 +281,8 @@ namespace {
/// getFreePhysReg - return a free physical register for this virtual
/// register interval if we have one, otherwise return 0.
unsigned getFreePhysReg(LiveInterval* cur);
unsigned getFreePhysReg(const TargetRegisterClass *RC,
unsigned getFreePhysReg(LiveInterval* cur,
const TargetRegisterClass *RC,
unsigned MaxInactiveCount,
SmallVector<unsigned, 256> &inactiveCounts,
bool SkipDGRegs);
@ -352,11 +353,12 @@ void RALinScan::ComputeRelatedRegClasses() {
/// different register classes or because the coalescer was overly
/// conservative.
unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
if ((cur.preference && cur.preference == Reg) || !cur.containsOneValue())
unsigned Preference = vrm_->getRegAllocPref(cur.reg);
if ((Preference && Preference == Reg) || !cur.containsOneValue())
return Reg;
VNInfo *vni = cur.begin()->valno;
if (!vni->def || vni->def == ~1U || vni->def == ~0U)
if (!vni->def || vni->isUnused() || !vni->isDefAccurate())
return Reg;
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg, PhysReg;
@ -584,7 +586,7 @@ void RALinScan::linearScan()
// register allocator had to spill other registers in its register class.
if (ls_->getNumIntervals() == 0)
return;
if (!vrm_->FindUnusedRegisters(tri_, li_))
if (!vrm_->FindUnusedRegisters(li_))
return;
}
@ -743,7 +745,7 @@ static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
if (SI.hasAtLeastOneValue())
VNI = SI.getValNumInfo(0);
else
VNI = SI.getNextValue(~0U, 0, ls_->getVNInfoAllocator());
VNI = SI.getNextValue(0, 0, false, ls_->getVNInfoAllocator());
LiveInterval &RI = li_->getInterval(cur->reg);
// FIXME: This may be overly conservative.
@ -897,7 +899,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
// This is an implicitly defined live interval, just assign any register.
const TargetRegisterClass *RC = mri_->getRegClass(cur->reg);
if (cur->empty()) {
unsigned physReg = cur->preference;
unsigned physReg = vrm_->getRegAllocPref(cur->reg);
if (!physReg)
physReg = *RC->allocation_order_begin(*mf_);
DOUT << tri_->getName(physReg) << '\n';
@ -917,9 +919,9 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
// register class, then we should try to assign it the same register.
// This can happen when the move is from a larger register class to a smaller
// one, e.g. X86::mov32to32_. These move instructions are not coalescable.
if (!cur->preference && cur->hasAtLeastOneValue()) {
if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
VNInfo *vni = cur->begin()->valno;
if (vni->def && vni->def != ~1U && vni->def != ~0U) {
if (vni->def && !vni->isUnused() && vni->isDefAccurate()) {
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
if (CopyMI &&
@ -935,7 +937,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
if (DstSubReg)
Reg = tri_->getMatchingSuperReg(Reg, DstSubReg, RC);
if (Reg && allocatableRegs_[Reg] && RC->contains(Reg))
cur->preference = Reg;
mri_->setRegAllocationHint(cur->reg, 0, Reg);
}
}
}
@ -1044,7 +1046,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
if (LiveInterval *NextReloadLI = hasNextReloadInterval(cur)) {
// "Downgrade" physReg to try to keep physReg from being allocated until
// the next reload from the same SS is allocated.
NextReloadLI->preference = physReg;
mri_->setRegAllocationHint(NextReloadLI->reg, 0, physReg);
DowngradeRegister(cur, physReg);
}
return;
@ -1071,7 +1073,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
// Find a register to spill.
float minWeight = HUGE_VALF;
unsigned minReg = 0; /*cur->preference*/; // Try the pref register first.
unsigned minReg = 0;
bool Found = false;
std::vector<std::pair<unsigned,float> > RegsWeights;
@ -1290,7 +1292,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
// If the interval has a preference, it must be defined by a copy. Clear the
// preference now since the source interval allocation may have been
// undone as well.
i->preference = 0;
mri_->setRegAllocationHint(i->reg, 0, 0);
else {
UpgradeRegister(ii->second);
}
@ -1346,15 +1348,23 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur)
}
}
unsigned RALinScan::getFreePhysReg(const TargetRegisterClass *RC,
unsigned RALinScan::getFreePhysReg(LiveInterval* cur,
const TargetRegisterClass *RC,
unsigned MaxInactiveCount,
SmallVector<unsigned, 256> &inactiveCounts,
bool SkipDGRegs) {
unsigned FreeReg = 0;
unsigned FreeRegInactiveCount = 0;
TargetRegisterClass::iterator I = RC->allocation_order_begin(*mf_);
TargetRegisterClass::iterator E = RC->allocation_order_end(*mf_);
std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(cur->reg);
// Resolve second part of the hint (if possible) given the current allocation.
unsigned physReg = Hint.second;
if (physReg &&
TargetRegisterInfo::isVirtualRegister(physReg) && vrm_->hasPhys(physReg))
physReg = vrm_->getPhys(physReg);
TargetRegisterClass::iterator I, E;
tie(I, E) = tri_->getAllocationOrder(RC, Hint.first, physReg, *mf_);
assert(I != E && "No allocatable register in this register class!");
// Scan for the first available register.
@ -1377,7 +1387,7 @@ unsigned RALinScan::getFreePhysReg(const TargetRegisterClass *RC,
// return this register.
if (FreeReg == 0 || FreeRegInactiveCount == MaxInactiveCount)
return FreeReg;
// Continue scanning the registers, looking for the one with the highest
// inactive count. Alkis found that this reduced register pressure very
// slightly on X86 (in rev 1.94 of this file), though this should probably be
@ -1428,20 +1438,21 @@ unsigned RALinScan::getFreePhysReg(LiveInterval *cur) {
// If the copy coalescer has assigned a "preferred" register, check if it's
// available first.
if (cur->preference) {
DOUT << "(preferred: " << tri_->getName(cur->preference) << ") ";
if (isRegAvail(cur->preference) &&
RC->contains(cur->preference))
return cur->preference;
unsigned Preference = vrm_->getRegAllocPref(cur->reg);
if (Preference) {
DOUT << "(preferred: " << tri_->getName(Preference) << ") ";
if (isRegAvail(Preference) &&
RC->contains(Preference))
return Preference;
}
if (!DowngradedRegs.empty()) {
unsigned FreeReg = getFreePhysReg(RC, MaxInactiveCount, inactiveCounts,
unsigned FreeReg = getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts,
true);
if (FreeReg)
return FreeReg;
}
return getFreePhysReg(RC, MaxInactiveCount, inactiveCounts, false);
return getFreePhysReg(cur, RC, MaxInactiveCount, inactiveCounts, false);
}
FunctionPass* llvm::createLinearScanRegisterAllocator() {

View File

@ -651,7 +651,7 @@ void PBQPRegAlloc::addStackInterval(const LiveInterval *spilled,
if (stackInterval.getNumValNums() != 0)
vni = stackInterval.getValNumInfo(0);
else
vni = stackInterval.getNextValue(~0U, 0, lss->getVNInfoAllocator());
vni = stackInterval.getNextValue(0, 0, false, lss->getVNInfoAllocator());
LiveInterval &rhsInterval = lis->getInterval(spilled->reg);
stackInterval.MergeRangesInAsValue(rhsInterval, vni);
@ -733,8 +733,7 @@ void PBQPRegAlloc::finalizeAlloc() const {
itr != end; ++itr) {
LiveInterval *li = *itr;
unsigned physReg = li->preference;
unsigned physReg = vrm->getRegAllocPref(li->reg);
if (physReg == 0) {
const TargetRegisterClass *liRC = mri->getRegClass(li->reg);
physReg = *liRC->allocation_order_begin(*mf);

View File

@ -47,7 +47,6 @@
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/DebugLoc.h"
#include "llvm/CodeGen/DwarfWriter.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Target/TargetData.h"
@ -361,7 +360,7 @@ bool FastISel::SelectCall(User *I) {
// Returned ID is 0 if this is unbalanced "end of inlined
// scope". This could happen if optimizer eats dbg intrinsics
// or "beginning of inlined scope" is not recoginized due to
// missing location info. In such cases, do ignore this region.end.
// missing location info. In such cases, ignore this region.end.
BuildMI(MBB, DL, II).addImm(ID);
} else {
const TargetInstrDesc &II = TII.get(TargetInstrInfo::DBG_LABEL);

View File

@ -2768,6 +2768,53 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node,
ISD::SETULT : ISD::SETUGT));
break;
}
case ISD::UMULO:
case ISD::SMULO: {
MVT VT = Node->getValueType(0);
SDValue LHS = Node->getOperand(0);
SDValue RHS = Node->getOperand(1);
SDValue BottomHalf;
SDValue TopHalf;
static unsigned Ops[2][3] =
{ { ISD::MULHU, ISD::UMUL_LOHI, ISD::ZERO_EXTEND },
{ ISD::MULHS, ISD::SMUL_LOHI, ISD::SIGN_EXTEND }};
bool isSigned = Node->getOpcode() == ISD::SMULO;
if (TLI.isOperationLegalOrCustom(Ops[isSigned][0], VT)) {
BottomHalf = DAG.getNode(ISD::MUL, dl, VT, LHS, RHS);
TopHalf = DAG.getNode(Ops[isSigned][0], dl, VT, LHS, RHS);
} else if (TLI.isOperationLegalOrCustom(Ops[isSigned][1], VT)) {
BottomHalf = DAG.getNode(Ops[isSigned][1], dl, DAG.getVTList(VT, VT), LHS,
RHS);
TopHalf = BottomHalf.getValue(1);
} else if (TLI.isTypeLegal(MVT::getIntegerVT(VT.getSizeInBits() * 2))) {
MVT WideVT = MVT::getIntegerVT(VT.getSizeInBits() * 2);
LHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, LHS);
RHS = DAG.getNode(Ops[isSigned][2], dl, WideVT, RHS);
Tmp1 = DAG.getNode(ISD::MUL, dl, WideVT, LHS, RHS);
BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
DAG.getIntPtrConstant(0));
TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT, Tmp1,
DAG.getIntPtrConstant(1));
} else {
// FIXME: We should be able to fall back to a libcall with an illegal
// type in some cases.
// Also, we can fall back to a division in some cases, but that's a big
// performance hit in the general case.
assert(0 && "Don't know how to expand this operation yet!");
}
if (isSigned) {
Tmp1 = DAG.getConstant(VT.getSizeInBits() - 1, TLI.getShiftAmountTy());
Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, Tmp1);
TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf, Tmp1,
ISD::SETNE);
} else {
TopHalf = DAG.getSetCC(dl, TLI.getSetCCResultType(VT), TopHalf,
DAG.getConstant(0, VT), ISD::SETNE);
}
Results.push_back(BottomHalf);
Results.push_back(TopHalf);
break;
}
case ISD::BUILD_PAIR: {
MVT PairTy = Node->getValueType(0);
Tmp1 = DAG.getNode(ISD::ZERO_EXTEND, dl, PairTy, Node->getOperand(0));
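
For context: the new UMULO/SMULO expansion computes the product's bottom and top halves and signals overflow when the top half is nonzero (unsigned) or differs from the sign extension of the bottom half (signed). A minimal standalone sketch of the same check at a fixed 32-bit width, using plain C++ widening instead of SelectionDAG nodes (the helper names and the two's-complement narrowing are assumptions of the sketch):

// mul_overflow_sketch.cpp - mirrors the widen, split, compare strategy above.
#include <cassert>
#include <cstdint>

// Unsigned: overflow iff the top half of the double-width product is nonzero.
bool umul32Overflows(uint32_t a, uint32_t b, uint32_t &lo) {
  uint64_t wide = uint64_t(a) * uint64_t(b);
  lo = uint32_t(wide);              // bottom half is the result
  return uint32_t(wide >> 32) != 0; // top half
}

// Signed: overflow iff the top half differs from the sign extension of the
// bottom half (bottom half arithmetically shifted right by 31).
bool smul32Overflows(int32_t a, int32_t b, int32_t &lo) {
  int64_t wide = int64_t(a) * int64_t(b);
  lo = int32_t(uint32_t(wide));     // bottom half (two's-complement wrap assumed)
  int32_t hi = int32_t(wide >> 32); // top half
  return hi != (lo >> 31);
}

int main() {
  uint32_t u;
  assert(!umul32Overflows(3, 5, u) && u == 15);
  assert(umul32Overflows(0x80000000u, 2, u));

  int32_t s;
  assert(!smul32Overflows(-4, 3, s) && s == -12);
  assert(smul32Overflows(0x40000000, 4, s));
  return 0;
}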

View File

@ -95,14 +95,13 @@ void DAGTypeLegalizer::ExpandRes_BIT_CONVERT(SDNode *N, SDValue &Lo,
if (InVT.isVector() && OutVT.isInteger()) {
// Handle cases like i64 = BIT_CONVERT v1i64 on x86, where the operand
// is legal but the result is not.
MVT NVT = MVT::getVectorVT(TLI.getTypeToTransformTo(OutVT), 2);
MVT NVT = MVT::getVectorVT(NOutVT, 2);
if (isTypeLegal(NVT)) {
SDValue CastInOp = DAG.getNode(ISD::BIT_CONVERT, dl, NVT, InOp);
MVT EltNVT = NVT.getVectorElementType();
Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltNVT, CastInOp,
Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
DAG.getIntPtrConstant(0));
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltNVT, CastInOp,
Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, NOutVT, CastInOp,
DAG.getIntPtrConstant(1));
if (TLI.isBigEndian())
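
For context: the ExpandRes_BIT_CONVERT fix reuses the expanded result type (NOutVT) for both extracted elements when, for example, an i64 is bit-converted through a v2i32, and the trailing isBigEndian() check (cut off in this hunk) presumably swaps the two halves on big-endian targets. A standalone sketch of that split, with no SelectionDAG types involved:

// bitconvert_split_sketch.cpp - split a 64-bit value into two 32-bit elements.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

int main() {
  uint64_t in = 0x1122334455667788ULL;

  uint32_t elts[2];
  std::memcpy(elts, &in, sizeof(in));  // bit-convert: same bytes, new element type

  uint32_t lo = elts[0], hi = elts[1]; // extract element 0 / element 1
  bool hostIsBigEndian = (elts[0] == 0x11223344u);
  if (hostIsBigEndian)
    std::swap(lo, hi);                 // the halves are flipped on big-endian hosts

  assert(((uint64_t(hi) << 32) | lo) == in);
  return 0;
}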

View File

@ -5317,8 +5317,12 @@ void SelectionDAGLowering::visitInlineAsm(CallSite CS) {
if ((OpFlag & 7) == 2 /*REGDEF*/
|| (OpFlag & 7) == 6 /* EARLYCLOBBER REGDEF */) {
// Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
assert(!OpInfo.isIndirect &&
"Don't know how to handle tied indirect register inputs yet!");
if (OpInfo.isIndirect) {
cerr << "llvm: error: "
"Don't know how to handle tied indirect "
"register inputs yet!\n";
exit(1);
}
RegsForValue MatchedRegs;
MatchedRegs.TLI = &TLI;
MatchedRegs.ValueVTs.push_back(InOperandVal.getValueType());

View File

@ -171,6 +171,8 @@ static void InitLibcallNames(const char **Names) {
Names[RTLIB::FPROUND_PPCF128_F32] = "__trunctfsf2";
Names[RTLIB::FPROUND_F80_F64] = "__truncxfdf2";
Names[RTLIB::FPROUND_PPCF128_F64] = "__trunctfdf2";
Names[RTLIB::FPTOSINT_F32_I8] = "__fixsfi8";
Names[RTLIB::FPTOSINT_F32_I16] = "__fixsfi16";
Names[RTLIB::FPTOSINT_F32_I32] = "__fixsfsi";
Names[RTLIB::FPTOSINT_F32_I64] = "__fixsfdi";
Names[RTLIB::FPTOSINT_F32_I128] = "__fixsfti";
@ -183,6 +185,8 @@ static void InitLibcallNames(const char **Names) {
Names[RTLIB::FPTOSINT_PPCF128_I32] = "__fixtfsi";
Names[RTLIB::FPTOSINT_PPCF128_I64] = "__fixtfdi";
Names[RTLIB::FPTOSINT_PPCF128_I128] = "__fixtfti";
Names[RTLIB::FPTOUINT_F32_I8] = "__fixunssfi8";
Names[RTLIB::FPTOUINT_F32_I16] = "__fixunssfi16";
Names[RTLIB::FPTOUINT_F32_I32] = "__fixunssfsi";
Names[RTLIB::FPTOUINT_F32_I64] = "__fixunssfdi";
Names[RTLIB::FPTOUINT_F32_I128] = "__fixunssfti";
@ -271,6 +275,10 @@ RTLIB::Libcall RTLIB::getFPROUND(MVT OpVT, MVT RetVT) {
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOSINT_F32_I8;
if (RetVT == MVT::i16)
return FPTOSINT_F32_I16;
if (RetVT == MVT::i32)
return FPTOSINT_F32_I32;
if (RetVT == MVT::i64)
@ -306,6 +314,10 @@ RTLIB::Libcall RTLIB::getFPTOSINT(MVT OpVT, MVT RetVT) {
/// UNKNOWN_LIBCALL if there is none.
RTLIB::Libcall RTLIB::getFPTOUINT(MVT OpVT, MVT RetVT) {
if (OpVT == MVT::f32) {
if (RetVT == MVT::i8)
return FPTOUINT_F32_I8;
if (RetVT == MVT::i16)
return FPTOUINT_F32_I16;
if (RetVT == MVT::i32)
return FPTOUINT_F32_I32;
if (RetVT == MVT::i64)
@ -2584,8 +2596,12 @@ bool TargetLowering::CheckTailCallReturnConstraints(CallSDNode *TheCall,
// Check that operand of the RET node sources from the CALL node. The RET node
// has at least two operands. Operand 0 holds the chain. Operand 1 holds the
// value.
// We also need to check that there is no code in between the call and the
// return. Hence we also check that the incoming chain to the return sources
// from the outgoing chain of the call.
if (NumOps > 1 &&
IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0))
IgnoreHarmlessInstructions(Ret.getOperand(1)) == SDValue(TheCall,0) &&
Ret.getOperand(0) == SDValue(TheCall, TheCall->getNumValues()-1))
return true;
// void return: The RET node has the chain result value of the CALL node as
// input.
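
For context: the RTLIB changes register __fixsfi8/__fixsfi16 (and the unsigned __fixunssfi8/__fixunssfi16) and teach getFPTOSINT/getFPTOUINT to return them for i8/i16 results. A standalone sketch of the same width-to-symbol mapping for the f32 signed case, reusing only names that appear in this hunk (the function name and plain-unsigned interface are assumptions of the sketch):

// fptosi_libcall_sketch.cpp - width-to-libcall lookup in the style of getFPTOSINT.
#include <cassert>
#include <cstring>

const char *getFPToSIntLibcallForF32(unsigned retBits) {
  switch (retBits) {
  case 8:   return "__fixsfi8";
  case 16:  return "__fixsfi16";
  case 32:  return "__fixsfsi";
  case 64:  return "__fixsfdi";
  case 128: return "__fixsfti";
  default:  return 0; // corresponds to UNKNOWN_LIBCALL
  }
}

int main() {
  assert(std::strcmp(getFPToSIntLibcallForF32(16), "__fixsfi16") == 0);
  assert(getFPToSIntLibcallForF32(48) == 0);
  return 0;
}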

View File

@ -141,7 +141,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
// The live interval of ECX is represented as this:
// %reg20,inf = [46,47:1)[174,230:0) 0@174-(230) 1@46-(47)
// The coalescer has no idea there was a def in the middle of [174,230].
if (AValNo->redefByEC)
if (AValNo->hasRedefByEC())
return false;
// If AValNo is defined as a copy from IntB, we can potentially process this.
@ -203,7 +203,8 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
LiveInterval &SRLI = li_->getInterval(*SR);
SRLI.addRange(LiveRange(FillerStart, FillerEnd,
SRLI.getNextValue(FillerStart, 0, li_->getVNInfoAllocator())));
SRLI.getNextValue(FillerStart, 0, true,
li_->getVNInfoAllocator())));
}
}
@ -304,8 +305,10 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
assert(ALR != IntA.end() && "Live range not found!");
VNInfo *AValNo = ALR->valno;
// If other defs can reach uses of this def, then it's not safe to perform
// the optimization.
if (AValNo->def == ~0U || AValNo->def == ~1U || AValNo->hasPHIKill)
// the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be
// tested?
if (AValNo->isPHIDef() || !AValNo->isDefAccurate() ||
AValNo->isUnused() || AValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(AValNo->def);
const TargetInstrDesc &TID = DefMI->getDesc();
@ -351,7 +354,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
unsigned OpIdx = NewMI->findRegisterUseOperandIdx(IntA.reg, false);
NewMI->getOperand(OpIdx).setIsKill();
bool BHasPHIKill = BValNo->hasPHIKill;
bool BHasPHIKill = BValNo->hasPHIKill();
SmallVector<VNInfo*, 4> BDeadValNos;
SmallVector<unsigned, 4> BKills;
std::map<unsigned, unsigned> BExtend;
@ -403,7 +406,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
// extended to the end of the existing live range defined by the copy.
unsigned DefIdx = li_->getDefIndex(UseIdx);
const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
BHasPHIKill |= DLR->valno->hasPHIKill;
BHasPHIKill |= DLR->valno->hasPHIKill();
assert(DLR->valno->def == DefIdx);
BDeadValNos.push_back(DLR->valno);
BExtend[DLR->start] = DLR->end;
@ -462,7 +465,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
}
}
IntB.addKills(ValNo, BKills);
ValNo->hasPHIKill = BHasPHIKill;
ValNo->setHasPHIKill(BHasPHIKill);
DOUT << " result = "; IntB.print(DOUT, tri_);
DOUT << "\n";
@ -578,8 +581,10 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
assert(SrcLR != SrcInt.end() && "Live range not found!");
VNInfo *ValNo = SrcLR->valno;
// If other defs can reach uses of this def, then it's not safe to perform
// the optimization.
if (ValNo->def == ~0U || ValNo->def == ~1U || ValNo->hasPHIKill)
// the optimization. FIXME: Do isPHIDef and isDefAccurate both need to be
// tested?
if (ValNo->isPHIDef() || !ValNo->isDefAccurate() ||
ValNo->isUnused() || ValNo->hasPHIKill())
return false;
MachineInstr *DefMI = li_->getInstructionFromIndex(ValNo->def);
const TargetInstrDesc &TID = DefMI->getDesc();
@ -616,19 +621,17 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
}
MachineBasicBlock::iterator MII = next(MachineBasicBlock::iterator(CopyMI));
CopyMI->removeFromParent();
tii_->reMaterialize(*MBB, MII, DstReg, DefMI);
MachineInstr *NewMI = prior(MII);
if (checkForDeadDef) {
// PR4090 fix: Trim interval failed because there was no use of the
// source interval in this MBB. If the def is in this MBB too then we
// should mark it dead:
if (DefMI->getParent() == MBB) {
DefMI->addRegisterDead(SrcInt.reg, tri_);
SrcLR->end = SrcLR->start + 1;
}
// PR4090 fix: Trim interval failed because there was no use of the
// source interval in this MBB. If the def is in this MBB too then we
// should mark it dead:
if (DefMI->getParent() == MBB) {
DefMI->addRegisterDead(SrcInt.reg, tri_);
SrcLR->end = SrcLR->start + 1;
}
}
// CopyMI may have implicit operands, transfer them over to the newly
@ -647,7 +650,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
}
li_->ReplaceMachineInstrInMaps(CopyMI, NewMI);
MBB->getParent()->DeleteMachineInstr(CopyMI);
CopyMI->eraseFromParent();
ReMatCopies.insert(CopyMI);
ReMatDefs.insert(DefMI);
++NumReMats;
@ -673,7 +676,7 @@ bool SimpleRegisterCoalescing::isBackEdgeCopy(MachineInstr *CopyMI,
return false;
unsigned KillIdx = li_->getMBBEndIdx(MBB) + 1;
if (DstLR->valno->kills.size() == 1 &&
DstLR->valno->kills[0] == KillIdx && DstLR->valno->hasPHIKill)
DstLR->valno->kills[0] == KillIdx && DstLR->valno->hasPHIKill())
return true;
return false;
}
@ -937,7 +940,7 @@ bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI,
LiveInterval::iterator LR = li.FindLiveRangeContaining(CopyIdx);
if (LR == li.end())
return false;
if (LR->valno->hasPHIKill)
if (LR->valno->hasPHIKill())
return false;
if (LR->valno->def != CopyIdx)
return false;
@ -965,11 +968,11 @@ bool SimpleRegisterCoalescing::CanCoalesceWithImpDef(MachineInstr *CopyMI,
}
/// RemoveCopiesFromValNo - The specified value# is defined by an implicit
/// def and it is being removed. Turn all copies from this value# into
/// identity copies so they will be removed.
void SimpleRegisterCoalescing::RemoveCopiesFromValNo(LiveInterval &li,
VNInfo *VNI) {
/// TurnCopiesFromValNoToImpDefs - The specified value# is defined by an
/// implicit_def and it is being removed. Turn all copies from this value#
/// into implicit_defs.
void SimpleRegisterCoalescing::TurnCopiesFromValNoToImpDefs(LiveInterval &li,
VNInfo *VNI) {
SmallVector<MachineInstr*, 4> ImpDefs;
MachineOperand *LastUse = NULL;
unsigned LastUseIdx = li_->getUseIndex(VNI->def);
@ -979,9 +982,8 @@ void SimpleRegisterCoalescing::RemoveCopiesFromValNo(LiveInterval &li,
MachineInstr *MI = &*RI;
++RI;
if (MO->isDef()) {
if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
ImpDefs.push_back(MI);
}
continue;
}
if (JoinedCopies.count(MI))
@ -994,13 +996,18 @@ void SimpleRegisterCoalescing::RemoveCopiesFromValNo(LiveInterval &li,
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
SrcReg == li.reg) {
// Each use MI may have multiple uses of this register. Change them all.
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
if (MO.isReg() && MO.getReg() == li.reg)
MO.setReg(DstReg);
}
JoinedCopies.insert(MI);
// Change it to an implicit_def.
MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
for (int i = MI->getNumOperands() - 1, e = 0; i > e; --i)
MI->RemoveOperand(i);
// It's no longer a copy, update the valno it defines.
unsigned DefIdx = li_->getDefIndex(UseIdx);
LiveInterval &DstInt = li_->getInterval(DstReg);
LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(DefIdx);
assert(DLR != DstInt.end() && "Live range not found!");
assert(DLR->valno->copy == MI);
DLR->valno->copy = NULL;
ReMatCopies.insert(MI);
} else if (UseIdx > LastUseIdx) {
LastUseIdx = UseIdx;
LastUse = MO;
@ -1265,6 +1272,17 @@ SimpleRegisterCoalescing::CanJoinInsertSubRegToPhysReg(unsigned DstReg,
return true;
}
/// getRegAllocPreference - Return register allocation preference register.
///
static unsigned getRegAllocPreference(unsigned Reg, MachineFunction &MF,
MachineRegisterInfo *MRI,
const TargetRegisterInfo *TRI) {
if (TargetRegisterInfo::isPhysicalRegister(Reg))
return 0;
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
return TRI->ResolveRegAllocHint(Hint.first, Hint.second, MF);
}
/// JoinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
/// which are the src/dst of the copy instruction CopyMI. This returns true
/// if the copy was successfully coalesced away. If it is not currently
@ -1566,7 +1584,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
if (PhysJoinTweak) {
if (SrcIsPhys) {
if (!isWinToJoinVRWithSrcPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) {
DstInt.preference = SrcReg;
mri_->setRegAllocationHint(DstInt.reg, 0, SrcReg);
++numAborts;
DOUT << "\tMay tie down a physical register, abort!\n";
Again = true; // May be possible to coalesce later.
@ -1574,7 +1592,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
}
} else {
if (!isWinToJoinVRWithDstPhysReg(CopyMI, CopyMBB, DstInt, SrcInt)) {
SrcInt.preference = DstReg;
mri_->setRegAllocationHint(SrcInt.reg, 0, DstReg);
++numAborts;
DOUT << "\tMay tie down a physical register, abort!\n";
Again = true; // May be possible to coalesce later.
@ -1598,7 +1616,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
if (Length > Threshold &&
(((float)std::distance(mri_->use_begin(JoinVReg),
mri_->use_end()) / Length) < Ratio)) {
JoinVInt.preference = JoinPReg;
mri_->setRegAllocationHint(JoinVInt.reg, 0, JoinPReg);
++numAborts;
DOUT << "\tMay tie down a physical register, abort!\n";
Again = true; // May be possible to coalesce later.
@ -1669,9 +1687,9 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
E = SavedLI->vni_end(); I != E; ++I) {
const VNInfo *ValNo = *I;
VNInfo *NewValNo = RealInt.getNextValue(ValNo->def, ValNo->copy,
false, // updated at *
li_->getVNInfoAllocator());
NewValNo->hasPHIKill = ValNo->hasPHIKill;
NewValNo->redefByEC = ValNo->redefByEC;
NewValNo->setFlags(ValNo->getFlags()); // * updated here.
RealInt.addKills(NewValNo, ValNo->kills);
RealInt.MergeValueInAsValue(*SavedLI, ValNo, NewValNo);
}
@ -1691,7 +1709,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
!SrcIsPhys && !DstIsPhys) {
if ((isExtSubReg && !Swapped) ||
((isInsSubReg || isSubRegToReg) && Swapped)) {
ResSrcInt->Copy(*ResDstInt, li_->getVNInfoAllocator());
ResSrcInt->Copy(*ResDstInt, mri_, li_->getVNInfoAllocator());
std::swap(SrcReg, DstReg);
std::swap(ResSrcInt, ResDstInt);
}
@ -1710,7 +1728,8 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
for (LiveInterval::const_vni_iterator i = ResSrcInt->vni_begin(),
e = ResSrcInt->vni_end(); i != e; ++i) {
const VNInfo *vni = *i;
if (!vni->def || vni->def == ~1U || vni->def == ~0U)
// FIXME: Do isPHIDef and isDefAccurate both need to be tested?
if (!vni->def || vni->isUnused() || vni->isPHIDef() || !vni->isDefAccurate())
continue;
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
unsigned NewSrcReg, NewDstReg, NewSrcSubIdx, NewDstSubIdx;
@ -1747,6 +1766,9 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// being merged.
li_->removeInterval(SrcReg);
// Update regalloc hint.
tri_->UpdateRegAllocHint(SrcReg, DstReg, *mf_);
// Manually deleted the live interval copy.
if (SavedLI) {
SavedLI->clear();
@ -1762,7 +1784,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
VNInfo *ImpVal = LR->valno;
assert(ImpVal->def == CopyIdx);
unsigned NextDef = LR->end;
RemoveCopiesFromValNo(*ResDstInt, ImpVal);
TurnCopiesFromValNoToImpDefs(*ResDstInt, ImpVal);
ResDstInt->removeValNo(ImpVal);
LR = ResDstInt->FindLiveRangeContaining(NextDef);
if (LR != ResDstInt->end() && LR->valno->def == NextDef) {
@ -1778,11 +1800,12 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
// If the resulting interval has a preference that no longer fits because of subreg
// coalescing, just clear the preference.
if (ResDstInt->preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
unsigned Preference = getRegAllocPreference(ResDstInt->reg, *mf_, mri_, tri_);
if (Preference && (isExtSubReg || isInsSubReg || isSubRegToReg) &&
TargetRegisterInfo::isVirtualRegister(ResDstInt->reg)) {
const TargetRegisterClass *RC = mri_->getRegClass(ResDstInt->reg);
if (!RC->contains(ResDstInt->preference))
ResDstInt->preference = 0;
if (!RC->contains(Preference))
mri_->setRegAllocationHint(ResDstInt->reg, 0, 0);
}
DOUT << "\n\t\tJoined. Result = "; ResDstInt->print(DOUT, tri_);
@ -1856,7 +1879,8 @@ bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li,
unsigned SrcReg = li_->getVNInfoSourceReg(LR->valno);
if (SrcReg == Reg)
return true;
if (LR->valno->def == ~0U &&
// FIXME: Do isPHIDef and isDefAccurate both need to be tested?
if ((LR->valno->isPHIDef() || !LR->valno->isDefAccurate()) &&
TargetRegisterInfo::isPhysicalRegister(li.reg) &&
*tri_->getSuperRegisters(li.reg)) {
// It's a sub-register live interval, we may not have precise information.
@ -2025,12 +2049,20 @@ bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){
// Okay, the final step is to loop over the RHS live intervals, adding them to
// the LHS.
LHSValNo->hasPHIKill |= VNI->hasPHIKill;
if (VNI->hasPHIKill())
LHSValNo->setHasPHIKill(true);
LHS.addKills(LHSValNo, VNI->kills);
LHS.MergeRangesInAsValue(RHS, LHSValNo);
LHS.weight += RHS.weight;
if (RHS.preference && !LHS.preference)
LHS.preference = RHS.preference;
// Update regalloc hint if both are virtual registers.
if (TargetRegisterInfo::isVirtualRegister(LHS.reg) &&
TargetRegisterInfo::isVirtualRegister(RHS.reg)) {
std::pair<unsigned, unsigned> RHSPref = mri_->getRegAllocationHint(RHS.reg);
std::pair<unsigned, unsigned> LHSPref = mri_->getRegAllocationHint(LHS.reg);
if (RHSPref != LHSPref)
mri_->setRegAllocationHint(LHS.reg, RHSPref.first, RHSPref.second);
}
// Update the liveintervals of sub-registers.
if (TargetRegisterInfo::isPhysicalRegister(LHS.reg))
@ -2185,7 +2217,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
for (LiveInterval::vni_iterator i = LHS.vni_begin(), e = LHS.vni_end();
i != e; ++i) {
VNInfo *VNI = *i;
if (VNI->def == ~1U || VNI->copy == 0) // Src not defined by a copy?
if (VNI->isUnused() || VNI->copy == 0) // Src not defined by a copy?
continue;
// DstReg is known to be a register in the LHS interval. If the src is
@ -2202,7 +2234,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
for (LiveInterval::vni_iterator i = RHS.vni_begin(), e = RHS.vni_end();
i != e; ++i) {
VNInfo *VNI = *i;
if (VNI->def == ~1U || VNI->copy == 0) // Src not defined by a copy?
if (VNI->isUnused() || VNI->copy == 0) // Src not defined by a copy?
continue;
// DstReg is known to be a register in the RHS interval. If the src is
@ -2222,7 +2254,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
i != e; ++i) {
VNInfo *VNI = *i;
unsigned VN = VNI->id;
if (LHSValNoAssignments[VN] >= 0 || VNI->def == ~1U)
if (LHSValNoAssignments[VN] >= 0 || VNI->isUnused())
continue;
ComputeUltimateVN(VNI, NewVNInfo,
LHSValsDefinedFromRHS, RHSValsDefinedFromLHS,
@ -2232,7 +2264,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
i != e; ++i) {
VNInfo *VNI = *i;
unsigned VN = VNI->id;
if (RHSValNoAssignments[VN] >= 0 || VNI->def == ~1U)
if (RHSValNoAssignments[VN] >= 0 || VNI->isUnused())
continue;
// If this value number isn't a copy from the LHS, it's a new number.
if (RHSValsDefinedFromLHS.find(VNI) == RHSValsDefinedFromLHS.end()) {
@ -2296,7 +2328,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
VNInfo *VNI = I->first;
unsigned LHSValID = LHSValNoAssignments[VNI->id];
LiveInterval::removeKill(NewVNInfo[LHSValID], VNI->def);
NewVNInfo[LHSValID]->hasPHIKill |= VNI->hasPHIKill;
if (VNI->hasPHIKill())
NewVNInfo[LHSValID]->setHasPHIKill(true);
RHS.addKills(NewVNInfo[LHSValID], VNI->kills);
}
@ -2306,7 +2339,8 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
VNInfo *VNI = I->first;
unsigned RHSValID = RHSValNoAssignments[VNI->id];
LiveInterval::removeKill(NewVNInfo[RHSValID], VNI->def);
NewVNInfo[RHSValID]->hasPHIKill |= VNI->hasPHIKill;
if (VNI->hasPHIKill())
NewVNInfo[RHSValID]->setHasPHIKill(true);
LHS.addKills(NewVNInfo[RHSValID], VNI->kills);
}
@ -2315,10 +2349,12 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
if ((RHS.ranges.size() > LHS.ranges.size() &&
TargetRegisterInfo::isVirtualRegister(LHS.reg)) ||
TargetRegisterInfo::isPhysicalRegister(RHS.reg)) {
RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo);
RHS.join(LHS, &RHSValNoAssignments[0], &LHSValNoAssignments[0], NewVNInfo,
mri_);
Swapped = true;
} else {
LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo);
LHS.join(RHS, &LHSValNoAssignments[0], &RHSValNoAssignments[0], NewVNInfo,
mri_);
Swapped = false;
}
return true;
@ -2620,6 +2656,11 @@ SimpleRegisterCoalescing::TurnCopyIntoImpDef(MachineBasicBlock::iterator &I,
return false;
LiveInterval &DstInt = li_->getInterval(DstReg);
const LiveRange *DstLR = DstInt.getLiveRangeContaining(CopyIdx);
// If the valno extends beyond this basic block, then it's not safe to delete
// the val# or else livein information won't be correct.
MachineBasicBlock *EndMBB = li_->getMBBFromIndex(DstLR->end);
if (EndMBB != MBB)
return false;
DstInt.removeValNo(DstLR->valno);
CopyMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
for (int i = CopyMI->getNumOperands() - 1, e = 0; i > e; --i)
@ -2800,7 +2841,8 @@ bool SimpleRegisterCoalescing::runOnMachineFunction(MachineFunction &fn) {
}
// Slightly prefer live interval that has been assigned a preferred reg.
if (LI.preference)
std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(LI.reg);
if (Hint.first || Hint.second)
LI.weight *= 1.01F;
// Divide the weight of the interval by its size. This encourages

View File

@ -219,10 +219,10 @@ namespace llvm {
bool CanCoalesceWithImpDef(MachineInstr *CopyMI,
LiveInterval &li, LiveInterval &ImpLi) const;
/// RemoveCopiesFromValNo - The specified value# is defined by an implicit
/// def and it is being removed. Turn all copies from this value# into
/// identity copies so they will be removed.
void RemoveCopiesFromValNo(LiveInterval &li, VNInfo *VNI);
/// TurnCopiesFromValNoToImpDefs - The specified value# is defined by an
/// implicit_def and it is being removed. Turn all copies from this value#
/// into implicit_defs.
void TurnCopiesFromValNoToImpDefs(LiveInterval &li, VNInfo *VNI);
/// isWinToJoinVRWithSrcPhysReg - Return true if it's worth while to join a
/// a virtual destination register with physical source register.

View File

@ -39,7 +39,8 @@ protected:
VirtRegMap *vrm;
/// Construct a spiller base.
SpillerBase(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls, VirtRegMap *vrm) :
SpillerBase(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls,
VirtRegMap *vrm) :
mf(mf), lis(lis), ls(ls), vrm(vrm)
{
mfi = mf->getFrameInfo();
@ -47,16 +48,24 @@ protected:
tii = mf->getTarget().getInstrInfo();
}
/// Insert a store of the given vreg to the given stack slot immediately
/// after the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding an appropriate
/// LiveInterval to the LiveIntervals analysis.
unsigned insertStoreFor(MachineInstr *mi, unsigned ss,
unsigned newVReg,
const TargetRegisterClass *trc) {
MachineBasicBlock::iterator nextInstItr(mi);
++nextInstItr;
/// Ensures there is space before the given machine instruction, returns the
/// instruction's new number.
unsigned makeSpaceBefore(MachineInstr *mi) {
if (!lis->hasGapBeforeInstr(lis->getInstructionIndex(mi))) {
lis->scaleNumbering(2);
ls->scaleNumbering(2);
}
unsigned miIdx = lis->getInstructionIndex(mi);
assert(lis->hasGapBeforeInstr(miIdx));
return miIdx;
}
/// Ensure there is space after the given machine instruction, returns the
/// instruction's new number.
unsigned makeSpaceAfter(MachineInstr *mi) {
if (!lis->hasGapAfterInstr(lis->getInstructionIndex(mi))) {
lis->scaleNumbering(2);
ls->scaleNumbering(2);
@ -66,7 +75,24 @@ protected:
assert(lis->hasGapAfterInstr(miIdx));
tii->storeRegToStackSlot(*mi->getParent(), nextInstItr, newVReg,
return miIdx;
}
/// Insert a store of the given vreg to the given stack slot immediately
/// after the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding an appropriate
/// LiveInterval to the LiveIntervals analysis.
unsigned insertStoreFor(MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineBasicBlock::iterator nextInstItr(mi);
++nextInstItr;
unsigned miIdx = makeSpaceAfter(mi);
tii->storeRegToStackSlot(*mi->getParent(), nextInstItr, vreg,
true, ss, trc);
MachineBasicBlock::iterator storeInstItr(mi);
++storeInstItr;
@ -81,25 +107,35 @@ protected:
return storeInstIdx;
}
void insertStoreOnInterval(LiveInterval *li,
MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
unsigned storeInstIdx = insertStoreFor(mi, ss, vreg, trc);
unsigned start = lis->getDefIndex(lis->getInstructionIndex(mi)),
end = lis->getUseIndex(storeInstIdx);
VNInfo *vni =
li->getNextValue(storeInstIdx, 0, true, lis->getVNInfoAllocator());
vni->kills.push_back(storeInstIdx);
LiveRange lr(start, end, vni);
li->addRange(lr);
}
/// Insert a load of the given vreg from the given stack slot immediately
/// before the given instruction. Returns the base index of the inserted
/// instruction. The caller is responsible for adding an appropriate
/// LiveInterval to the LiveIntervals analysis.
unsigned insertLoadFor(MachineInstr *mi, unsigned ss,
unsigned newVReg,
unsigned vreg,
const TargetRegisterClass *trc) {
MachineBasicBlock::iterator useInstItr(mi);
if (!lis->hasGapBeforeInstr(lis->getInstructionIndex(mi))) {
lis->scaleNumbering(2);
ls->scaleNumbering(2);
}
unsigned miIdx = lis->getInstructionIndex(mi);
assert(lis->hasGapBeforeInstr(miIdx));
tii->loadRegFromStackSlot(*mi->getParent(), useInstItr, newVReg, ss, trc);
unsigned miIdx = makeSpaceBefore(mi);
tii->loadRegFromStackSlot(*mi->getParent(), useInstItr, vreg, ss, trc);
MachineBasicBlock::iterator loadInstItr(mi);
--loadInstItr;
MachineInstr *loadInst = &*loadInstItr;
@ -113,6 +149,24 @@ protected:
return loadInstIdx;
}
void insertLoadOnInterval(LiveInterval *li,
MachineInstr *mi, unsigned ss,
unsigned vreg,
const TargetRegisterClass *trc) {
unsigned loadInstIdx = insertLoadFor(mi, ss, vreg, trc);
unsigned start = lis->getDefIndex(loadInstIdx),
end = lis->getUseIndex(lis->getInstructionIndex(mi));
VNInfo *vni =
li->getNextValue(loadInstIdx, 0, true, lis->getVNInfoAllocator());
vni->kills.push_back(lis->getInstructionIndex(mi));
LiveRange lr(start, end, vni);
li->addRange(lr);
}
/// Add spill ranges for every use/def of the live interval, inserting loads
/// immediately before each use, and stores after each def. No folding is
@ -173,35 +227,16 @@ protected:
assert(hasUse || hasDef);
if (hasUse) {
unsigned loadInstIdx = insertLoadFor(mi, ss, newVReg, trc);
unsigned start = lis->getDefIndex(loadInstIdx),
end = lis->getUseIndex(lis->getInstructionIndex(mi));
VNInfo *vni =
newLI->getNextValue(loadInstIdx, 0, lis->getVNInfoAllocator());
vni->kills.push_back(lis->getInstructionIndex(mi));
LiveRange lr(start, end, vni);
newLI->addRange(lr);
insertLoadOnInterval(newLI, mi, ss, newVReg, trc);
}
if (hasDef) {
unsigned storeInstIdx = insertStoreFor(mi, ss, newVReg, trc);
unsigned start = lis->getDefIndex(lis->getInstructionIndex(mi)),
end = lis->getUseIndex(storeInstIdx);
VNInfo *vni =
newLI->getNextValue(storeInstIdx, 0, lis->getVNInfoAllocator());
vni->kills.push_back(storeInstIdx);
LiveRange lr(start, end, vni);
newLI->addRange(lr);
insertStoreOnInterval(newLI, mi, ss, newVReg, trc);
}
added.push_back(newLI);
}
return added;
}
@ -212,13 +247,44 @@ protected:
/// folding.
class TrivialSpiller : public SpillerBase {
public:
TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls, VirtRegMap *vrm) :
TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, LiveStacks *ls,
VirtRegMap *vrm) :
SpillerBase(mf, lis, ls, vrm) {}
std::vector<LiveInterval*> spill(LiveInterval *li) {
return trivialSpillEverywhere(li);
}
std::vector<LiveInterval*> intraBlockSplit(LiveInterval *li, VNInfo *valno) {
std::vector<LiveInterval*> spillIntervals;
MachineBasicBlock::iterator storeInsertPoint;
if (valno->isDefAccurate()) {
// If we have an accurate def we can just grab an iterator to the instr
// after the def.
storeInsertPoint =
next(MachineBasicBlock::iterator(lis->getInstructionFromIndex(valno->def)));
} else {
// If the def info isn't accurate we check if this is a PHI def.
// If it is then def holds the index of the defining Basic Block, and we
// can use that to get an insertion point.
if (valno->isPHIDef()) {
} else {
// We have no usable def info. We can't split this value sensibly.
// FIXME: Need sensible feedback for "failure to split", an empty
// set of spill intervals could be reasonably returned from a
// split where both the store and load are folded.
return spillIntervals;
}
}
return spillIntervals;
}
};
}
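
For context: makeSpaceBefore/makeSpaceAfter lean on LiveIntervals::scaleNumbering(2); doubling every instruction index leaves an unused slot between any two previously adjacent instructions, which is where the inserted load or store gets numbered. A trivial standalone illustration of that renumbering trick (the vector of indices is a stand-in, not the real numbering data structure):

// renumbering_gap_sketch.cpp - why scaling indices by 2 opens insertion gaps.
#include <cassert>
#include <vector>

int main() {
  std::vector<unsigned> indices = {4, 5, 6}; // densely numbered instructions
  // No free slot between 4 and 5, so rescale the whole numbering.
  for (unsigned &i : indices)
    i *= 2;                                  // now 8, 10, 12
  unsigned newSlot = indices[0] + 1;         // 9 fits between 8 and 10
  assert(newSlot > indices[0] && newSlot < indices[1]);
  return 0;
}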

View File

@ -13,11 +13,14 @@
#include <vector>
namespace llvm {
class LiveInterval;
class LiveIntervals;
class LiveStacks;
class MachineFunction;
class MachineInstr;
class VirtRegMap;
class VNInfo;
/// Spiller interface.
///
@ -26,7 +29,15 @@ namespace llvm {
class Spiller {
public:
virtual ~Spiller() = 0;
/// Spill the given live range. The method used will depend on the Spiller
/// implementation selected.
virtual std::vector<LiveInterval*> spill(LiveInterval *li) = 0;
/// Intra-block split.
virtual std::vector<LiveInterval*> intraBlockSplit(LiveInterval *li,
VNInfo *valno) = 0;
};
/// Create and return a spiller object, as specified on the command line.

View File

@ -827,7 +827,7 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
// Add a live range for the new vreg
LiveInterval& Int = LI.getInterval(I->getOperand(i).getReg());
VNInfo* FirstVN = *Int.vni_begin();
FirstVN->hasPHIKill = false;
FirstVN->setHasPHIKill(false);
if (I->getOperand(i).isKill())
FirstVN->kills.push_back(
LiveIntervals::getUseIndex(LI.getInstructionIndex(I)));
@ -886,10 +886,7 @@ bool StrongPHIElimination::mergeLiveIntervals(unsigned primary,
VNInfo* OldVN = R.valno;
VNInfo*& NewVN = VNMap[OldVN];
if (!NewVN) {
NewVN = LHS.getNextValue(OldVN->def,
OldVN->copy,
LI.getVNInfoAllocator());
NewVN->kills = OldVN->kills;
NewVN = LHS.createValueCopy(OldVN, LI.getVNInfoAllocator());
}
LiveRange LR (R.start, R.end, NewVN);
@ -987,7 +984,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
LiveInterval& Int = LI.getOrCreateInterval(I->first);
const LiveRange* LR =
Int.getLiveRangeContaining(LI.getMBBEndIdx(SI->second));
LR->valno->hasPHIKill = true;
LR->valno->setHasPHIKill(true);
I->second.erase(SI->first);
}
@ -1037,7 +1034,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
// now has an unknown def.
unsigned idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
const LiveRange* PLR = PI.getLiveRangeContaining(idx);
PLR->valno->def = ~0U;
PLR->valno->setIsPHIDef(true);
LiveRange R (LI.getMBBStartIdx(PInstr->getParent()),
PLR->start, PLR->valno);
PI.addRange(R);

View File

@ -51,6 +51,7 @@ static RegisterPass<VirtRegMap>
X("virtregmap", "Virtual Register Map");
bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
MRI = &mf.getRegInfo();
TII = mf.getTarget().getInstrInfo();
TRI = mf.getTarget().getRegisterInfo();
MF = &mf;
@ -98,6 +99,18 @@ void VirtRegMap::grow() {
ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
}
unsigned VirtRegMap::getRegAllocPref(unsigned virtReg) {
std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(virtReg);
unsigned physReg = Hint.second;
if (physReg &&
TargetRegisterInfo::isVirtualRegister(physReg) && hasPhys(physReg))
physReg = getPhys(physReg);
if (Hint.first == 0)
return (physReg && TargetRegisterInfo::isPhysicalRegister(physReg))
? physReg : 0;
return TRI->ResolveRegAllocHint(Hint.first, physReg, *MF);
}
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
@ -213,8 +226,7 @@ void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
/// FindUnusedRegisters - Gather a list of allocatable registers that
/// have not been allocated to any virtual register.
bool VirtRegMap::FindUnusedRegisters(const TargetRegisterInfo *TRI,
LiveIntervals* LIs) {
bool VirtRegMap::FindUnusedRegisters(LiveIntervals* LIs) {
unsigned NumRegs = TRI->getNumRegs();
UnusedRegs.reset();
UnusedRegs.resize(NumRegs);

View File

@ -31,6 +31,7 @@ namespace llvm {
class LiveIntervals;
class MachineInstr;
class MachineFunction;
class MachineRegisterInfo;
class TargetInstrInfo;
class TargetRegisterInfo;
@ -47,6 +48,7 @@ namespace llvm {
std::pair<unsigned, ModRef> > MI2VirtMapTy;
private:
MachineRegisterInfo *MRI;
const TargetInstrInfo *TII;
const TargetRegisterInfo *TRI;
MachineFunction *MF;
@ -190,6 +192,9 @@ namespace llvm {
grow();
}
/// @brief returns the register allocation preference.
unsigned getRegAllocPref(unsigned virtReg);
/// @brief records virtReg is a split live interval from SReg.
void setIsSplitFromReg(unsigned virtReg, unsigned SReg) {
Virt2SplitMap[virtReg] = SReg;
@ -445,8 +450,7 @@ namespace llvm {
/// FindUnusedRegisters - Gather a list of allocatable registers that
/// have not been allocated to any virtual register.
bool FindUnusedRegisters(const TargetRegisterInfo *TRI,
LiveIntervals* LIs);
bool FindUnusedRegisters(LiveIntervals* LIs);
/// HasUnusedRegisters - Return true if there are any allocatable registers
/// that have not been allocated to any virtual register.

View File

@ -118,7 +118,7 @@ int LLVMCreateJITCompiler(LLVMExecutionEngineRef *OutJIT,
char **OutError) {
std::string Error;
if (ExecutionEngine *JIT =
ExecutionEngine::createJIT(unwrap(MP), &Error, 0,
ExecutionEngine::create(unwrap(MP), false, &Error,
(CodeGenOpt::Level)OptLevel)) {
*OutJIT = wrap(JIT);
return 0;

View File

@ -19,6 +19,7 @@ add_llvm_library(LLVMSupport
PrettyStackTrace.cpp
SlowOperationInformer.cpp
SmallPtrSet.cpp
SourceMgr.cpp
Statistic.cpp
Streams.cpp
StringExtras.cpp

View File

@ -14,18 +14,15 @@
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Config/config.h"
#include "llvm/System/Atomic.h"
#include "llvm/System/Mutex.h"
#include <cassert>
using namespace llvm;
static const ManagedStaticBase *StaticList = 0;
static sys::Mutex* ManagedStaticMutex = 0;
void ManagedStaticBase::RegisterManagedStatic(void *(*Creator)(),
void (*Deleter)(void*)) const {
if (ManagedStaticMutex) {
ManagedStaticMutex->acquire();
if (llvm_is_multithreaded()) {
llvm_acquire_global_lock();
if (Ptr == 0) {
void* tmp = Creator ? Creator() : 0;
@ -39,7 +36,7 @@ void ManagedStaticBase::RegisterManagedStatic(void *(*Creator)(),
StaticList = this;
}
ManagedStaticMutex->release();
llvm_release_global_lock();
} else {
assert(Ptr == 0 && DeleterFn == 0 && Next == 0 &&
"Partially initialized ManagedStatic!?");
@ -68,24 +65,11 @@ void ManagedStaticBase::destroy() const {
DeleterFn = 0;
}
bool llvm::llvm_start_multithreaded() {
#if LLVM_MULTITHREADED
assert(ManagedStaticMutex == 0 && "Multithreaded LLVM already initialized!");
ManagedStaticMutex = new sys::Mutex(true);
return true;
#else
return false;
#endif
}
/// llvm_shutdown - Deallocate and destroy all ManagedStatic variables.
void llvm::llvm_shutdown() {
while (StaticList)
StaticList->destroy();
if (ManagedStaticMutex) {
delete ManagedStaticMutex;
ManagedStaticMutex = 0;
}
if (llvm_is_multithreaded()) llvm_stop_multithreaded();
}
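
For context: RegisterManagedStatic now takes the global lock only when llvm_is_multithreaded() returns true and re-checks Ptr after acquiring it, so each static is constructed exactly once. A conceptual standalone sketch of that pattern; std::mutex and the boolean flag stand in for the LLVM primitives, which this code does not use:

// lazy_init_sketch.cpp - create-once-under-a-global-lock, as in RegisterManagedStatic.
#include <cassert>
#include <mutex>

static std::mutex globalLock;              // stand-in for the global lock helpers
static bool multithreadingEnabled = true;  // stand-in for llvm_is_multithreaded()

struct Registry { int value; Registry() : value(42) {} };
static Registry *ptr = 0;

Registry &getRegistry() {
  if (multithreadingEnabled) {
    std::lock_guard<std::mutex> guard(globalLock);
    if (!ptr)                              // re-check under the lock
      ptr = new Registry();
  } else if (!ptr) {
    ptr = new Registry();                  // single-threaded: no locking needed
  }
  return *ptr;
}

int main() {
  assert(getRegistry().value == 42);
  assert(&getRegistry() == &getRegistry()); // same object on every call
  return 0;
}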

View File

@ -1,4 +1,4 @@
//===- TGSourceMgr.cpp - Manager for Source Buffers & Diagnostics ---------===//
//===- SourceMgr.cpp - Manager for Simple Source Buffers & Diagnostics ----===//
//
// The LLVM Compiler Infrastructure
//
@ -7,25 +7,47 @@
//
//===----------------------------------------------------------------------===//
//
// This file implements the TGSourceMgr class.
// This file implements the SourceMgr class. This class is used as a simple
// substrate for diagnostics, #include handling, and other low level things for
// simple parsers.
//
//===----------------------------------------------------------------------===//
#include "TGSourceMgr.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
TGSourceMgr::~TGSourceMgr() {
SourceMgr::~SourceMgr() {
while (!Buffers.empty()) {
delete Buffers.back().Buffer;
Buffers.pop_back();
}
}
/// AddIncludeFile - Search for a file with the specified name in the current
/// directory or in one of the IncludeDirs. If no file is found, this returns
/// ~0, otherwise it returns the buffer ID of the stacked file.
unsigned SourceMgr::AddIncludeFile(const std::string &Filename,
SMLoc IncludeLoc) {
MemoryBuffer *NewBuf = MemoryBuffer::getFile(Filename.c_str());
// If the file didn't exist directly, see if it's in an include path.
for (unsigned i = 0, e = IncludeDirectories.size(); i != e && !NewBuf; ++i) {
std::string IncFile = IncludeDirectories[i] + "/" + Filename;
NewBuf = MemoryBuffer::getFile(IncFile.c_str());
}
if (NewBuf == 0) return ~0U;
return AddNewSourceBuffer(NewBuf, IncludeLoc);
}
/// FindBufferContainingLoc - Return the ID of the buffer containing the
/// specified location, returning -1 if not found.
int TGSourceMgr::FindBufferContainingLoc(TGLoc Loc) const {
int SourceMgr::FindBufferContainingLoc(SMLoc Loc) const {
for (unsigned i = 0, e = Buffers.size(); i != e; ++i)
if (Loc.getPointer() >= Buffers[i].Buffer->getBufferStart() &&
// Use <= here so that a pointer to the null at the end of the buffer
@ -37,7 +59,7 @@ int TGSourceMgr::FindBufferContainingLoc(TGLoc Loc) const {
/// FindLineNumber - Find the line number for the specified location in the
/// specified file. This is not a fast method.
unsigned TGSourceMgr::FindLineNumber(TGLoc Loc, int BufferID) const {
unsigned SourceMgr::FindLineNumber(SMLoc Loc, int BufferID) const {
if (BufferID == -1) BufferID = FindBufferContainingLoc(Loc);
assert(BufferID != -1 && "Invalid Location!");
@ -49,13 +71,13 @@ unsigned TGSourceMgr::FindLineNumber(TGLoc Loc, int BufferID) const {
const char *Ptr = Buff->getBufferStart();
for (; TGLoc::getFromPointer(Ptr) != Loc; ++Ptr)
for (; SMLoc::getFromPointer(Ptr) != Loc; ++Ptr)
if (*Ptr == '\n') ++LineNo;
return LineNo;
}
void TGSourceMgr::PrintIncludeStack(TGLoc IncludeLoc) const {
if (IncludeLoc == TGLoc()) return; // Top of stack.
void SourceMgr::PrintIncludeStack(SMLoc IncludeLoc) const {
if (IncludeLoc == SMLoc()) return; // Top of stack.
int CurBuf = FindBufferContainingLoc(IncludeLoc);
assert(CurBuf != -1 && "Invalid or unspecified location!");
@ -68,12 +90,12 @@ void TGSourceMgr::PrintIncludeStack(TGLoc IncludeLoc) const {
}
void TGSourceMgr::PrintError(TGLoc ErrorLoc, const std::string &Msg) const {
void SourceMgr::PrintMessage(SMLoc Loc, const std::string &Msg) const {
raw_ostream &OS = errs();
// First thing to do: find the current buffer containing the specified
// location.
int CurBuf = FindBufferContainingLoc(ErrorLoc);
int CurBuf = FindBufferContainingLoc(Loc);
assert(CurBuf != -1 && "Invalid or unspecified location!");
PrintIncludeStack(getBufferInfo(CurBuf).IncludeLoc);
@ -82,24 +104,24 @@ void TGSourceMgr::PrintError(TGLoc ErrorLoc, const std::string &Msg) const {
OS << "Parsing " << CurMB->getBufferIdentifier() << ":"
<< FindLineNumber(ErrorLoc, CurBuf) << ": ";
<< FindLineNumber(Loc, CurBuf) << ": ";
OS << Msg << "\n";
// Scan backward to find the start of the line.
const char *LineStart = ErrorLoc.getPointer();
const char *LineStart = Loc.getPointer();
while (LineStart != CurMB->getBufferStart() &&
LineStart[-1] != '\n' && LineStart[-1] != '\r')
--LineStart;
// Get the end of the line.
const char *LineEnd = ErrorLoc.getPointer();
const char *LineEnd = Loc.getPointer();
while (LineEnd != CurMB->getBufferEnd() &&
LineEnd[0] != '\n' && LineEnd[0] != '\r')
++LineEnd;
// Print out the line.
OS << std::string(LineStart, LineEnd) << "\n";
// Print out spaces before the caret.
for (const char *Pos = LineStart; Pos != ErrorLoc.getPointer(); ++Pos)
for (const char *Pos = LineStart; Pos != Loc.getPointer(); ++Pos)
OS << (*Pos == '\t' ? '\t' : ' ');
OS << "^\n";
}
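
For context: SourceMgr::PrintMessage locates the line containing the given location, prints it, and then prints a caret under the offending column, emitting tabs as tabs so the caret stays aligned. A standalone sketch of that caret-printing logic over a plain std::string buffer, with no SourceMgr or SMLoc types (the function name and example input are made up):

// caret_diag_sketch.cpp - print a source line with a caret under the error column.
#include <iostream>
#include <string>

void printCaretDiagnostic(const std::string &buffer,
                          std::string::size_type errOffset,
                          const std::string &msg) {
  // Scan backward and forward from the error offset to the enclosing line.
  std::string::size_type lineStart = 0;
  if (errOffset > 0) {
    std::string::size_type prevNL = buffer.rfind('\n', errOffset - 1);
    if (prevNL != std::string::npos)
      lineStart = prevNL + 1;
  }
  std::string::size_type lineEnd = buffer.find('\n', errOffset);
  if (lineEnd == std::string::npos)
    lineEnd = buffer.size();

  std::cerr << msg << "\n";
  std::cerr << buffer.substr(lineStart, lineEnd - lineStart) << "\n";
  // Print spaces before the caret, keeping tabs as tabs so it stays aligned.
  for (std::string::size_type i = lineStart; i < errOffset; ++i)
    std::cerr << (buffer[i] == '\t' ? '\t' : ' ');
  std::cerr << "^\n";
}

int main() {
  std::string src = "def foo {\n  let X = ;\n}\n";
  printCaretDiagnostic(src, src.find(';'), "expected value before ';'");
  return 0;
}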

View File

@ -43,6 +43,7 @@ const char *Triple::getOSTypeName(OSType Kind) {
switch (Kind) {
case UnknownOS: return "unknown";
case AuroraUX: return "auroraux";
case Darwin: return "darwin";
case DragonFly: return "dragonfly";
case FreeBSD: return "freebsd";
@ -79,7 +80,9 @@ void Triple::Parse() const {
Vendor = UnknownVendor;
std::string OSName = getOSName();
if (memcmp(&OSName[0], "darwin", 6) == 0)
if (memcmp(&OSName[0], "auroraux", 8) == 0)
OS = AuroraUX;
else if (memcmp(&OSName[0], "darwin", 6) == 0)
OS = Darwin;
else if (memcmp(&OSName[0], "dragonfly", 9) == 0)
OS = DragonFly;
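
For context: Triple::Parse recognizes the new auroraux OS by memcmp'ing a fixed-length prefix of the OS component. A standalone sketch of the same prefix test written with std::string::compare, which also avoids comparing past the end of OS names shorter than the prefix (hasPrefix is an illustrative helper, not part of the commit):

// os_prefix_sketch.cpp - prefix-based OS detection in the spirit of Triple::Parse.
#include <cassert>
#include <string>

bool hasPrefix(const std::string &s, const std::string &prefix) {
  return s.compare(0, prefix.size(), prefix) == 0;
}

int main() {
  assert(hasPrefix("auroraux1.2", "auroraux"));
  assert(hasPrefix("darwin9.6.0", "darwin"));
  assert(!hasPrefix("aix5.3", "auroraux")); // shorter name: no over-long compare
  return 0;
}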

View File

@ -51,3 +51,31 @@ sys::cas_flag sys::CompareAndSwap(volatile sys::cas_flag* ptr,
# error No compare-and-swap implementation for your platform!
#endif
}
sys::cas_flag sys::AtomicIncrement(volatile sys::cas_flag* ptr) {
#if LLVM_MULTITHREADED==0
++(*ptr);
return *ptr;
#elif defined(__GNUC__)
return __sync_add_and_fetch(ptr, 1);
#elif defined(_MSC_VER)
return InterlockedIncrement(ptr);
#else
# error No atomic increment implementation for your platform!
#endif
}
sys::cas_flag sys::AtomicDecrement(volatile sys::cas_flag* ptr) {
#if LLVM_MULTITHREADED==0
--(*ptr);
return *ptr;
#elif defined(__GNUC__)
return __sync_sub_and_fetch(ptr, 1);
#elif defined(_MSC_VER)
return InterlockedDecrement(ptr);
#else
# error No atomic decrement implementation for your platform!
#endif
}
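
For context: the new AtomicIncrement/AtomicDecrement helpers follow the __sync_add_and_fetch convention of returning the updated value, which is what reference-counting callers need. A standalone sketch of that contract expressed with std::atomic; the commit itself targets GCC builtins, MSVC intrinsics, or a non-threaded fallback, not std::atomic:

// atomic_refcount_sketch.cpp - add-and-fetch semantics used for reference counts.
#include <atomic>
#include <cassert>

typedef std::atomic<unsigned> cas_flag_t;  // stand-in for sys::cas_flag

unsigned atomicIncrement(cas_flag_t &v) { return v.fetch_add(1) + 1; }
unsigned atomicDecrement(cas_flag_t &v) { return v.fetch_sub(1) - 1; }

int main() {
  cas_flag_t refCount(1);
  assert(atomicIncrement(refCount) == 2);  // the *new* value is returned
  assert(atomicDecrement(refCount) == 1);
  if (atomicDecrement(refCount) == 0) {
    // last reference dropped; this is where the owner would be destroyed
  }
  assert(refCount.load() == 0);
  return 0;
}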

Some files were not shown because too many files have changed in this diff.