Merge llvm-project 12.0.1 release
This updates llvm, clang, compiler-rt, libc++, libunwind, lld, lldb and openmp to llvmorg-12.0.1-0-gfed41342a82f, a.k.a. 12.0.1 release.

PR:		255570
MFC after:	6 weeks
parent 5d40fb677a
commit 4652422eb4
@@ -513,10 +513,11 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
   case CK_K8:
   case CK_K8SSE3:
   case CK_x86_64:
+    defineCPUMacros(Builder, "k8");
+    break;
   case CK_x86_64_v2:
   case CK_x86_64_v3:
   case CK_x86_64_v4:
-    defineCPUMacros(Builder, "k8");
     break;
   case CK_AMDFAM10:
     defineCPUMacros(Builder, "amdfam10");
@@ -370,15 +370,6 @@ static void ioctl_table_fill() {

 #if SANITIZER_GLIBC
   // _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
-  _(CYGETDEFTHRESH, WRITE, sizeof(int));
-  _(CYGETDEFTIMEOUT, WRITE, sizeof(int));
-  _(CYGETMON, WRITE, struct_cyclades_monitor_sz);
-  _(CYGETTHRESH, WRITE, sizeof(int));
-  _(CYGETTIMEOUT, WRITE, sizeof(int));
-  _(CYSETDEFTHRESH, NONE, 0);
-  _(CYSETDEFTIMEOUT, NONE, 0);
-  _(CYSETTHRESH, NONE, 0);
-  _(CYSETTIMEOUT, NONE, 0);
   _(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);
   _(EQL_ENSLAVE, WRITE, struct_ifreq_sz);
   _(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);

@@ -143,7 +143,6 @@ typedef struct user_fpregs elf_fpregset_t;
 # include <sys/procfs.h>
 #endif
 #include <sys/user.h>
-#include <linux/cyclades.h>
 #include <linux/if_eql.h>
 #include <linux/if_plip.h>
 #include <linux/lp.h>

@@ -459,7 +458,6 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);

 #if SANITIZER_GLIBC
   unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
-  unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
 #if EV_VERSION > (0x010000)
   unsigned struct_input_keymap_entry_sz = sizeof(struct input_keymap_entry);
 #else

@@ -823,15 +821,6 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
 #endif // SANITIZER_LINUX

 #if SANITIZER_LINUX && !SANITIZER_ANDROID
-  unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
-  unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
-  unsigned IOCTL_CYGETMON = CYGETMON;
-  unsigned IOCTL_CYGETTHRESH = CYGETTHRESH;
-  unsigned IOCTL_CYGETTIMEOUT = CYGETTIMEOUT;
-  unsigned IOCTL_CYSETDEFTHRESH = CYSETDEFTHRESH;
-  unsigned IOCTL_CYSETDEFTIMEOUT = CYSETDEFTIMEOUT;
-  unsigned IOCTL_CYSETTHRESH = CYSETTHRESH;
-  unsigned IOCTL_CYSETTIMEOUT = CYSETTIMEOUT;
   unsigned IOCTL_EQL_EMANCIPATE = EQL_EMANCIPATE;
   unsigned IOCTL_EQL_ENSLAVE = EQL_ENSLAVE;
   unsigned IOCTL_EQL_GETMASTRCFG = EQL_GETMASTRCFG;

@@ -983,7 +983,6 @@ extern unsigned struct_vt_mode_sz;

 #if SANITIZER_LINUX && !SANITIZER_ANDROID
 extern unsigned struct_ax25_parms_struct_sz;
-extern unsigned struct_cyclades_monitor_sz;
 extern unsigned struct_input_keymap_entry_sz;
 extern unsigned struct_ipx_config_data_sz;
 extern unsigned struct_kbdiacrs_sz;

@@ -1328,15 +1327,6 @@ extern unsigned IOCTL_VT_WAITACTIVE;
 #endif // SANITIZER_LINUX

 #if SANITIZER_LINUX && !SANITIZER_ANDROID
-extern unsigned IOCTL_CYGETDEFTHRESH;
-extern unsigned IOCTL_CYGETDEFTIMEOUT;
-extern unsigned IOCTL_CYGETMON;
-extern unsigned IOCTL_CYGETTHRESH;
-extern unsigned IOCTL_CYGETTIMEOUT;
-extern unsigned IOCTL_CYSETDEFTHRESH;
-extern unsigned IOCTL_CYSETDEFTIMEOUT;
-extern unsigned IOCTL_CYSETTHRESH;
-extern unsigned IOCTL_CYSETTIMEOUT;
 extern unsigned IOCTL_EQL_EMANCIPATE;
 extern unsigned IOCTL_EQL_ENSLAVE;
 extern unsigned IOCTL_EQL_GETMASTRCFG;
@@ -165,7 +165,12 @@ bool SupportsColoredOutput(fd_t fd) {

 #if !SANITIZER_GO
 // TODO(glider): different tools may require different altstack size.
-static const uptr kAltStackSize = SIGSTKSZ * 4;  // SIGSTKSZ is not enough.
+static uptr GetAltStackSize() {
+  // Note: since GLIBC_2.31, SIGSTKSZ may be a function call, so this may be
+  // more costly that you think. However GetAltStackSize is only call 2-3 times
+  // per thread so don't cache the evaluation.
+  return SIGSTKSZ * 4;
+}

 void SetAlternateSignalStack() {
   stack_t altstack, oldstack;

@@ -176,10 +181,10 @@ void SetAlternateSignalStack() {
   // TODO(glider): the mapped stack should have the MAP_STACK flag in the
   // future. It is not required by man 2 sigaltstack now (they're using
   // malloc()).
-  void* base = MmapOrDie(kAltStackSize, __func__);
+  void *base = MmapOrDie(GetAltStackSize(), __func__);
   altstack.ss_sp = (char*) base;
   altstack.ss_flags = 0;
-  altstack.ss_size = kAltStackSize;
+  altstack.ss_size = GetAltStackSize();
   CHECK_EQ(0, sigaltstack(&altstack, nullptr));
 }

@@ -187,7 +192,7 @@ void UnsetAlternateSignalStack() {
   stack_t altstack, oldstack;
   altstack.ss_sp = nullptr;
   altstack.ss_flags = SS_DISABLE;
-  altstack.ss_size = kAltStackSize;  // Some sane value required on Darwin.
+  altstack.ss_size = GetAltStackSize();  // Some sane value required on Darwin.
   CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
   UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
 }
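Both sigaltstack hunks above swap the compile-time constant kAltStackSize for the runtime helper GetAltStackSize(), because on newer glibc SIGSTKSZ may expand to a function call rather than a constant. A minimal standalone sketch of the same pattern (hypothetical names, not the compiler-rt implementation):

    #include <csignal>
    #include <cstdlib>
    #include <cstring>

    // SIGSTKSZ may call sysconf() at runtime on newer glibc, so compute
    // the size on demand rather than storing it in a global constant.
    static size_t GetAltStackSizeSketch() {
      return static_cast<size_t>(SIGSTKSZ) * 4;
    }

    static int InstallAltStack() {
      stack_t ss;
      std::memset(&ss, 0, sizeof(ss));
      ss.ss_size = GetAltStackSizeSketch();
      ss.ss_sp = std::malloc(ss.ss_size);
      if (ss.ss_sp == nullptr)
        return -1;
      ss.ss_flags = 0;
      return sigaltstack(&ss, nullptr);  // register the alternate stack
    }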
@@ -43,6 +43,14 @@
 // as unavailable. When vendors decide to ship the feature as part of their
 // shared library, they can update the markup appropriately.
 //
+// Furthermore, many features in the standard library have corresponding
+// feature-test macros. When a feature is made unavailable on some deployment
+// target, a macro should be defined to signal that it is unavailable. That
+// macro can then be picked up when feature-test macros are generated (see
+// generate_feature_test_macro_components.py) to make sure that feature-test
+// macros don't announce a feature as being implemented if it has been marked
+// as unavailable.
+//
 // Note that this mechanism is disabled by default in the "upstream" libc++.
 // Availability annotations are only meaningful when shipping libc++ inside
 // a platform (i.e. as a system library), and so vendors that want them should

@@ -76,6 +84,8 @@
 // This controls the availability of std::shared_mutex and std::shared_timed_mutex,
 // which were added to the dylib later.
 # define _LIBCPP_AVAILABILITY_SHARED_MUTEX
+// # define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex
+// # define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex

 // These macros control the availability of std::bad_optional_access and
 // other exception types. These were put in the shared library to prevent

@@ -114,6 +124,7 @@
 # define _LIBCPP_AVAILABILITY_FILESYSTEM
 # define _LIBCPP_AVAILABILITY_FILESYSTEM_PUSH
 # define _LIBCPP_AVAILABILITY_FILESYSTEM_POP
+// # define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem

 // This controls the availability of std::to_chars.
 # define _LIBCPP_AVAILABILITY_TO_CHARS

@@ -122,6 +133,10 @@
 // which requires shared library support for various operations
 // (see libcxx/src/atomic.cpp).
 # define _LIBCPP_AVAILABILITY_SYNC
+// # define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait
+// # define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier
+// # define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_latch
+// # define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore

 #elif defined(__APPLE__)

@@ -130,6 +145,14 @@
     __attribute__((availability(ios,strict,introduced=10.0))) \
     __attribute__((availability(tvos,strict,introduced=10.0))) \
     __attribute__((availability(watchos,strict,introduced=3.0)))
+# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101200) || \
+    (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 100000) || \
+    (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 100000) || \
+    (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 30000)
+#   define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex
+#   define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex
+# endif

 # define _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS \
     __attribute__((availability(macosx,strict,introduced=10.13))) \
     __attribute__((availability(ios,strict,introduced=11.0))) \

@@ -139,27 +162,34 @@
     _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS
 # define _LIBCPP_AVAILABILITY_BAD_ANY_CAST \
     _LIBCPP_AVAILABILITY_BAD_OPTIONAL_ACCESS

 # define _LIBCPP_AVAILABILITY_UNCAUGHT_EXCEPTIONS \
     __attribute__((availability(macosx,strict,introduced=10.12))) \
     __attribute__((availability(ios,strict,introduced=10.0))) \
     __attribute__((availability(tvos,strict,introduced=10.0))) \
     __attribute__((availability(watchos,strict,introduced=3.0)))

 # define _LIBCPP_AVAILABILITY_SIZED_NEW_DELETE \
     __attribute__((availability(macosx,strict,introduced=10.12))) \
     __attribute__((availability(ios,strict,introduced=10.0))) \
     __attribute__((availability(tvos,strict,introduced=10.0))) \
     __attribute__((availability(watchos,strict,introduced=3.0)))

 # define _LIBCPP_AVAILABILITY_FUTURE_ERROR \
     __attribute__((availability(ios,strict,introduced=6.0)))

 # define _LIBCPP_AVAILABILITY_TYPEINFO_VTABLE \
     __attribute__((availability(macosx,strict,introduced=10.9))) \
     __attribute__((availability(ios,strict,introduced=7.0)))

 # define _LIBCPP_AVAILABILITY_LOCALE_CATEGORY \
     __attribute__((availability(macosx,strict,introduced=10.9))) \
     __attribute__((availability(ios,strict,introduced=7.0)))

 # define _LIBCPP_AVAILABILITY_ATOMIC_SHARED_PTR \
     __attribute__((availability(macosx,strict,introduced=10.9))) \
     __attribute__((availability(ios,strict,introduced=7.0)))

 # define _LIBCPP_AVAILABILITY_FILESYSTEM \
     __attribute__((availability(macosx,strict,introduced=10.15))) \
     __attribute__((availability(ios,strict,introduced=13.0))) \

@@ -175,10 +205,23 @@
     _Pragma("clang attribute pop") \
     _Pragma("clang attribute pop") \
     _Pragma("clang attribute pop")
+# if (defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101500) || \
+    (defined(__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ < 130000) || \
+    (defined(__ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_TV_OS_VERSION_MIN_REQUIRED__ < 130000) || \
+    (defined(__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__) && __ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__ < 60000)
+#   define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem
+# endif

 # define _LIBCPP_AVAILABILITY_TO_CHARS \
     _LIBCPP_AVAILABILITY_FILESYSTEM

 // Note: Those are not ABI-stable yet, so we can't ship them.
 # define _LIBCPP_AVAILABILITY_SYNC \
     __attribute__((unavailable))
+# define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait
+# define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier
+# define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_latch
+# define _LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore

 #else
@@ -1114,28 +1114,26 @@ public:
 #endif
     {}

-    // avoid re-declaring a copy constructor for the non-const version.
-    using __type_for_copy_to_const =
-        _If<_IsConst, __bit_iterator<_Cp, false>, struct __private_nat>;
-
+    // When _IsConst=false, this is the copy constructor.
+    // It is non-trivial. Making it trivial would break ABI.
+    // When _IsConst=true, this is a converting constructor;
+    // the copy and move constructors are implicitly generated
+    // and trivial.
     _LIBCPP_INLINE_VISIBILITY
-    __bit_iterator(const __type_for_copy_to_const& __it) _NOEXCEPT
+    __bit_iterator(const __bit_iterator<_Cp, false>& __it) _NOEXCEPT
         : __seg_(__it.__seg_), __ctz_(__it.__ctz_) {}

-    // The non-const __bit_iterator has historically had a non-trivial
-    // copy constructor (as a quirk of its construction). We need to maintain
-    // this for ABI purposes.
-    using __type_for_abi_non_trivial_copy_ctor =
-        _If<!_IsConst, __bit_iterator, struct __private_nat>;
-
-    _LIBCPP_INLINE_VISIBILITY
-    __bit_iterator(__type_for_abi_non_trivial_copy_ctor const& __it) _NOEXCEPT
-        : __seg_(__it.__seg_), __ctz_(__it.__ctz_) {}
-
-    // Always declare the copy assignment operator since the implicit declaration
-    // is deprecated.
+    // When _IsConst=false, we have a user-provided copy constructor,
+    // so we must also provide a copy assignment operator because
+    // the implicit generation of a defaulted one is deprecated.
+    // When _IsConst=true, the assignment operators are
+    // implicitly generated and trivial.
     _LIBCPP_INLINE_VISIBILITY
-    __bit_iterator& operator=(__bit_iterator const&) = default;
+    __bit_iterator& operator=(const _If<_IsConst, struct __private_nat, __bit_iterator>& __it) {
+        __seg_ = __it.__seg_;
+        __ctz_ = __it.__ctz_;
+        return *this;
+    }

     _LIBCPP_INLINE_VISIBILITY reference operator*() const _NOEXCEPT
         {return reference(__seg_, __storage_type(1) << __ctz_);}
@@ -810,10 +810,35 @@ public:
 };
 #endif

+// This class provides a non-trivial default constructor to the class that derives from it
+// if the condition is satisfied.
+//
+// The second template parameter exists to allow giving a unique type to __non_trivial_if,
+// which makes it possible to avoid breaking the ABI when making this a base class of an
+// existing class. Without that, imagine we have classes D1 and D2, both of which used to
+// have no base classes, but which now derive from __non_trivial_if. The layout of a class
+// that inherits from both D1 and D2 will change because the two __non_trivial_if base
+// classes are not allowed to share the same address.
+//
+// By making those __non_trivial_if base classes unique, we work around this problem and
+// it is safe to start deriving from __non_trivial_if in existing classes.
+template <bool _Cond, class _Unique>
+struct __non_trivial_if { };
+
+template <class _Unique>
+struct __non_trivial_if<true, _Unique> {
+    _LIBCPP_INLINE_VISIBILITY
+    _LIBCPP_CONSTEXPR __non_trivial_if() _NOEXCEPT { }
+};
+
 // allocator
+//
+// Note: For ABI compatibility between C++20 and previous standards, we make
+// allocator<void> trivial in C++20.

 template <class _Tp>
 class _LIBCPP_TEMPLATE_VIS allocator
+    : private __non_trivial_if<!is_void<_Tp>::value, allocator<_Tp> >
 {
 public:
     typedef size_t size_type;
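The comment block in this hunk is the heart of the change; a freestanding illustration (hypothetical names, not libc++ internals) shows why the unique tag parameter matters: two instantiations with distinct tags are distinct empty types, so the usual empty-base layout still applies when one class ends up inheriting both.

    #include <type_traits>

    // Injects a non-trivial default constructor only when Cond is true.
    template <bool Cond, class Unique>
    struct non_trivial_if {};

    template <class Unique>
    struct non_trivial_if<true, Unique> {
      constexpr non_trivial_if() noexcept {}
    };

    // Each class tags the base with itself, so D1 and D2 carry distinct,
    // non-conflicting empty bases.
    struct D1 : private non_trivial_if<true, D1> {};
    struct D2 : private non_trivial_if<true, D2> {};
    struct Both : D1, D2 {};  // stays empty-layout on Itanium-style ABIs

    static_assert(!std::is_trivially_default_constructible<D1>::value,
                  "constructor injection works");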
@@ -823,7 +848,7 @@ public:
     typedef true_type is_always_equal;

     _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
-    allocator() _NOEXCEPT { }
+    allocator() _NOEXCEPT _LIBCPP_DEFAULT

     template <class _Up>
     _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17

@@ -895,6 +920,7 @@ public:

 template <class _Tp>
 class _LIBCPP_TEMPLATE_VIS allocator<const _Tp>
+    : private __non_trivial_if<!is_void<_Tp>::value, allocator<const _Tp> >
 {
 public:
     typedef size_t size_type;

@@ -904,7 +930,7 @@ public:
     typedef true_type is_always_equal;

     _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
-    allocator() _NOEXCEPT { }
+    allocator() _NOEXCEPT _LIBCPP_DEFAULT

     template <class _Up>
     _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX17
@@ -2745,7 +2771,6 @@ public:
         typename enable_if
         <
             !is_lvalue_reference<_Dp>::value &&
-            !is_array<_Yp>::value &&
             is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
             __nat
         >::type = __nat());

@@ -2754,7 +2779,6 @@ public:
         typename enable_if
         <
             is_lvalue_reference<_Dp>::value &&
-            !is_array<_Yp>::value &&
             is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
             __nat
         >::type = __nat());

@@ -2795,7 +2819,6 @@ public:
     template <class _Yp, class _Dp>
         typename enable_if
         <
-            !is_array<_Yp>::value &&
             is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
             shared_ptr&
         >::type

@@ -3157,7 +3180,6 @@ shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp>&& __r,
            typename enable_if
            <
                !is_lvalue_reference<_Dp>::value &&
-               !is_array<_Yp>::value &&
                is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
                __nat
            >::type)

@@ -3170,7 +3192,7 @@ shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp>&& __r,
 #endif
 {
     typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
-    typedef __shared_ptr_pointer<_Yp*, _Dp, _AllocT > _CntrlBlk;
+    typedef __shared_ptr_pointer<typename unique_ptr<_Yp, _Dp>::pointer, _Dp, _AllocT > _CntrlBlk;
    __cntrl_ = new _CntrlBlk(__r.get(), __r.get_deleter(), _AllocT());
    __enable_weak_this(__r.get(), __r.get());
 }

@@ -3183,7 +3205,6 @@ shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp>&& __r,
            typename enable_if
            <
                is_lvalue_reference<_Dp>::value &&
-               !is_array<_Yp>::value &&
                is_convertible<typename unique_ptr<_Yp, _Dp>::pointer, element_type*>::value,
                __nat
            >::type)

@@ -3196,7 +3217,7 @@ shared_ptr<_Tp>::shared_ptr(unique_ptr<_Yp, _Dp>&& __r,
 #endif
 {
     typedef typename __shared_ptr_default_allocator<_Yp>::type _AllocT;
-    typedef __shared_ptr_pointer<_Yp*,
+    typedef __shared_ptr_pointer<typename unique_ptr<_Yp, _Dp>::pointer,
                                  reference_wrapper<typename remove_reference<_Dp>::type>,
                                  _AllocT > _CntrlBlk;
    __cntrl_ = new _CntrlBlk(__r.get(), _VSTD::ref(__r.get_deleter()), _AllocT());

@@ -3280,7 +3301,6 @@ template <class _Yp, class _Dp>
 inline
 typename enable_if
 <
-    !is_array<_Yp>::value &&
     is_convertible<typename unique_ptr<_Yp, _Dp>::pointer,
                    typename shared_ptr<_Tp>::element_type*>::value,
     shared_ptr<_Tp>&
@@ -184,7 +184,7 @@ __cpp_lib_void_t 201411L <type_traits>
 # define __cpp_lib_quoted_string_io 201304L
 # define __cpp_lib_result_of_sfinae 201210L
 # define __cpp_lib_robust_nonmodifying_seq_ops 201304L
-# if !defined(_LIBCPP_HAS_NO_THREADS)
+# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_timed_mutex)
 #   define __cpp_lib_shared_timed_mutex 201402L
 # endif
 # define __cpp_lib_string_udls 201304L

@@ -213,7 +213,9 @@ __cpp_lib_void_t 201411L <type_traits>
 # define __cpp_lib_clamp 201603L
 # define __cpp_lib_enable_shared_from_this 201603L
 // # define __cpp_lib_execution 201603L
-# define __cpp_lib_filesystem 201703L
+# if !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_filesystem)
+#   define __cpp_lib_filesystem 201703L
+# endif
 # define __cpp_lib_gcd_lcm 201606L
 // # define __cpp_lib_hardware_interference_size 201703L
 # if defined(_LIBCPP_HAS_UNIQUE_OBJECT_REPRESENTATIONS)

@@ -241,7 +243,7 @@ __cpp_lib_void_t 201411L <type_traits>
 # define __cpp_lib_raw_memory_algorithms 201606L
 # define __cpp_lib_sample 201603L
 # define __cpp_lib_scoped_lock 201703L
-# if !defined(_LIBCPP_HAS_NO_THREADS)
+# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_shared_mutex)
 #   define __cpp_lib_shared_mutex 201505L
 # endif
 # define __cpp_lib_shared_ptr_arrays 201611L

@@ -279,10 +281,10 @@ __cpp_lib_void_t 201411L <type_traits>
 # if !defined(_LIBCPP_HAS_NO_THREADS)
 //   # define __cpp_lib_atomic_value_initialization 201911L
 # endif
-# if !defined(_LIBCPP_HAS_NO_THREADS)
+# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_atomic_wait)
 #   define __cpp_lib_atomic_wait 201907L
 # endif
-# if !defined(_LIBCPP_HAS_NO_THREADS)
+# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_barrier)
 #   define __cpp_lib_barrier 201907L
 # endif
 // # define __cpp_lib_bind_front 201907L

@@ -326,7 +328,7 @@ __cpp_lib_void_t 201411L <type_traits>
 # if !defined(_LIBCPP_HAS_NO_THREADS)
 //   # define __cpp_lib_jthread 201911L
 # endif
-# if !defined(_LIBCPP_HAS_NO_THREADS)
+# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_latch)
 #   define __cpp_lib_latch 201907L
 # endif
 # define __cpp_lib_list_remove_return_type 201806L

@@ -336,7 +338,7 @@ __cpp_lib_void_t 201411L <type_traits>
 // # define __cpp_lib_polymorphic_allocator 201902L
 // # define __cpp_lib_ranges 201811L
 # define __cpp_lib_remove_cvref 201711L
-# if !defined(_LIBCPP_HAS_NO_THREADS)
+# if !defined(_LIBCPP_HAS_NO_THREADS) && !defined(_LIBCPP_AVAILABILITY_DISABLE_FTM___cpp_lib_semaphore)
 #   define __cpp_lib_semaphore 201907L
 # endif
 # define __cpp_lib_shift 201806L
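All of these <version> hunks follow one pattern: a feature-test macro is now defined only when the corresponding _LIBCPP_AVAILABILITY_DISABLE_FTM_* knob is off, so the macro no longer advertises a feature the deployed dylib cannot provide. Consumer code keeps working unchanged, e.g. this sketch:

    #include <version>

    #if defined(__cpp_lib_shared_mutex)
    #  include <shared_mutex>
    using rw_lock = std::shared_mutex;  // dylib support confirmed
    #else
    #  include <mutex>
    using rw_lock = std::mutex;         // portable fallback
    #endif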
@@ -604,6 +604,17 @@ unsigned LLVMGetEnumAttributeKind(LLVMAttributeRef A);
  */
 uint64_t LLVMGetEnumAttributeValue(LLVMAttributeRef A);

+/**
+ * Create a type attribute
+ */
+LLVMAttributeRef LLVMCreateTypeAttribute(LLVMContextRef C, unsigned KindID,
+                                         LLVMTypeRef type_ref);
+
+/**
+ * Get the type attribute's value.
+ */
+LLVMTypeRef LLVMGetTypeAttributeValue(LLVMAttributeRef A);
+
 /**
  * Create a string attribute.
  */

@@ -626,6 +637,7 @@ const char *LLVMGetStringAttributeValue(LLVMAttributeRef A, unsigned *Length);
  */
 LLVMBool LLVMIsEnumAttribute(LLVMAttributeRef A);
 LLVMBool LLVMIsStringAttribute(LLVMAttributeRef A);
+LLVMBool LLVMIsTypeAttribute(LLVMAttributeRef A);

 /**
  * Obtain a Type from a context by its registered name.
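A short usage sketch for the new C API declared above (the attribute-kind lookup is the pre-existing LLVMGetEnumAttributeKindForName, and "sret" is one of the attributes that carries a type payload in LLVM 12):

    #include <llvm-c/Core.h>
    #include <cassert>

    int main() {
      LLVMContextRef Ctx = LLVMContextCreate();
      LLVMTypeRef Int64 = LLVMInt64TypeInContext(Ctx);
      unsigned Kind = LLVMGetEnumAttributeKindForName("sret", 4);
      // Attach a type payload to the attribute and read it back.
      LLVMAttributeRef A = LLVMCreateTypeAttribute(Ctx, Kind, Int64);
      assert(LLVMIsTypeAttribute(A));
      assert(LLVMGetTypeAttributeValue(A) == Int64);
      LLVMContextDispose(Ctx);
      return 0;
    }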
@@ -111,6 +111,16 @@ public:
     return AttributeSpecs[idx].Attr;
   }

+  bool getAttrIsImplicitConstByIndex(uint32_t idx) const {
+    assert(idx < AttributeSpecs.size());
+    return AttributeSpecs[idx].isImplicitConst();
+  }
+
+  int64_t getAttrImplicitConstValueByIndex(uint32_t idx) const {
+    assert(idx < AttributeSpecs.size());
+    return AttributeSpecs[idx].getImplicitConstValue();
+  }
+
   /// Get the index of the specified attribute.
   ///
   /// Searches the this abbreviation declaration for the index of the specified
@@ -65,6 +65,20 @@ namespace sys {
   StringRef getHostCPUNameForARM(StringRef ProcCpuinfoContent);
   StringRef getHostCPUNameForS390x(StringRef ProcCpuinfoContent);
   StringRef getHostCPUNameForBPF();
+
+  /// Helper functions to extract CPU details from CPUID on x86.
+  namespace x86 {
+  enum class VendorSignatures {
+    UNKNOWN,
+    GENUINE_INTEL,
+    AUTHENTIC_AMD,
+  };
+
+  /// Returns the host CPU's vendor.
+  /// MaxLeaf: if a non-nullptr pointer is specified, the EAX value will be
+  /// assigned to its pointee.
+  VendorSignatures getVendorSignature(unsigned *MaxLeaf = nullptr);
+  } // namespace x86
   }
 }
 }
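A hedged usage sketch for the new query; note that the enum and function live under llvm::sys::detail, an internal namespace, so out-of-tree code should treat this as an unstable interface:

    #include "llvm/Support/Host.h"
    #include <cstdio>

    int main() {
      using namespace llvm::sys::detail::x86;
      unsigned MaxLeaf = 0;
      switch (getVendorSignature(&MaxLeaf)) {
      case VendorSignatures::GENUINE_INTEL:
        std::printf("Intel, max CPUID leaf %u\n", MaxLeaf);
        break;
      case VendorSignatures::AUTHENTIC_AMD:
        std::printf("AMD, max CPUID leaf %u\n", MaxLeaf);
        break;
      default:
        std::printf("unknown or non-x86 host\n");
        break;
      }
      return 0;
    }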
@@ -396,22 +396,17 @@ private:
   bool canVectorizeOuterLoop();

   /// Return true if all of the instructions in the block can be speculatively
-  /// executed, and record the loads/stores that require masking. If's that
-  /// guard loads can be ignored under "assume safety" unless \p PreserveGuards
-  /// is true. This can happen when we introduces guards for which the original
-  /// "unguarded-loads are safe" assumption does not hold. For example, the
-  /// vectorizer's fold-tail transformation changes the loop to execute beyond
-  /// its original trip-count, under a proper guard, which should be preserved.
+  /// executed, and record the loads/stores that require masking.
   /// \p SafePtrs is a list of addresses that are known to be legal and we know
   /// that we can read from them without segfault.
   /// \p MaskedOp is a list of instructions that have to be transformed into
   /// calls to the appropriate masked intrinsic when the loop is vectorized.
   /// \p ConditionalAssumes is a list of assume instructions in predicated
   /// blocks that must be dropped if the CFG gets flattened.
-  bool blockCanBePredicated(BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
-                            SmallPtrSetImpl<const Instruction *> &MaskedOp,
-                            SmallPtrSetImpl<Instruction *> &ConditionalAssumes,
-                            bool PreserveGuards = false) const;
+  bool blockCanBePredicated(
+      BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
+      SmallPtrSetImpl<const Instruction *> &MaskedOp,
+      SmallPtrSetImpl<Instruction *> &ConditionalAssumes) const;

   /// Updates the vectorization state by adding \p Phi to the inductions list.
   /// This can set \p Phi as the main induction of the loop if \p Phi is a
@@ -69,7 +69,7 @@ static void dumpRanges(const DWARFObject &Obj, raw_ostream &OS,
   }
 }

-static void dumpLocation(raw_ostream &OS, DWARFFormValue &FormValue,
+static void dumpLocation(raw_ostream &OS, const DWARFFormValue &FormValue,
                          DWARFUnit *U, unsigned Indent,
                          DIDumpOptions DumpOpts) {
   DWARFContext &Ctx = U->getContext();

@@ -230,21 +230,22 @@ static void dumpTypeName(raw_ostream &OS, const DWARFDie &D) {
 }

 static void dumpAttribute(raw_ostream &OS, const DWARFDie &Die,
-                          uint64_t *OffsetPtr, dwarf::Attribute Attr,
-                          dwarf::Form Form, unsigned Indent,
+                          const DWARFAttribute &AttrValue, unsigned Indent,
                           DIDumpOptions DumpOpts) {
   if (!Die.isValid())
     return;
   const char BaseIndent[] = "            ";
   OS << BaseIndent;
   OS.indent(Indent + 2);
+  dwarf::Attribute Attr = AttrValue.Attr;
   WithColor(OS, HighlightColor::Attribute) << formatv("{0}", Attr);

+  dwarf::Form Form = AttrValue.Value.getForm();
   if (DumpOpts.Verbose || DumpOpts.ShowForm)
     OS << formatv(" [{0}]", Form);

   DWARFUnit *U = Die.getDwarfUnit();
-  DWARFFormValue FormValue = DWARFFormValue::createFromUnit(Form, U, OffsetPtr);
+  const DWARFFormValue &FormValue = AttrValue.Value;

   OS << "\t(";

@@ -631,16 +632,8 @@ void DWARFDie::dump(raw_ostream &OS, unsigned Indent,
     OS << '\n';

     // Dump all data in the DIE for the attributes.
-    for (const auto &AttrSpec : AbbrevDecl->attributes()) {
-      if (AttrSpec.Form == DW_FORM_implicit_const) {
-        // We are dumping .debug_info section ,
-        // implicit_const attribute values are not really stored here,
-        // but in .debug_abbrev section. So we just skip such attrs.
-        continue;
-      }
-      dumpAttribute(OS, *this, &offset, AttrSpec.Attr, AttrSpec.Form,
-                    Indent, DumpOpts);
-    }
+    for (const DWARFAttribute &AttrValue : attributes())
+      dumpAttribute(OS, *this, AttrValue, Indent, DumpOpts);

     DWARFDie child = getFirstChild();
     if (DumpOpts.ShowChildren && DumpOpts.ChildRecurseDepth > 0 && child) {
@@ -723,10 +716,16 @@ void DWARFDie::attribute_iterator::updateForIndex(
     // Add the previous byte size of any previous attribute value.
     AttrValue.Offset += AttrValue.ByteSize;
     uint64_t ParseOffset = AttrValue.Offset;
-    auto U = Die.getDwarfUnit();
-    assert(U && "Die must have valid DWARF unit");
-    AttrValue.Value = DWARFFormValue::createFromUnit(
-        AbbrDecl.getFormByIndex(Index), U, &ParseOffset);
+    if (AbbrDecl.getAttrIsImplicitConstByIndex(Index))
+      AttrValue.Value = DWARFFormValue::createFromSValue(
+          AbbrDecl.getFormByIndex(Index),
+          AbbrDecl.getAttrImplicitConstValueByIndex(Index));
+    else {
+      auto U = Die.getDwarfUnit();
+      assert(U && "Die must have valid DWARF unit");
+      AttrValue.Value = DWARFFormValue::createFromUnit(
+          AbbrDecl.getFormByIndex(Index), U, &ParseOffset);
+    }
     AttrValue.ByteSize = ParseOffset - AttrValue.Offset;
   } else {
     assert(Index == NumAttrs && "Indexes should be [0, NumAttrs) only");
@@ -168,6 +168,7 @@ bool DWARFFormValue::skipValue(dwarf::Form Form, DataExtractor DebugInfoData,
   case DW_FORM_line_strp:
   case DW_FORM_GNU_ref_alt:
   case DW_FORM_GNU_strp_alt:
+  case DW_FORM_implicit_const:
     if (Optional<uint8_t> FixedSize =
             dwarf::getFixedFormByteSize(Form, Params)) {
       *OffsetPtr += *FixedSize;

@@ -345,6 +346,9 @@ bool DWARFFormValue::extractValue(const DWARFDataExtractor &Data,
   case DW_FORM_ref_sig8:
     Value.uval = Data.getU64(OffsetPtr, &Err);
     break;
+  case DW_FORM_implicit_const:
+    // Value has been already set by DWARFFormValue::createFromSValue.
+    break;
   default:
     // DWARFFormValue::skipValue() will have caught this and caused all
     // DWARF DIEs to fail to be parsed, so this code is not be reachable.

@@ -482,6 +486,7 @@ void DWARFFormValue::dump(raw_ostream &OS, DIDumpOptions DumpOpts) const {
     break;

   case DW_FORM_sdata:
+  case DW_FORM_implicit_const:
     OS << Value.sval;
     break;
   case DW_FORM_udata:
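The DWARF hunks above share one idea: a DW_FORM_implicit_const value lives in the abbreviation declaration (.debug_abbrev), not in the DIE data (.debug_info), so skipping it consumes zero bytes and extraction leaves the previously seeded value alone. The seeding uses the existing factory, roughly like this sketch:

    #include "llvm/DebugInfo/DWARF/DWARFFormValue.h"

    // Sketch: an implicit_const attribute is materialized from the value
    // stored in the abbrev table; no bytes are read from .debug_info.
    llvm::DWARFFormValue makeImplicitConst(int64_t Value) {
      return llvm::DWARFFormValue::createFromSValue(
          llvm::dwarf::DW_FORM_implicit_const, Value);
    }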
@@ -164,6 +164,18 @@ uint64_t LLVMGetEnumAttributeValue(LLVMAttributeRef A) {
   return Attr.getValueAsInt();
 }

+LLVMAttributeRef LLVMCreateTypeAttribute(LLVMContextRef C, unsigned KindID,
+                                         LLVMTypeRef type_ref) {
+  auto &Ctx = *unwrap(C);
+  auto AttrKind = (Attribute::AttrKind)KindID;
+  return wrap(Attribute::get(Ctx, AttrKind, unwrap(type_ref)));
+}
+
+LLVMTypeRef LLVMGetTypeAttributeValue(LLVMAttributeRef A) {
+  auto Attr = unwrap(A);
+  return wrap(Attr.getValueAsType());
+}
+
 LLVMAttributeRef LLVMCreateStringAttribute(LLVMContextRef C,
                                            const char *K, unsigned KLength,
                                            const char *V, unsigned VLength) {

@@ -194,6 +206,10 @@ LLVMBool LLVMIsStringAttribute(LLVMAttributeRef A) {
   return unwrap(A).isStringAttribute();
 }

+LLVMBool LLVMIsTypeAttribute(LLVMAttributeRef A) {
+  return unwrap(A).isTypeAttribute();
+}
+
 char *LLVMGetDiagInfoDescription(LLVMDiagnosticInfoRef DI) {
   std::string MsgStorage;
   raw_string_ostream Stream(MsgStorage);
@@ -417,11 +417,6 @@ StringRef sys::detail::getHostCPUNameForBPF() {
 #if defined(__i386__) || defined(_M_IX86) || \
     defined(__x86_64__) || defined(_M_X64)

-enum VendorSignatures {
-  SIG_INTEL = 0x756e6547 /* Genu */,
-  SIG_AMD = 0x68747541 /* Auth */
-};
-
 // The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max).
 // Check motivated by bug reports for OpenSSL crashing on CPUs without CPUID
 // support. Consequently, for i386, the presence of CPUID is checked first

@@ -495,6 +490,42 @@ static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
 #endif
 }

+namespace llvm {
+namespace sys {
+namespace detail {
+namespace x86 {
+
+VendorSignatures getVendorSignature(unsigned *MaxLeaf) {
+  unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
+  if (MaxLeaf == nullptr)
+    MaxLeaf = &EAX;
+  else
+    *MaxLeaf = 0;
+
+  if (!isCpuIdSupported())
+    return VendorSignatures::UNKNOWN;
+
+  if (getX86CpuIDAndInfo(0, MaxLeaf, &EBX, &ECX, &EDX) || *MaxLeaf < 1)
+    return VendorSignatures::UNKNOWN;
+
+  // "Genu ineI ntel"
+  if (EBX == 0x756e6547 && EDX == 0x49656e69 && ECX == 0x6c65746e)
+    return VendorSignatures::GENUINE_INTEL;
+
+  // "Auth enti cAMD"
+  if (EBX == 0x68747541 && EDX == 0x69746e65 && ECX == 0x444d4163)
+    return VendorSignatures::AUTHENTIC_AMD;
+
+  return VendorSignatures::UNKNOWN;
+}
+
+} // namespace x86
+} // namespace detail
+} // namespace sys
+} // namespace llvm
+
+using namespace llvm::sys::detail::x86;
+
 /// getX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return
 /// the 4 values in the specified arguments. If we can't run cpuid on the host,
 /// return true.
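The vendor test above compares the three CPUID leaf-0 output registers against "GenuineIntel"/"AuthenticAMD" packed four ASCII bytes at a time, little-endian, into EBX, EDX, ECX (in that order). A small sketch verifying the constants used in the new code (hypothetical helper, same arithmetic):

    #include <cstdint>

    // Pack four ASCII characters little-endian, e.g. pack("Genu") == 0x756e6547.
    constexpr std::uint32_t pack(const char (&s)[5]) {
      return std::uint32_t(s[0]) | std::uint32_t(s[1]) << 8 |
             std::uint32_t(s[2]) << 16 | std::uint32_t(s[3]) << 24;
    }

    static_assert(pack("Genu") == 0x756e6547, "EBX of GenuineIntel");
    static_assert(pack("ineI") == 0x49656e69, "EDX of GenuineIntel");
    static_assert(pack("ntel") == 0x6c65746e, "ECX of GenuineIntel");
    static_assert(pack("Auth") == 0x68747541, "EBX of AuthenticAMD");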
@@ -1092,14 +1123,12 @@ static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
 }

 StringRef sys::getHostCPUName() {
+  unsigned MaxLeaf = 0;
+  const VendorSignatures Vendor = getVendorSignature(&MaxLeaf);
+  if (Vendor == VendorSignatures::UNKNOWN)
+    return "generic";
+
   unsigned EAX = 0, EBX = 0, ECX = 0, EDX = 0;
-  unsigned MaxLeaf, Vendor;
-
-  if (!isCpuIdSupported())
-    return "generic";
-
-  if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1)
-    return "generic";
   getX86CpuIDAndInfo(0x1, &EAX, &EBX, &ECX, &EDX);

   unsigned Family = 0, Model = 0;

@@ -1114,10 +1143,10 @@ StringRef sys::getHostCPUName() {

   StringRef CPU;

-  if (Vendor == SIG_INTEL) {
+  if (Vendor == VendorSignatures::GENUINE_INTEL) {
     CPU = getIntelProcessorTypeAndSubtype(Family, Model, Features, &Type,
                                           &Subtype);
-  } else if (Vendor == SIG_AMD) {
+  } else if (Vendor == VendorSignatures::AUTHENTIC_AMD) {
     CPU = getAMDProcessorTypeAndSubtype(Family, Model, Features, &Type,
                                         &Subtype);
   }

@@ -1219,6 +1248,19 @@ StringRef sys::getHostCPUName() {
 }
 #else
 StringRef sys::getHostCPUName() { return "generic"; }
+namespace llvm {
+namespace sys {
+namespace detail {
+namespace x86 {
+
+VendorSignatures getVendorSignature(unsigned *MaxLeaf) {
+  return VendorSignatures::UNKNOWN;
+}
+
+} // namespace x86
+} // namespace detail
+} // namespace sys
+} // namespace llvm
 #endif

 #if defined(__linux__) && (defined(__i386__) || defined(__x86_64__))
@@ -145,8 +145,7 @@ bool ARMBlockPlacement::runOnMachineFunction(MachineFunction &MF) {
        It++) {
     MachineBasicBlock *MBB = &*It;
     for (auto &Terminator : MBB->terminators()) {
-      if (Terminator.getOpcode() != ARM::t2LoopEnd &&
-          Terminator.getOpcode() != ARM::t2LoopEndDec)
+      if (Terminator.getOpcode() != ARM::t2LoopEndDec)
         continue;
       MachineBasicBlock *LETarget = Terminator.getOperand(2).getMBB();
       // The LE will become forwards branching if it branches to LoopExit

@@ -204,10 +203,8 @@ void ARMBlockPlacement::moveBasicBlock(MachineBasicBlock *BB,
   if (!Terminator.isUnconditionalBranch()) {
     // The BB doesn't have an unconditional branch so it relied on
     // fall-through. Fix by adding an unconditional branch to the moved BB.
-    unsigned BrOpc =
-        BBUtils->isBBInRange(&Terminator, To, 254) ? ARM::tB : ARM::t2B;
     MachineInstrBuilder MIB =
-        BuildMI(From, Terminator.getDebugLoc(), TII->get(BrOpc));
+        BuildMI(From, Terminator.getDebugLoc(), TII->get(ARM::t2B));
     MIB.addMBB(To);
     MIB.addImm(ARMCC::CondCodes::AL);
     MIB.addReg(ARM::NoRegister);
@@ -1467,14 +1467,15 @@ MachineInstr* ARMLowOverheadLoops::ExpandLoopStart(LowOverheadLoop &LoLoop) {

 void ARMLowOverheadLoops::ConvertVPTBlocks(LowOverheadLoop &LoLoop) {
   auto RemovePredicate = [](MachineInstr *MI) {
+    if (MI->isDebugInstr())
+      return;
     LLVM_DEBUG(dbgs() << "ARM Loops: Removing predicate from: " << *MI);
-    if (int PIdx = llvm::findFirstVPTPredOperandIdx(*MI)) {
-      assert(MI->getOperand(PIdx).getImm() == ARMVCC::Then &&
-             "Expected Then predicate!");
-      MI->getOperand(PIdx).setImm(ARMVCC::None);
-      MI->getOperand(PIdx+1).setReg(0);
-    } else
-      llvm_unreachable("trying to unpredicate a non-predicated instruction");
+    int PIdx = llvm::findFirstVPTPredOperandIdx(*MI);
+    assert(PIdx >= 1 && "Trying to unpredicate a non-predicated instruction");
+    assert(MI->getOperand(PIdx).getImm() == ARMVCC::Then &&
+           "Expected Then predicate!");
+    MI->getOperand(PIdx).setImm(ARMVCC::None);
+    MI->getOperand(PIdx + 1).setReg(0);
   };

   for (auto &Block : LoLoop.getVPTBlocks()) {

@@ -1518,8 +1519,13 @@ void ARMLowOverheadLoops::ConvertVPTBlocks(LowOverheadLoop &LoLoop) {
       //   - Insert a new vpst to predicate the instruction(s) that following
       //     the divergent vpr def.
       MachineInstr *Divergent = VPTState::getDivergent(Block);
+      MachineBasicBlock *MBB = Divergent->getParent();
       auto DivergentNext = ++MachineBasicBlock::iterator(Divergent);
+      while (DivergentNext != MBB->end() && DivergentNext->isDebugInstr())
+        ++DivergentNext;
+
       bool DivergentNextIsPredicated =
+          DivergentNext != MBB->end() &&
           getVPTInstrPredicate(*DivergentNext) != ARMVCC::None;

       for (auto I = ++MachineBasicBlock::iterator(VPST), E = DivergentNext;
@@ -960,7 +960,8 @@ bool MVEGatherScatterLowering::optimiseOffsets(Value *Offsets, BasicBlock *BB,
   // Get the value that is added to/multiplied with the phi
   Value *OffsSecondOperand = Offs->getOperand(OffsSecondOp);

-  if (IncrementPerRound->getType() != OffsSecondOperand->getType())
+  if (IncrementPerRound->getType() != OffsSecondOperand->getType() ||
+      !L->isLoopInvariant(OffsSecondOperand))
     // Something has gone wrong, abort
     return false;

@@ -1165,6 +1166,8 @@ bool MVEGatherScatterLowering::runOnFunction(Function &F) {
   bool Changed = false;

   for (BasicBlock &BB : F) {
+    Changed |= SimplifyInstructionsInBlock(&BB);
+
     for (Instruction &I : BB) {
       IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
       if (II && II->getIntrinsicID() == Intrinsic::masked_gather &&
@@ -205,6 +205,10 @@ bool MVETailPredication::IsSafeActiveMask(IntrinsicInst *ActiveLaneMask,
     EnableTailPredication == TailPredication::ForceEnabled;

   Value *ElemCount = ActiveLaneMask->getOperand(1);
+  bool Changed = false;
+  if (!L->makeLoopInvariant(ElemCount, Changed))
+    return false;
+
   auto *EC= SE->getSCEV(ElemCount);
   auto *TC = SE->getSCEV(TripCount);
   int VectorWidth =
@@ -107,6 +107,12 @@ static bool StepOverPredicatedInstrs(MachineBasicBlock::instr_iterator &Iter,
   NumInstrsSteppedOver = 0;

   while (Iter != EndIter) {
+    if (Iter->isDebugInstr()) {
+      // Skip debug instructions
+      ++Iter;
+      continue;
+    }
+
     NextPred = getVPTInstrPredicate(*Iter, PredReg);
     assert(NextPred != ARMVCC::Else &&
            "VPT block pass does not expect Else preds");

@@ -170,6 +176,8 @@ CreateVPTBlock(MachineBasicBlock::instr_iterator &Iter,
     LLVM_DEBUG(for (MachineBasicBlock::instr_iterator AddedInstIter =
                         std::next(BlockBeg);
                     AddedInstIter != Iter; ++AddedInstIter) {
+      if (AddedInstIter->isDebugInstr())
+        continue;
       dbgs() << "  adding: ";
       AddedInstIter->dump();
     });

@@ -197,7 +205,7 @@ CreateVPTBlock(MachineBasicBlock::instr_iterator &Iter,
       if (!IsVPRDefinedOrKilledByBlock(Iter, VPNOTBlockEndIter))
         break;

-      LLVM_DEBUG(dbgs() << "  removing VPNOT: "; Iter->dump(););
+      LLVM_DEBUG(dbgs() << "  removing VPNOT: "; Iter->dump());

       // Record the new size of the block
       BlockSize += ElseInstCnt;

@@ -211,6 +219,9 @@ CreateVPTBlock(MachineBasicBlock::instr_iterator &Iter,
     // Note that we are using "Iter" to iterate over the block so we can update
     // it at the same time.
     for (; Iter != VPNOTBlockEndIter; ++Iter) {
+      if (Iter->isDebugInstr())
+        continue;
+
       // Find the register in which the predicate is
       int OpIdx = findFirstVPTPredOperandIdx(*Iter);
       assert(OpIdx != -1);
@@ -15154,17 +15154,38 @@ PPCTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
       return std::make_pair(0U, &PPC::LRRCRegClass);
   }

-  // If we name a VSX register, we can't defer to the base class because it
-  // will not recognize the correct register (their names will be VSL{0-31}
-  // and V{0-31} so they won't match). So we match them here.
-  if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
-    int VSNum = atoi(Constraint.data() + 3);
-    assert(VSNum >= 0 && VSNum <= 63 &&
-           "Attempted to access a vsr out of range");
-    if (VSNum < 32)
-      return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
-    return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
+  // Handle special cases of physical registers that are not properly handled
+  // by the base class.
+  if (Constraint[0] == '{' && Constraint[Constraint.size() - 1] == '}') {
+    // If we name a VSX register, we can't defer to the base class because it
+    // will not recognize the correct register (their names will be VSL{0-31}
+    // and V{0-31} so they won't match). So we match them here.
+    if (Constraint.size() > 3 && Constraint[1] == 'v' && Constraint[2] == 's') {
+      int VSNum = atoi(Constraint.data() + 3);
+      assert(VSNum >= 0 && VSNum <= 63 &&
+             "Attempted to access a vsr out of range");
+      if (VSNum < 32)
+        return std::make_pair(PPC::VSL0 + VSNum, &PPC::VSRCRegClass);
+      return std::make_pair(PPC::V0 + VSNum - 32, &PPC::VSRCRegClass);
+    }
+
+    // For float registers, we can't defer to the base class as it will match
+    // the SPILLTOVSRRC class.
+    if (Constraint.size() > 3 && Constraint[1] == 'f') {
+      int RegNum = atoi(Constraint.data() + 2);
+      if (RegNum > 31 || RegNum < 0)
+        report_fatal_error("Invalid floating point register number");
+      if (VT == MVT::f32 || VT == MVT::i32)
+        return Subtarget.hasSPE()
+                   ? std::make_pair(PPC::R0 + RegNum, &PPC::GPRCRegClass)
+                   : std::make_pair(PPC::F0 + RegNum, &PPC::F4RCRegClass);
+      if (VT == MVT::f64 || VT == MVT::i64)
+        return Subtarget.hasSPE()
+                   ? std::make_pair(PPC::S0 + RegNum, &PPC::SPERCRegClass)
+                   : std::make_pair(PPC::F0 + RegNum, &PPC::F8RCRegClass);
+    }
   }

   std::pair<unsigned, const TargetRegisterClass *> R =
       TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
@@ -1550,6 +1550,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
   bool IsVarArg = CLI.IsVarArg;
   MachineFunction &MF = DAG.getMachineFunction();
   EVT PtrVT = getPointerTy(MF.getDataLayout());
+  LLVMContext &Ctx = *DAG.getContext();

   // Detect unsupported vector argument and return types.
   if (Subtarget.hasVector()) {

@@ -1559,7 +1560,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,

   // Analyze the operands of the call, assigning locations to each operand.
   SmallVector<CCValAssign, 16> ArgLocs;
-  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
+  SystemZCCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, Ctx);
   ArgCCInfo.AnalyzeCallOperands(Outs, CC_SystemZ);

   // We don't support GuaranteedTailCallOpt, only automatically-detected

@@ -1584,14 +1585,25 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,

     if (VA.getLocInfo() == CCValAssign::Indirect) {
       // Store the argument in a stack slot and pass its address.
-      SDValue SpillSlot = DAG.CreateStackTemporary(Outs[I].ArgVT);
+      unsigned ArgIndex = Outs[I].OrigArgIndex;
+      EVT SlotVT;
+      if (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
+        // Allocate the full stack space for a promoted (and split) argument.
+        Type *OrigArgType = CLI.Args[Outs[I].OrigArgIndex].Ty;
+        EVT OrigArgVT = getValueType(MF.getDataLayout(), OrigArgType);
+        MVT PartVT = getRegisterTypeForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
+        unsigned N = getNumRegistersForCallingConv(Ctx, CLI.CallConv, OrigArgVT);
+        SlotVT = EVT::getIntegerVT(Ctx, PartVT.getSizeInBits() * N);
+      } else {
+        SlotVT = Outs[I].ArgVT;
+      }
+      SDValue SpillSlot = DAG.CreateStackTemporary(SlotVT);
       int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
       MemOpChains.push_back(
           DAG.getStore(Chain, DL, ArgValue, SpillSlot,
                        MachinePointerInfo::getFixedStack(MF, FI)));
       // If the original argument was split (e.g. i128), we need
       // to store all parts of it here (and pass just one address).
-      unsigned ArgIndex = Outs[I].OrigArgIndex;
       assert (Outs[I].PartOffset == 0);
       while (I + 1 != E && Outs[I + 1].OrigArgIndex == ArgIndex) {
         SDValue PartValue = OutVals[I + 1];

@@ -1601,6 +1613,8 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,
         MemOpChains.push_back(
             DAG.getStore(Chain, DL, PartValue, Address,
                          MachinePointerInfo::getFixedStack(MF, FI)));
+        assert((PartOffset + PartValue.getValueType().getStoreSize() <=
+                SlotVT.getStoreSize()) && "Not enough space for argument part!");
         ++I;
       }
       ArgValue = SpillSlot;

@@ -1694,7 +1708,7 @@ SystemZTargetLowering::LowerCall(CallLoweringInfo &CLI,

   // Assign locations to each value returned by this call.
   SmallVector<CCValAssign, 16> RetLocs;
-  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, *DAG.getContext());
+  CCState RetCCInfo(CallConv, IsVarArg, MF, RetLocs, Ctx);
   RetCCInfo.AnalyzeCallResult(Ins, RetCC_SystemZ);

   // Copy all of the result registers out of their specified physreg.
@@ -149,6 +149,7 @@ bool Lowerer::lowerEarlyIntrinsics(Function &F) {
   bool Changed = false;
   CoroIdInst *CoroId = nullptr;
   SmallVector<CoroFreeInst *, 4> CoroFrees;
+  bool HasCoroSuspend = false;
   for (auto IB = inst_begin(F), IE = inst_end(F); IB != IE;) {
     Instruction &I = *IB++;
     if (auto *CB = dyn_cast<CallBase>(&I)) {

@@ -163,6 +164,7 @@ bool Lowerer::lowerEarlyIntrinsics(Function &F) {
         // pass expects that there is at most one final suspend point.
         if (cast<CoroSuspendInst>(&I)->isFinal())
           CB->setCannotDuplicate();
+        HasCoroSuspend = true;
         break;
       case Intrinsic::coro_end_async:
       case Intrinsic::coro_end:

@@ -213,6 +215,13 @@ bool Lowerer::lowerEarlyIntrinsics(Function &F) {
   if (CoroId)
     for (CoroFreeInst *CF : CoroFrees)
       CF->setArgOperand(0, CoroId);
+  // Coroutine suspention could potentially lead to any argument modified
+  // outside of the function, hence arguments should not have noalias
+  // attributes.
+  if (HasCoroSuspend)
+    for (Argument &A : F.args())
+      if (A.hasNoAliasAttr())
+        A.removeAttr(Attribute::NoAlias);
   return Changed;
 }
@@ -781,7 +781,13 @@ static StructType *buildFrameType(Function &F, coro::Shape &Shape,
       PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
   // Create an entry for every spilled value.
   for (auto &S : FrameData.Spills) {
-    FieldIDType Id = B.addField(S.first->getType(), None);
+    Type *FieldType = S.first->getType();
+    // For byval arguments, we need to store the pointed value in the frame,
+    // instead of the pointer itself.
+    if (const Argument *A = dyn_cast<Argument>(S.first))
+      if (A->hasByValAttr())
+        FieldType = FieldType->getPointerElementType();
+    FieldIDType Id = B.addField(FieldType, None);
     FrameData.setFieldIndex(S.first, Id);
   }

@@ -1149,6 +1155,7 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
       // Create a store instruction storing the value into the
       // coroutine frame.
       Instruction *InsertPt = nullptr;
+      bool NeedToCopyArgPtrValue = false;
       if (auto *Arg = dyn_cast<Argument>(Def)) {
         // For arguments, we will place the store instruction right after
         // the coroutine frame pointer instruction, i.e. bitcast of

@@ -1159,6 +1166,9 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
         // from the coroutine function.
         Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);

+        if (Arg->hasByValAttr())
+          NeedToCopyArgPtrValue = true;
+
       } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
         // Don't spill immediately after a suspend; splitting assumes
         // that the suspend will be followed by a branch.

@@ -1193,7 +1203,15 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
       Builder.SetInsertPoint(InsertPt);
       auto *G = Builder.CreateConstInBoundsGEP2_32(
           FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
-      Builder.CreateStore(Def, G);
+      if (NeedToCopyArgPtrValue) {
+        // For byval arguments, we need to store the pointed value in the frame,
+        // instead of the pointer itself.
+        auto *Value =
+            Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
+        Builder.CreateStore(Value, G);
+      } else {
+        Builder.CreateStore(Def, G);
+      }

       BasicBlock *CurrentBlock = nullptr;
       Value *CurrentReload = nullptr;

@@ -1207,9 +1225,12 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,

         auto *GEP = GetFramePointer(E.first);
         GEP->setName(E.first->getName() + Twine(".reload.addr"));
-        CurrentReload = Builder.CreateLoad(
-            FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
-            E.first->getName() + Twine(".reload"));
+        if (NeedToCopyArgPtrValue)
+          CurrentReload = GEP;
+        else
+          CurrentReload = Builder.CreateLoad(
+              FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
+              E.first->getName() + Twine(".reload"));

         TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
         for (DbgDeclareInst *DDI : DIs) {
@@ -95,6 +95,8 @@ isUnmergeableGlobal(GlobalVariable *GV,
   // Only process constants with initializers in the default address space.
   return !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
          GV->getType()->getAddressSpace() != 0 || GV->hasSection() ||
+         // Don't touch thread-local variables.
+         GV->isThreadLocal() ||
          // Don't touch values marked with attribute(used).
          UsedGlobals.count(GV);
 }
@@ -925,10 +925,7 @@ bool LoopVectorizationLegality::blockNeedsPredication(BasicBlock *BB) {
 bool LoopVectorizationLegality::blockCanBePredicated(
     BasicBlock *BB, SmallPtrSetImpl<Value *> &SafePtrs,
     SmallPtrSetImpl<const Instruction *> &MaskedOp,
-    SmallPtrSetImpl<Instruction *> &ConditionalAssumes,
-    bool PreserveGuards) const {
-  const bool IsAnnotatedParallel = TheLoop->isAnnotatedParallel();
-
+    SmallPtrSetImpl<Instruction *> &ConditionalAssumes) const {
   for (Instruction &I : *BB) {
     // Check that we don't have a constant expression that can trap as operand.
     for (Value *Operand : I.operands()) {

@@ -956,11 +953,7 @@ bool LoopVectorizationLegality::blockCanBePredicated(
       if (!LI)
         return false;
       if (!SafePtrs.count(LI->getPointerOperand())) {
-        // !llvm.mem.parallel_loop_access implies if-conversion safety.
-        // Otherwise, record that the load needs (real or emulated) masking
-        // and let the cost model decide.
-        if (!IsAnnotatedParallel || PreserveGuards)
-          MaskedOp.insert(LI);
+        MaskedOp.insert(LI);
         continue;
       }
     }

@@ -1276,8 +1269,7 @@ bool LoopVectorizationLegality::prepareToFoldTailByMasking() {
   // do not need predication such as the header block.
   for (BasicBlock *BB : TheLoop->blocks()) {
     if (!blockCanBePredicated(BB, SafePointers, TmpMaskedOp,
-                              TmpConditionalAssumes,
-                              /* MaskAllLoads= */ true)) {
+                              TmpConditionalAssumes)) {
       LLVM_DEBUG(dbgs() << "LV: Cannot fold tail by masking as requested.\n");
       return false;
     }
@@ -100,11 +100,7 @@ FunctionCoverageSummary::get(const InstantiationGroup &Group,
   for (const auto &FCS : Summaries.drop_front()) {
     Summary.RegionCoverage.merge(FCS.RegionCoverage);
     Summary.LineCoverage.merge(FCS.LineCoverage);
-
-    // Sum branch coverage across instantiation groups for the summary rather
-    // than "merge" the maximum count. This is a clearer view into whether all
-    // created branches are covered.
-    Summary.BranchCoverage += FCS.BranchCoverage;
+    Summary.BranchCoverage.merge(FCS.BranchCoverage);
   }
   return Summary;
 }
@@ -123,6 +123,11 @@ public:
     return *this;
   }

+  void merge(const BranchCoverageInfo &RHS) {
+    Covered = std::max(Covered, RHS.Covered);
+    NumBranches = std::max(NumBranches, RHS.NumBranches);
+  }
+
   size_t getCovered() const { return Covered; }

   size_t getNumBranches() const { return NumBranches; }
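Taken together with the llvm-cov hunk above, this restores "merge" semantics for branch coverage across instantiations of the same function: a branch is counted once, and it is covered if any instantiation covered it, instead of summing counts across instantiations (which inflates the totals). For example, two instantiations that each cover 2 of 4 branches merge to 2/4 rather than 4/8. A stripped-down model of the new member:

    #include <algorithm>
    #include <cstddef>

    struct Branches {  // toy stand-in for BranchCoverageInfo
      std::size_t Covered = 0, NumBranches = 0;
      void merge(const Branches &RHS) {
        // Take maxima instead of sums when folding instantiations.
        Covered = std::max(Covered, RHS.Covered);
        NumBranches = std::max(NumBranches, RHS.NumBranches);
      }
    };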
@@ -1,14 +1,14 @@
 // $FreeBSD$

-#define LLVM_REVISION "llvmorg-12.0.1-rc2-0-ge7dac564cd0e"
+#define LLVM_REVISION "llvmorg-12.0.1-0-gfed41342a82f"
 #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"

-#define CLANG_REVISION "llvmorg-12.0.1-rc2-0-ge7dac564cd0e"
+#define CLANG_REVISION "llvmorg-12.0.1-0-gfed41342a82f"
 #define CLANG_REPOSITORY "git@github.com:llvm/llvm-project.git"

 // <Upstream revision at import>-<Local identifier in __FreeBSD_version style>
-#define LLD_REVISION "llvmorg-12.0.1-rc2-0-ge7dac564cd0e-1400001"
+#define LLD_REVISION "llvmorg-12.0.1-0-gfed41342a82f-1400001"
 #define LLD_REPOSITORY "FreeBSD"

-#define LLDB_REVISION "llvmorg-12.0.1-rc2-0-ge7dac564cd0e"
+#define LLDB_REVISION "llvmorg-12.0.1-0-gfed41342a82f"
 #define LLDB_REPOSITORY "git@github.com:llvm/llvm-project.git"
@@ -1,4 +1,3 @@
-
 /* $FreeBSD$ */
 /*===------- llvm/Config/llvm-config.h - llvm configuration -------*- C -*-===*/
 /*                                                                            */

@@ -1,3 +1,3 @@
 /* $FreeBSD$ */
-#define LLVM_REVISION "llvmorg-12.0.1-rc2-0-ge7dac564cd0e"
+#define LLVM_REVISION "llvmorg-12.0.1-0-gfed41342a82f"
 #define LLVM_REPOSITORY "git@github.com:llvm/llvm-project.git"