diff --git a/contrib/compiler-rt/lib/builtins/floatdidf.c b/contrib/compiler-rt/lib/builtins/floatdidf.c index fccb29072407..681fecef9682 100644 --- a/contrib/compiler-rt/lib/builtins/floatdidf.c +++ b/contrib/compiler-rt/lib/builtins/floatdidf.c @@ -104,7 +104,7 @@ __floatdidf(di_int a) } #endif -#if defined(__AEABI__) +#if defined(__ARM_EABI__) AEABI_RTABI double __aeabi_l2d(di_int a) { return __floatdidf(a); } diff --git a/contrib/libc++/include/iterator b/contrib/libc++/include/iterator index 47a7811a3004..4aa44746dc96 100644 --- a/contrib/libc++/include/iterator +++ b/contrib/libc++/include/iterator @@ -64,14 +64,23 @@ struct forward_iterator_tag : public input_iterator_tag {}; struct bidirectional_iterator_tag : public forward_iterator_tag {}; struct random_access_iterator_tag : public bidirectional_iterator_tag {}; +// 27.4.3, iterator operations // extension: second argument not conforming to C++03 -template -void advance(InputIterator& i, +template // constexpr in C++17 + constexpr void advance(InputIterator& i, typename iterator_traits::difference_type n); -template -typename iterator_traits::difference_type -distance(InputIterator first, InputIterator last); +template // constexpr in C++17 + constexpr typename iterator_traits::difference_type + distance(InputIterator first, InputIterator last); + +template // constexpr in C++17 + constexpr InputIterator next(InputIterator x, +typename iterator_traits::difference_type n = 1); + +template // constexpr in C++17 + constexpr BidirectionalIterator prev(BidirectionalIterator x, + typename iterator_traits::difference_type n = 1); template class reverse_iterator @@ -529,7 +538,7 @@ struct _LIBCPP_TEMPLATE_VIS iterator }; template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 void __advance(_InputIter& __i, typename iterator_traits<_InputIter>::difference_type __n, input_iterator_tag) { @@ -538,7 +547,7 @@ void __advance(_InputIter& __i, } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 void __advance(_BiDirIter& __i, typename iterator_traits<_BiDirIter>::difference_type __n, bidirectional_iterator_tag) { @@ -551,7 +560,7 @@ void __advance(_BiDirIter& __i, } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 void __advance(_RandIter& __i, typename iterator_traits<_RandIter>::difference_type __n, random_access_iterator_tag) { @@ -559,7 +568,7 @@ void __advance(_RandIter& __i, } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 void advance(_InputIter& __i, typename iterator_traits<_InputIter>::difference_type __n) { @@ -567,7 +576,7 @@ void advance(_InputIter& __i, } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 typename iterator_traits<_InputIter>::difference_type __distance(_InputIter __first, _InputIter __last, input_iterator_tag) { @@ -578,7 +587,7 @@ __distance(_InputIter __first, _InputIter __last, input_iterator_tag) } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 typename iterator_traits<_RandIter>::difference_type __distance(_RandIter __first, _RandIter __last, random_access_iterator_tag) { @@ -586,7 +595,7 @@ __distance(_RandIter __first, _RandIter __last, random_access_iterator_tag) } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 
typename iterator_traits<_InputIter>::difference_type distance(_InputIter __first, _InputIter __last) { @@ -594,7 +603,7 @@ distance(_InputIter __first, _InputIter __last) } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 _InputIter next(_InputIter __x, typename iterator_traits<_InputIter>::difference_type __n = 1, @@ -605,7 +614,7 @@ next(_InputIter __x, } template -inline _LIBCPP_INLINE_VISIBILITY +inline _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR_AFTER_CXX14 _BidiretionalIter prev(_BidiretionalIter __x, typename iterator_traits<_BidiretionalIter>::difference_type __n = 1, diff --git a/contrib/libc++/include/optional b/contrib/libc++/include/optional index 8c7a242113a0..70b6eb44dcd6 100644 --- a/contrib/libc++/include/optional +++ b/contrib/libc++/include/optional @@ -599,8 +599,8 @@ private: public: _LIBCPP_INLINE_VISIBILITY constexpr optional() noexcept {} - _LIBCPP_INLINE_VISIBILITY optional(const optional&) = default; - _LIBCPP_INLINE_VISIBILITY optional(optional&&) = default; + _LIBCPP_INLINE_VISIBILITY constexpr optional(const optional&) = default; + _LIBCPP_INLINE_VISIBILITY constexpr optional(optional&&) = default; _LIBCPP_INLINE_VISIBILITY constexpr optional(nullopt_t) noexcept {} template #include @@ -26,6 +27,50 @@ namespace llvm { +/// ForwardIterator for the bits that are set. +/// Iterators get invalidated when resize / reserve is called. +template class const_set_bits_iterator_impl { + const BitVectorT &Parent; + int Current = 0; + + void advance() { + assert(Current != -1 && "Trying to advance past end."); + Current = Parent.find_next(Current); + } + +public: + const_set_bits_iterator_impl(const BitVectorT &Parent, int Current) + : Parent(Parent), Current(Current) {} + explicit const_set_bits_iterator_impl(const BitVectorT &Parent) + : const_set_bits_iterator_impl(Parent, Parent.find_first()) {} + const_set_bits_iterator_impl(const const_set_bits_iterator_impl &) = default; + + const_set_bits_iterator_impl operator++(int) { + auto Prev = *this; + advance(); + return Prev; + } + + const_set_bits_iterator_impl &operator++() { + advance(); + return *this; + } + + unsigned operator*() const { return Current; } + + bool operator==(const const_set_bits_iterator_impl &Other) const { + assert(&Parent == &Other.Parent && + "Comparing iterators from different BitVectors"); + return Current == Other.Current; + } + + bool operator!=(const const_set_bits_iterator_impl &Other) const { + assert(&Parent == &Other.Parent && + "Comparing iterators from different BitVectors"); + return Current != Other.Current; + } +}; + class BitVector { typedef unsigned long BitWord; @@ -73,6 +118,18 @@ class BitVector { } }; + typedef const_set_bits_iterator_impl const_set_bits_iterator; + typedef const_set_bits_iterator set_iterator; + + const_set_bits_iterator set_bits_begin() const { + return const_set_bits_iterator(*this); + } + const_set_bits_iterator set_bits_end() const { + return const_set_bits_iterator(*this, -1); + } + iterator_range set_bits() const { + return make_range(set_bits_begin(), set_bits_end()); + } /// BitVector default ctor - Creates an empty bitvector. BitVector() : Size(0) {} @@ -146,138 +203,164 @@ class BitVector { return !any(); } - /// find_first - Returns the index of the first set bit, -1 if none - /// of the bits are set. 
- int find_first() const { - for (unsigned i = 0; i < NumBitWords(size()); ++i) - if (Bits[i] != 0) - return i * BITWORD_SIZE + countTrailingZeros(Bits[i]); + /// find_first_in - Returns the index of the first set bit in the range + /// [Begin, End). Returns -1 if all bits in the range are unset. + int find_first_in(unsigned Begin, unsigned End) const { + assert(Begin <= End && End <= Size); + if (Begin == End) + return -1; + + unsigned FirstWord = Begin / BITWORD_SIZE; + unsigned LastWord = (End - 1) / BITWORD_SIZE; + + // Check subsequent words. + for (unsigned i = FirstWord; i <= LastWord; ++i) { + BitWord Copy = Bits[i]; + + if (i == FirstWord) { + unsigned FirstBit = Begin % BITWORD_SIZE; + Copy &= maskTrailingZeros(FirstBit); + } + + if (i == LastWord) { + unsigned LastBit = (End - 1) % BITWORD_SIZE; + Copy &= maskTrailingOnes(LastBit + 1); + } + if (Copy != 0) + return i * BITWORD_SIZE + countTrailingZeros(Copy); + } return -1; } + /// find_last_in - Returns the index of the last set bit in the range + /// [Begin, End). Returns -1 if all bits in the range are unset. + int find_last_in(unsigned Begin, unsigned End) const { + assert(Begin <= End && End <= Size); + if (Begin == End) + return -1; + + unsigned LastWord = (End - 1) / BITWORD_SIZE; + unsigned FirstWord = Begin / BITWORD_SIZE; + + for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) { + unsigned CurrentWord = i - 1; + + BitWord Copy = Bits[CurrentWord]; + if (CurrentWord == LastWord) { + unsigned LastBit = (End - 1) % BITWORD_SIZE; + Copy &= maskTrailingOnes(LastBit + 1); + } + + if (CurrentWord == FirstWord) { + unsigned FirstBit = Begin % BITWORD_SIZE; + Copy &= maskTrailingZeros(FirstBit); + } + + if (Copy != 0) + return (CurrentWord + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1; + } + + return -1; + } + + /// find_first_unset_in - Returns the index of the first unset bit in the + /// range [Begin, End). Returns -1 if all bits in the range are set. + int find_first_unset_in(unsigned Begin, unsigned End) const { + assert(Begin <= End && End <= Size); + if (Begin == End) + return -1; + + unsigned FirstWord = Begin / BITWORD_SIZE; + unsigned LastWord = (End - 1) / BITWORD_SIZE; + + // Check subsequent words. + for (unsigned i = FirstWord; i <= LastWord; ++i) { + BitWord Copy = Bits[i]; + + if (i == FirstWord) { + unsigned FirstBit = Begin % BITWORD_SIZE; + Copy |= maskTrailingOnes(FirstBit); + } + + if (i == LastWord) { + unsigned LastBit = (End - 1) % BITWORD_SIZE; + Copy |= maskTrailingZeros(LastBit + 1); + } + if (Copy != ~0UL) { + unsigned Result = i * BITWORD_SIZE + countTrailingOnes(Copy); + return Result < size() ? Result : -1; + } + } + return -1; + } + + /// find_last_unset_in - Returns the index of the last unset bit in the + /// range [Begin, End). Returns -1 if all bits in the range are set. 
+ int find_last_unset_in(unsigned Begin, unsigned End) const { + assert(Begin <= End && End <= Size); + if (Begin == End) + return -1; + + unsigned LastWord = (End - 1) / BITWORD_SIZE; + unsigned FirstWord = Begin / BITWORD_SIZE; + + for (unsigned i = LastWord + 1; i >= FirstWord + 1; --i) { + unsigned CurrentWord = i - 1; + + BitWord Copy = Bits[CurrentWord]; + if (CurrentWord == LastWord) { + unsigned LastBit = (End - 1) % BITWORD_SIZE; + Copy |= maskTrailingZeros(LastBit + 1); + } + + if (CurrentWord == FirstWord) { + unsigned FirstBit = Begin % BITWORD_SIZE; + Copy |= maskTrailingOnes(FirstBit); + } + + if (Copy != ~0UL) { + unsigned Result = + (CurrentWord + 1) * BITWORD_SIZE - countLeadingOnes(Copy) - 1; + return Result < Size ? Result : -1; + } + } + return -1; + } + + /// find_first - Returns the index of the first set bit, -1 if none + /// of the bits are set. + int find_first() const { return find_first_in(0, Size); } + /// find_last - Returns the index of the last set bit, -1 if none of the bits /// are set. - int find_last() const { - if (Size == 0) - return -1; - - unsigned N = NumBitWords(size()); - assert(N > 0); - - unsigned i = N - 1; - while (i > 0 && Bits[i] == BitWord(0)) - --i; - - return int((i + 1) * BITWORD_SIZE - countLeadingZeros(Bits[i])) - 1; - } - - /// find_first_unset - Returns the index of the first unset bit, -1 if all - /// of the bits are set. - int find_first_unset() const { - for (unsigned i = 0; i < NumBitWords(size()); ++i) - if (Bits[i] != ~0UL) { - unsigned Result = i * BITWORD_SIZE + countTrailingOnes(Bits[i]); - return Result < size() ? Result : -1; - } - return -1; - } - - /// find_last_unset - Returns the index of the last unset bit, -1 if all of - /// the bits are set. - int find_last_unset() const { - if (Size == 0) - return -1; - - const unsigned N = NumBitWords(size()); - assert(N > 0); - - unsigned i = N - 1; - BitWord W = Bits[i]; - - // The last word in the BitVector has some unused bits, so we need to set - // them all to 1 first. Set them all to 1 so they don't get treated as - // valid unset bits. - unsigned UnusedCount = BITWORD_SIZE - size() % BITWORD_SIZE; - W |= maskLeadingOnes(UnusedCount); - - while (W == ~BitWord(0) && --i > 0) - W = Bits[i]; - - return int((i + 1) * BITWORD_SIZE - countLeadingOnes(W)) - 1; - } + int find_last() const { return find_last_in(0, Size); } /// find_next - Returns the index of the next set bit following the /// "Prev" bit. Returns -1 if the next set bit is not found. - int find_next(unsigned Prev) const { - ++Prev; - if (Prev >= Size) - return -1; + int find_next(unsigned Prev) const { return find_first_in(Prev + 1, Size); } - unsigned WordPos = Prev / BITWORD_SIZE; - unsigned BitPos = Prev % BITWORD_SIZE; - BitWord Copy = Bits[WordPos]; - // Mask off previous bits. - Copy &= maskTrailingZeros(BitPos); + /// find_prev - Returns the index of the first set bit that precedes the + /// the bit at \p PriorTo. Returns -1 if all previous bits are unset. + int find_prev(unsigned PriorTo) const { return find_last_in(0, PriorTo); } - if (Copy != 0) - return WordPos * BITWORD_SIZE + countTrailingZeros(Copy); - - // Check subsequent words. - for (unsigned i = WordPos+1; i < NumBitWords(size()); ++i) - if (Bits[i] != 0) - return i * BITWORD_SIZE + countTrailingZeros(Bits[i]); - return -1; - } + /// find_first_unset - Returns the index of the first unset bit, -1 if all + /// of the bits are set. 
+ int find_first_unset() const { return find_first_unset_in(0, Size); } /// find_next_unset - Returns the index of the next unset bit following the /// "Prev" bit. Returns -1 if all remaining bits are set. int find_next_unset(unsigned Prev) const { - ++Prev; - if (Prev >= Size) - return -1; - - unsigned WordPos = Prev / BITWORD_SIZE; - unsigned BitPos = Prev % BITWORD_SIZE; - BitWord Copy = Bits[WordPos]; - // Mask in previous bits. - BitWord Mask = (1 << BitPos) - 1; - Copy |= Mask; - - if (Copy != ~0UL) - return next_unset_in_word(WordPos, Copy); - - // Check subsequent words. - for (unsigned i = WordPos + 1; i < NumBitWords(size()); ++i) - if (Bits[i] != ~0UL) - return next_unset_in_word(i, Bits[i]); - return -1; + return find_first_unset_in(Prev + 1, Size); } - /// find_prev - Returns the index of the first set bit that precedes the - /// the bit at \p PriorTo. Returns -1 if all previous bits are unset. - int find_prev(unsigned PriorTo) const { - if (PriorTo == 0) - return -1; + /// find_last_unset - Returns the index of the last unset bit, -1 if all of + /// the bits are set. + int find_last_unset() const { return find_last_unset_in(0, Size); } - --PriorTo; - - unsigned WordPos = PriorTo / BITWORD_SIZE; - unsigned BitPos = PriorTo % BITWORD_SIZE; - BitWord Copy = Bits[WordPos]; - // Mask off next bits. - Copy &= maskTrailingOnes(BitPos + 1); - - if (Copy != 0) - return (WordPos + 1) * BITWORD_SIZE - countLeadingZeros(Copy) - 1; - - // Check previous words. - for (unsigned i = 1; i <= WordPos; ++i) { - unsigned Index = WordPos - i; - if (Bits[Index] == 0) - continue; - return (Index + 1) * BITWORD_SIZE - countLeadingZeros(Bits[Index]) - 1; - } - return -1; + /// find_prev_unset - Returns the index of the first unset bit that precedes + /// the bit at \p PriorTo. Returns -1 if all previous bits are set. + int find_prev_unset(unsigned PriorTo) { + return find_last_unset_in(0, PriorTo); } /// clear - Removes all bits from the bitvector. Does not change capacity. diff --git a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h index 8fc08eb252eb..a179d29956b1 100644 --- a/contrib/llvm/include/llvm/ADT/PostOrderIterator.h +++ b/contrib/llvm/include/llvm/ADT/PostOrderIterator.h @@ -96,24 +96,14 @@ template , public po_iterator_storage { - typedef std::iterator super; - typedef typename GT::NodeRef NodeRef; - typedef typename GT::ChildIteratorType ChildItTy; + using super = std::iterator; + using NodeRef = typename GT::NodeRef; + using ChildItTy = typename GT::ChildIteratorType; // VisitStack - Used to maintain the ordering. Top = current block // First element is basic block pointer, second is the 'next child' to visit std::vector> VisitStack; - void traverseChild() { - while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) { - NodeRef BB = *VisitStack.back().second++; - if (this->insertEdge(Optional(VisitStack.back().first), BB)) { - // If the block is not visited... - VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB))); - } - } - } - po_iterator(NodeRef BB) { this->insertEdge(Optional(), BB); VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB))); @@ -134,8 +124,18 @@ class po_iterator : po_iterator_storage(S) { } // End is when stack is empty. + void traverseChild() { + while (VisitStack.back().second != GT::child_end(VisitStack.back().first)) { + NodeRef BB = *VisitStack.back().second++; + if (this->insertEdge(Optional(VisitStack.back().first), BB)) { + // If the block is not visited... 
+ VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB))); + } + } + } + public: - typedef typename super::pointer pointer; + using pointer = typename super::pointer; // Provide static "constructors"... static po_iterator begin(GraphT G) { @@ -286,7 +286,8 @@ inverse_post_order_ext(const T &G, SetType &S) { template> class ReversePostOrderTraversal { - typedef typename GT::NodeRef NodeRef; + using NodeRef = typename GT::NodeRef; + std::vector Blocks; // Block list in normal PO order void Initialize(NodeRef BB) { @@ -294,7 +295,7 @@ class ReversePostOrderTraversal { } public: - typedef typename std::vector::reverse_iterator rpo_iterator; + using rpo_iterator = typename std::vector::reverse_iterator; ReversePostOrderTraversal(GraphT G) { Initialize(GT::getEntryNode(G)); } diff --git a/contrib/llvm/include/llvm/ADT/PriorityWorklist.h b/contrib/llvm/include/llvm/ADT/PriorityWorklist.h index 3198dd438700..35891e931801 100644 --- a/contrib/llvm/include/llvm/ADT/PriorityWorklist.h +++ b/contrib/llvm/include/llvm/ADT/PriorityWorklist.h @@ -17,13 +17,14 @@ #define LLVM_ADT_PRIORITYWORKLIST_H #include "llvm/ADT/DenseMap.h" -#include "llvm/ADT/STLExtras.h" -#include "llvm/ADT/Sequence.h" #include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/STLExtras.h" #include "llvm/Support/Compiler.h" #include #include #include +#include +#include #include namespace llvm { @@ -55,11 +56,11 @@ template , typename MapT = DenseMap> class PriorityWorklist { public: - typedef T value_type; - typedef T key_type; - typedef T& reference; - typedef const T& const_reference; - typedef typename MapT::size_type size_type; + using value_type = T; + using key_type = T; + using reference = T&; + using const_reference = const T&; + using size_type = typename MapT::size_type; /// Construct an empty PriorityWorklist PriorityWorklist() = default; diff --git a/contrib/llvm/include/llvm/ADT/SCCIterator.h b/contrib/llvm/include/llvm/ADT/SCCIterator.h index 9a8a7b168fce..734a58f87da2 100644 --- a/contrib/llvm/include/llvm/ADT/SCCIterator.h +++ b/contrib/llvm/include/llvm/ADT/SCCIterator.h @@ -1,4 +1,4 @@ -//===---- ADT/SCCIterator.h - Strongly Connected Comp. Iter. ----*- C++ -*-===// +//===- ADT/SCCIterator.h - Strongly Connected Comp. Iter. -------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -43,10 +43,10 @@ template > class scc_iterator : public iterator_facade_base< scc_iterator, std::forward_iterator_tag, const std::vector, ptrdiff_t> { - typedef typename GT::NodeRef NodeRef; - typedef typename GT::ChildIteratorType ChildItTy; - typedef std::vector SccTy; - typedef typename scc_iterator::reference reference; + using NodeRef = typename GT::NodeRef; + using ChildItTy = typename GT::ChildIteratorType; + using SccTy = std::vector; + using reference = typename scc_iterator::reference; /// Element of VisitStack during DFS. 
struct StackElement { diff --git a/contrib/llvm/include/llvm/ADT/Sequence.h b/contrib/llvm/include/llvm/ADT/Sequence.h index 5d36831cc128..3d4a897bf9a9 100644 --- a/contrib/llvm/include/llvm/ADT/Sequence.h +++ b/contrib/llvm/include/llvm/ADT/Sequence.h @@ -13,27 +13,31 @@ /// //===----------------------------------------------------------------------===// -#ifndef LLVM_ADT_SEQ_H -#define LLVM_ADT_SEQ_H +#ifndef LLVM_ADT_SEQUENCE_H +#define LLVM_ADT_SEQUENCE_H #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" +#include +#include +#include namespace llvm { namespace detail { + template class value_sequence_iterator : public iterator_facade_base, std::random_access_iterator_tag, const ValueT> { - typedef typename value_sequence_iterator::iterator_facade_base BaseT; + using BaseT = typename value_sequence_iterator::iterator_facade_base; ValueT Value; public: - typedef typename BaseT::difference_type difference_type; - typedef typename BaseT::reference reference; + using difference_type = typename BaseT::difference_type; + using reference = typename BaseT::reference; value_sequence_iterator() = default; value_sequence_iterator(const value_sequence_iterator &) = default; @@ -65,7 +69,8 @@ class value_sequence_iterator reference operator*() const { return Value; } }; -} // End detail namespace. + +} // end namespace detail template iterator_range> seq(ValueT Begin, @@ -74,6 +79,6 @@ iterator_range> seq(ValueT Begin, detail::value_sequence_iterator(End)); } -} +} // end namespace llvm -#endif +#endif // LLVM_ADT_SEQUENCE_H diff --git a/contrib/llvm/include/llvm/ADT/SetVector.h b/contrib/llvm/include/llvm/ADT/SetVector.h index 13378aa3a04e..04ed52fc543f 100644 --- a/contrib/llvm/include/llvm/ADT/SetVector.h +++ b/contrib/llvm/include/llvm/ADT/SetVector.h @@ -40,17 +40,17 @@ template , typename Set = DenseSet> class SetVector { public: - typedef T value_type; - typedef T key_type; - typedef T& reference; - typedef const T& const_reference; - typedef Set set_type; - typedef Vector vector_type; - typedef typename vector_type::const_iterator iterator; - typedef typename vector_type::const_iterator const_iterator; - typedef typename vector_type::const_reverse_iterator reverse_iterator; - typedef typename vector_type::const_reverse_iterator const_reverse_iterator; - typedef typename vector_type::size_type size_type; + using value_type = T; + using key_type = T; + using reference = T&; + using const_reference = const T&; + using set_type = Set; + using vector_type = Vector; + using iterator = typename vector_type::const_iterator; + using const_iterator = typename vector_type::const_iterator; + using reverse_iterator = typename vector_type::const_reverse_iterator; + using const_reverse_iterator = typename vector_type::const_reverse_iterator; + using size_type = typename vector_type::size_type; /// \brief Construct an empty SetVector SetVector() = default; diff --git a/contrib/llvm/include/llvm/ADT/SmallBitVector.h b/contrib/llvm/include/llvm/ADT/SmallBitVector.h index 0eeacc162543..0ff427066959 100644 --- a/contrib/llvm/include/llvm/ADT/SmallBitVector.h +++ b/contrib/llvm/include/llvm/ADT/SmallBitVector.h @@ -134,6 +134,19 @@ class SmallBitVector { } public: + typedef const_set_bits_iterator_impl const_set_bits_iterator; + typedef const_set_bits_iterator set_iterator; + + const_set_bits_iterator set_bits_begin() const { + return const_set_bits_iterator(*this); + } + const_set_bits_iterator set_bits_end() const { + return const_set_bits_iterator(*this, -1); + } + iterator_range set_bits() 
const { + return make_range(set_bits_begin(), set_bits_end()); + } + /// Creates an empty bitvector. SmallBitVector() : X(1) {} diff --git a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h index 196ab6338047..b49d216e0b6e 100644 --- a/contrib/llvm/include/llvm/ADT/SmallPtrSet.h +++ b/contrib/llvm/include/llvm/ADT/SmallPtrSet.h @@ -27,15 +27,13 @@ #include #include -#if LLVM_ENABLE_ABI_BREAKING_CHECKS namespace llvm { + +#if LLVM_ENABLE_ABI_BREAKING_CHECKS template struct ReverseIterate { static bool value; }; template bool ReverseIterate::value = false; -} #endif -namespace llvm { - /// SmallPtrSetImplBase - This is the common code shared among all the /// SmallPtrSet<>'s, which is almost everything. SmallPtrSet has two modes, one /// for small and one for large sets. @@ -92,7 +90,7 @@ class SmallPtrSetImplBase { } public: - typedef unsigned size_type; + using size_type = unsigned; SmallPtrSetImplBase &operator=(const SmallPtrSetImplBase &) = delete; @@ -273,14 +271,14 @@ class SmallPtrSetIteratorImpl { /// SmallPtrSetIterator - This implements a const_iterator for SmallPtrSet. template class SmallPtrSetIterator : public SmallPtrSetIteratorImpl { - typedef PointerLikeTypeTraits PtrTraits; + using PtrTraits = PointerLikeTypeTraits; public: - typedef PtrTy value_type; - typedef PtrTy reference; - typedef PtrTy pointer; - typedef std::ptrdiff_t difference_type; - typedef std::forward_iterator_tag iterator_category; + using value_type = PtrTy; + using reference = PtrTy; + using pointer = PtrTy; + using difference_type = std::ptrdiff_t; + using iterator_category = std::forward_iterator_tag; explicit SmallPtrSetIterator(const void *const *BP, const void *const *E) : SmallPtrSetIteratorImpl(BP, E) {} @@ -351,8 +349,8 @@ struct RoundUpToPowerOfTwo { template class SmallPtrSetImpl : public SmallPtrSetImplBase { using ConstPtrType = typename add_const_past_pointer::type; - typedef PointerLikeTypeTraits PtrTraits; - typedef PointerLikeTypeTraits ConstPtrTraits; + using PtrTraits = PointerLikeTypeTraits; + using ConstPtrTraits = PointerLikeTypeTraits; protected: // Constructors that forward to the base. @@ -365,8 +363,8 @@ class SmallPtrSetImpl : public SmallPtrSetImplBase { : SmallPtrSetImplBase(SmallStorage, SmallSize) {} public: - typedef SmallPtrSetIterator iterator; - typedef SmallPtrSetIterator const_iterator; + using iterator = SmallPtrSetIterator; + using const_iterator = SmallPtrSetIterator; SmallPtrSetImpl(const SmallPtrSetImpl &) = delete; @@ -431,7 +429,7 @@ class SmallPtrSet : public SmallPtrSetImpl { // DenseSet<> instead if you expect many elements in the set. static_assert(SmallSize <= 32, "SmallSize should be small"); - typedef SmallPtrSetImpl BaseT; + using BaseT = SmallPtrSetImpl; // Make sure that SmallSize is a power of two, round up if not. enum { SmallSizePowTwo = RoundUpToPowerOfTwo::Val }; diff --git a/contrib/llvm/include/llvm/ADT/SmallVector.h b/contrib/llvm/include/llvm/ADT/SmallVector.h index b9588214023c..bd24eab93b50 100644 --- a/contrib/llvm/include/llvm/ADT/SmallVector.h +++ b/contrib/llvm/include/llvm/ADT/SmallVector.h @@ -71,7 +71,7 @@ class SmallVectorTemplateCommon : public SmallVectorBase { // Allocate raw space for N elements of type T. If T has a ctor or dtor, we // don't want it to be automatically run, so we need to represent the space as // something else. Use an array of char of sufficient alignment. 
- typedef AlignedCharArrayUnion U; + using U = AlignedCharArrayUnion; U FirstEl; // Space after 'FirstEl' is clobbered, do not add any instance vars after it. @@ -96,19 +96,19 @@ class SmallVectorTemplateCommon : public SmallVectorBase { void setEnd(T *P) { this->EndX = P; } public: - typedef size_t size_type; - typedef ptrdiff_t difference_type; - typedef T value_type; - typedef T *iterator; - typedef const T *const_iterator; + using size_type = size_t; + using difference_type = ptrdiff_t; + using value_type = T; + using iterator = T *; + using const_iterator = const T *; - typedef std::reverse_iterator const_reverse_iterator; - typedef std::reverse_iterator reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; + using reverse_iterator = std::reverse_iterator; - typedef T &reference; - typedef const T &const_reference; - typedef T *pointer; - typedef const T *const_pointer; + using reference = T &; + using const_reference = const T &; + using pointer = T *; + using const_pointer = const T *; // forward iterator creation methods. LLVM_ATTRIBUTE_ALWAYS_INLINE @@ -319,12 +319,12 @@ class SmallVectorTemplateBase : public SmallVectorTemplateCommon { /// reduce code duplication based on the SmallVector 'N' template parameter. template class SmallVectorImpl : public SmallVectorTemplateBase::value> { - typedef SmallVectorTemplateBase::value > SuperClass; + using SuperClass = SmallVectorTemplateBase::value>; public: - typedef typename SuperClass::iterator iterator; - typedef typename SuperClass::const_iterator const_iterator; - typedef typename SuperClass::size_type size_type; + using iterator = typename SuperClass::iterator; + using const_iterator = typename SuperClass::const_iterator; + using size_type = typename SuperClass::size_type; protected: // Default ctor - Initialize to empty. 
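The range-based find_first_in/find_last_in rewrite in BitVector.h earlier in this patch replaces four open-coded scans with one masking scheme: the first word is masked below Begin (maskTrailingZeros), the last word is masked at and above End (maskTrailingOnes over the surviving low bits), and the first nonzero word that remains is resolved with countTrailingZeros. A minimal standalone sketch of the same idea over plain uint64_t words follows; the findFirstIn name is ours, not part of the patch, and __builtin_ctzll assumes GCC/Clang (the patch itself uses LLVM's countTrailingZeros helper).

  #include <cassert>
  #include <cstdint>

  // Find the first set bit in [Begin, End) over an array of 64-bit words;
  // returns -1 if none. Mirrors the patch: mask the first word below Begin
  // and the last word at/after End, then scan for a nonzero word.
  int findFirstIn(const uint64_t *Words, unsigned Begin, unsigned End) {
    assert(Begin <= End);
    if (Begin == End)
      return -1;
    unsigned FirstWord = Begin / 64, LastWord = (End - 1) / 64;
    for (unsigned I = FirstWord; I <= LastWord; ++I) {
      uint64_t Copy = Words[I];
      if (I == FirstWord)
        Copy &= ~0ULL << (Begin % 64);        // clear bits below Begin
      if (I == LastWord && (End % 64) != 0)
        Copy &= ~0ULL >> (64 - End % 64);     // clear bits at/after End
      if (Copy)
        return I * 64 + __builtin_ctzll(Copy);
    }
    return -1;
  }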
@@ -845,8 +845,7 @@ class SmallVector : public SmallVectorImpl { SmallVectorStorage Storage; public: - SmallVector() : SmallVectorImpl(N) { - } + SmallVector() : SmallVectorImpl(N) {} explicit SmallVector(size_t Size, const T &Value = T()) : SmallVectorImpl(N) { @@ -883,16 +882,16 @@ class SmallVector : public SmallVectorImpl { SmallVectorImpl::operator=(::std::move(RHS)); } - const SmallVector &operator=(SmallVector &&RHS) { - SmallVectorImpl::operator=(::std::move(RHS)); - return *this; - } - SmallVector(SmallVectorImpl &&RHS) : SmallVectorImpl(N) { if (!RHS.empty()) SmallVectorImpl::operator=(::std::move(RHS)); } + const SmallVector &operator=(SmallVector &&RHS) { + SmallVectorImpl::operator=(::std::move(RHS)); + return *this; + } + const SmallVector &operator=(SmallVectorImpl &&RHS) { SmallVectorImpl::operator=(::std::move(RHS)); return *this; diff --git a/contrib/llvm/include/llvm/ADT/SparseBitVector.h b/contrib/llvm/include/llvm/ADT/SparseBitVector.h index a82cef6028f9..4cbf40c76805 100644 --- a/contrib/llvm/include/llvm/ADT/SparseBitVector.h +++ b/contrib/llvm/include/llvm/ADT/SparseBitVector.h @@ -1,4 +1,4 @@ -//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector -*- C++ -*- ===// +//===- llvm/ADT/SparseBitVector.h - Efficient Sparse BitVector --*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -41,8 +41,8 @@ namespace llvm { template struct SparseBitVectorElement { public: - typedef unsigned long BitWord; - typedef unsigned size_type; + using BitWord = unsigned long; + using size_type = unsigned; enum { BITWORD_SIZE = sizeof(BitWord) * CHAR_BIT, BITWORDS_PER_ELEMENT = (ElementSize + BITWORD_SIZE - 1) / BITWORD_SIZE, @@ -100,7 +100,7 @@ template struct SparseBitVectorElement { Bits[Idx / BITWORD_SIZE] |= 1L << (Idx % BITWORD_SIZE); } - bool test_and_set (unsigned Idx) { + bool test_and_set(unsigned Idx) { bool old = test(Idx); if (!old) { set(Idx); @@ -254,9 +254,9 @@ template struct SparseBitVectorElement { template class SparseBitVector { - typedef std::list> ElementList; - typedef typename ElementList::iterator ElementListIter; - typedef typename ElementList::const_iterator ElementListConstIter; + using ElementList = std::list>; + using ElementListIter = typename ElementList::iterator; + using ElementListConstIter = typename ElementList::const_iterator; enum { BITWORD_SIZE = SparseBitVectorElement::BITWORD_SIZE }; @@ -421,14 +421,12 @@ class SparseBitVector { }; public: - typedef SparseBitVectorIterator iterator; + using iterator = SparseBitVectorIterator; SparseBitVector() { CurrElementIter = Elements.begin(); } - ~SparseBitVector() = default; - // SparseBitVector copy ctor. SparseBitVector(const SparseBitVector &RHS) { ElementListConstIter ElementIter = RHS.Elements.begin(); @@ -440,6 +438,8 @@ class SparseBitVector { CurrElementIter = Elements.begin (); } + ~SparseBitVector() = default; + // Clear. 
void clear() { Elements.clear(); diff --git a/contrib/llvm/include/llvm/ADT/SparseMultiSet.h b/contrib/llvm/include/llvm/ADT/SparseMultiSet.h index 08da4b68ebaa..b3a413aa3aa5 100644 --- a/contrib/llvm/include/llvm/ADT/SparseMultiSet.h +++ b/contrib/llvm/include/llvm/ADT/SparseMultiSet.h @@ -1,4 +1,4 @@ -//===--- llvm/ADT/SparseMultiSet.h - Sparse multiset ------------*- C++ -*-===// +//===- llvm/ADT/SparseMultiSet.h - Sparse multiset --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -101,7 +101,7 @@ class SparseMultiSet { unsigned Prev; unsigned Next; - SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) { } + SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) {} /// List tails have invalid Nexts. bool isTail() const { @@ -118,8 +118,8 @@ class SparseMultiSet { bool isValid() const { return Prev != INVALID; } }; - typedef typename KeyFunctorT::argument_type KeyT; - typedef SmallVector DenseT; + using KeyT = typename KeyFunctorT::argument_type; + using DenseT = SmallVector; DenseT Dense; SparseT *Sparse = nullptr; unsigned Universe = 0; @@ -183,12 +183,12 @@ class SparseMultiSet { } public: - typedef ValueT value_type; - typedef ValueT &reference; - typedef const ValueT &const_reference; - typedef ValueT *pointer; - typedef const ValueT *const_pointer; - typedef unsigned size_type; + using value_type = ValueT; + using reference = ValueT &; + using const_reference = const ValueT &; + using pointer = ValueT *; + using const_pointer = const ValueT *; + using size_type = unsigned; SparseMultiSet() = default; SparseMultiSet(const SparseMultiSet &) = delete; @@ -227,7 +227,7 @@ class SparseMultiSet { unsigned SparseIdx; iterator_base(SMSPtrTy P, unsigned I, unsigned SI) - : SMS(P), Idx(I), SparseIdx(SI) { } + : SMS(P), Idx(I), SparseIdx(SI) {} /// Whether our iterator has fallen outside our dense vector. bool isEnd() const { @@ -248,11 +248,11 @@ class SparseMultiSet { void setNext(unsigned N) { SMS->Dense[Idx].Next = N; } public: - typedef std::iterator super; - typedef typename super::value_type value_type; - typedef typename super::difference_type difference_type; - typedef typename super::pointer pointer; - typedef typename super::reference reference; + using super = std::iterator; + using value_type = typename super::value_type; + using difference_type = typename super::difference_type; + using pointer = typename super::pointer; + using reference = typename super::reference; reference operator*() const { assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx && @@ -308,11 +308,12 @@ class SparseMultiSet { return I; } }; - typedef iterator_base iterator; - typedef iterator_base const_iterator; + + using iterator = iterator_base; + using const_iterator = iterator_base; // Convenience types - typedef std::pair RangePair; + using RangePair = std::pair; /// Returns an iterator past this container. Note that such an iterator cannot /// be decremented, but will compare equal to other end iterators. 
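For the set_bits() ranges that BitVector and SmallBitVector gain earlier in this patch: the iterator drives find_first()/find_next() under the hood and yields bit indices in increasing order, and, per the comment on const_set_bits_iterator_impl, is invalidated by resize/reserve. A usage sketch built only from declarations visible in this diff:

  #include "llvm/ADT/BitVector.h"
  #include "llvm/Support/raw_ostream.h"

  void printSetBits(const llvm::BitVector &BV) {
    // Each iteration yields the index of one set bit.
    for (unsigned Idx : BV.set_bits())
      llvm::outs() << Idx << ' ';
    llvm::outs() << '\n';
  }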
diff --git a/contrib/llvm/include/llvm/ADT/SparseSet.h b/contrib/llvm/include/llvm/ADT/SparseSet.h index 00c18c743219..25ade8831922 100644 --- a/contrib/llvm/include/llvm/ADT/SparseSet.h +++ b/contrib/llvm/include/llvm/ADT/SparseSet.h @@ -1,4 +1,4 @@ -//===--- llvm/ADT/SparseSet.h - Sparse set ----------------------*- C++ -*-===// +//===- llvm/ADT/SparseSet.h - Sparse set ------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -125,9 +125,9 @@ class SparseSet { !std::numeric_limits::is_signed, "SparseT must be an unsigned integer type"); - typedef typename KeyFunctorT::argument_type KeyT; - typedef SmallVector DenseT; - typedef unsigned size_type; + using KeyT = typename KeyFunctorT::argument_type; + using DenseT = SmallVector; + using size_type = unsigned; DenseT Dense; SparseT *Sparse = nullptr; unsigned Universe = 0; @@ -135,11 +135,11 @@ class SparseSet { SparseSetValFunctor ValIndexOf; public: - typedef ValueT value_type; - typedef ValueT &reference; - typedef const ValueT &const_reference; - typedef ValueT *pointer; - typedef const ValueT *const_pointer; + using value_type = ValueT; + using reference = ValueT &; + using const_reference = const ValueT &; + using pointer = ValueT *; + using const_pointer = const ValueT *; SparseSet() = default; SparseSet(const SparseSet &) = delete; @@ -168,8 +168,8 @@ class SparseSet { } // Import trivial vector stuff from DenseT. - typedef typename DenseT::iterator iterator; - typedef typename DenseT::const_iterator const_iterator; + using iterator = typename DenseT::iterator; + using const_iterator = typename DenseT::const_iterator; const_iterator begin() const { return Dense.begin(); } const_iterator end() const { return Dense.end(); } diff --git a/contrib/llvm/include/llvm/ADT/StringExtras.h b/contrib/llvm/include/llvm/ADT/StringExtras.h index 1c109be3bab3..e22a3f688c40 100644 --- a/contrib/llvm/include/llvm/ADT/StringExtras.h +++ b/contrib/llvm/include/llvm/ADT/StringExtras.h @@ -1,4 +1,4 @@ -//===-- llvm/ADT/StringExtras.h - Useful string functions -------*- C++ -*-===// +//===- llvm/ADT/StringExtras.h - Useful string functions --------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -15,12 +15,18 @@ #define LLVM_ADT_STRINGEXTRAS_H #include "llvm/ADT/StringRef.h" -#include "llvm/Support/DataTypes.h" #include +#include +#include +#include +#include +#include +#include namespace llvm { -class raw_ostream; + template class SmallVectorImpl; +class raw_ostream; /// hexdigit - Return the hexadecimal character for the /// given number \p X (which should be less than 16). @@ -128,7 +134,6 @@ static inline std::string utostr(uint64_t X, bool isNeg = false) { return std::string(BufPtr, std::end(Buffer)); } - static inline std::string itostr(int64_t X) { if (X < 0) return utostr(static_cast(-X), true); @@ -261,13 +266,14 @@ template inline size_t join_items_size(const A1 &A, Args &&... Items) { return join_one_item_size(A) + join_items_size(std::forward(Items)...); } -} + +} // end namespace detail /// Joins the strings in the range [Begin, End), adding Separator between /// the elements. template inline std::string join(IteratorT Begin, IteratorT End, StringRef Separator) { - typedef typename std::iterator_traits::iterator_category tag; + using tag = typename std::iterator_traits::iterator_category; return detail::join_impl(Begin, End, Separator, tag()); } @@ -295,6 +301,6 @@ inline std::string join_items(Sep Separator, Args &&... 
Items) { return Result; } -} // End llvm namespace +} // end namespace llvm -#endif +#endif // LLVM_ADT_STRINGEXTRAS_H diff --git a/contrib/llvm/include/llvm/ADT/StringMap.h b/contrib/llvm/include/llvm/ADT/StringMap.h index c36fda7d6906..d573148665a1 100644 --- a/contrib/llvm/include/llvm/ADT/StringMap.h +++ b/contrib/llvm/include/llvm/ADT/StringMap.h @@ -1,4 +1,4 @@ -//===--- StringMap.h - String Hash table map interface ----------*- C++ -*-===// +//===- StringMap.h - String Hash table map interface ------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -16,25 +16,23 @@ #include "llvm/ADT/StringRef.h" #include "llvm/ADT/iterator.h" +#include "llvm/ADT/iterator_range.h" #include "llvm/Support/Allocator.h" #include "llvm/Support/PointerLikeTypeTraits.h" +#include #include #include #include #include #include -#include +#include #include namespace llvm { - template - class StringMapConstIterator; - template - class StringMapIterator; - template class StringMapKeyIterator; - template - class StringMapEntry; +template class StringMapConstIterator; +template class StringMapIterator; +template class StringMapKeyIterator; /// StringMapEntryBase - Shared base class of StringMapEntry instances. class StringMapEntryBase { @@ -53,17 +51,15 @@ class StringMapImpl { // Array of NumBuckets pointers to entries, null pointers are holes. // TheTable[NumBuckets] contains a sentinel value for easy iteration. Followed // by an array of the actual hash values as unsigned integers. - StringMapEntryBase **TheTable; - unsigned NumBuckets; - unsigned NumItems; - unsigned NumTombstones; + StringMapEntryBase **TheTable = nullptr; + unsigned NumBuckets = 0; + unsigned NumItems = 0; + unsigned NumTombstones = 0; unsigned ItemSize; protected: explicit StringMapImpl(unsigned itemSize) - : TheTable(nullptr), - // Initialize the map with zero buckets to allocation. - NumBuckets(0), NumItems(0), NumTombstones(0), ItemSize(itemSize) {} + : ItemSize(itemSize) {} StringMapImpl(StringMapImpl &&RHS) : TheTable(RHS.TheTable), NumBuckets(RHS.NumBuckets), NumItems(RHS.NumItems), NumTombstones(RHS.NumTombstones), @@ -225,9 +221,10 @@ class StringMap : public StringMapImpl { AllocatorTy Allocator; public: - typedef StringMapEntry MapEntryTy; + using MapEntryTy = StringMapEntry; StringMap() : StringMapImpl(static_cast(sizeof(MapEntryTy))) {} + explicit StringMap(unsigned InitialSize) : StringMapImpl(InitialSize, static_cast(sizeof(MapEntryTy))) {} @@ -248,12 +245,6 @@ class StringMap : public StringMapImpl { StringMap(StringMap &&RHS) : StringMapImpl(std::move(RHS)), Allocator(std::move(RHS.Allocator)) {} - StringMap &operator=(StringMap RHS) { - StringMapImpl::swap(RHS); - std::swap(Allocator, RHS.Allocator); - return *this; - } - StringMap(const StringMap &RHS) : StringMapImpl(static_cast(sizeof(MapEntryTy))), Allocator(RHS.Allocator) { @@ -289,16 +280,37 @@ class StringMap : public StringMapImpl { // not worthwhile. } + StringMap &operator=(StringMap RHS) { + StringMapImpl::swap(RHS); + std::swap(Allocator, RHS.Allocator); + return *this; + } + + ~StringMap() { + // Delete all the elements in the map, but don't reset the elements + // to default values. This is a copy of clear(), but avoids unnecessary + // work not required in the destructor. 
+ if (!empty()) { + for (unsigned I = 0, E = NumBuckets; I != E; ++I) { + StringMapEntryBase *Bucket = TheTable[I]; + if (Bucket && Bucket != getTombstoneVal()) { + static_cast(Bucket)->Destroy(Allocator); + } + } + } + free(TheTable); + } + AllocatorTy &getAllocator() { return Allocator; } const AllocatorTy &getAllocator() const { return Allocator; } - typedef const char* key_type; - typedef ValueTy mapped_type; - typedef StringMapEntry value_type; - typedef size_t size_type; + using key_type = const char*; + using mapped_type = ValueTy; + using value_type = StringMapEntry; + using size_type = size_t; - typedef StringMapConstIterator const_iterator; - typedef StringMapIterator iterator; + using const_iterator = StringMapConstIterator; + using iterator = StringMapIterator; iterator begin() { return iterator(TheTable, NumBuckets == 0); @@ -313,7 +325,7 @@ class StringMap : public StringMapImpl { return const_iterator(TheTable+NumBuckets, true); } - llvm::iterator_range> keys() const { + iterator_range> keys() const { return make_range(StringMapKeyIterator(begin()), StringMapKeyIterator(end())); } @@ -433,21 +445,6 @@ class StringMap : public StringMapImpl { erase(I); return true; } - - ~StringMap() { - // Delete all the elements in the map, but don't reset the elements - // to default values. This is a copy of clear(), but avoids unnecessary - // work not required in the destructor. - if (!empty()) { - for (unsigned I = 0, E = NumBuckets; I != E; ++I) { - StringMapEntryBase *Bucket = TheTable[I]; - if (Bucket && Bucket != getTombstoneVal()) { - static_cast(Bucket)->Destroy(Allocator); - } - } - } - free(TheTable); - } }; template @@ -542,7 +539,6 @@ class StringMapKeyIterator public: StringMapKeyIterator() = default; - explicit StringMapKeyIterator(StringMapConstIterator Iter) : base(std::move(Iter)) {} diff --git a/contrib/llvm/include/llvm/ADT/StringRef.h b/contrib/llvm/include/llvm/ADT/StringRef.h index ce48f6d3bad3..4b25f56432df 100644 --- a/contrib/llvm/include/llvm/ADT/StringRef.h +++ b/contrib/llvm/include/llvm/ADT/StringRef.h @@ -1,4 +1,4 @@ -//===--- StringRef.h - Constant String Reference Wrapper --------*- C++ -*-===// +//===- StringRef.h - Constant String Reference Wrapper ----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -15,16 +15,18 @@ #include "llvm/Support/Compiler.h" #include #include +#include #include #include +#include #include #include namespace llvm { - template - class SmallVectorImpl; + class APInt; class hash_code; + template class SmallVectorImpl; class StringRef; /// Helper functions for StringRef::getAsInteger. @@ -46,10 +48,11 @@ namespace llvm { /// general safe to store a StringRef. class StringRef { public: - typedef const char *iterator; - typedef const char *const_iterator; static const size_t npos = ~size_t(0); - typedef size_t size_type; + + using iterator = const char *; + using const_iterator = const char *; + using size_type = size_t; private: /// The start of the string, in an external buffer. @@ -906,6 +909,7 @@ namespace llvm { // StringRefs can be treated like a POD type. 
template struct isPodLike; template <> struct isPodLike { static const bool value = true; }; -} -#endif +} // end namespace llvm + +#endif // LLVM_ADT_STRINGREF_H diff --git a/contrib/llvm/include/llvm/ADT/StringSet.h b/contrib/llvm/include/llvm/ADT/StringSet.h index c32c2a497438..9af44c07df79 100644 --- a/contrib/llvm/include/llvm/ADT/StringSet.h +++ b/contrib/llvm/include/llvm/ADT/StringSet.h @@ -1,4 +1,4 @@ -//===--- StringSet.h - The LLVM Compiler Driver -----------------*- C++ -*-===// +//===- StringSet.h - The LLVM Compiler Driver -------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -15,13 +15,19 @@ #define LLVM_ADT_STRINGSET_H #include "llvm/ADT/StringMap.h" +#include "llvm/ADT/StringRef.h" +#include "llvm/Support/Allocator.h" +#include +#include +#include namespace llvm { /// StringSet - A wrapper for StringMap that provides set-like functionality. - template - class StringSet : public llvm::StringMap { - typedef llvm::StringMap base; + template + class StringSet : public StringMap { + using base = StringMap; + public: StringSet() = default; StringSet(std::initializer_list S) { @@ -40,6 +46,7 @@ namespace llvm { base::insert(std::make_pair(*It, '\0')); } }; -} + +} // end namespace llvm #endif // LLVM_ADT_STRINGSET_H diff --git a/contrib/llvm/include/llvm/ADT/TinyPtrVector.h b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h index ca43b6046193..79740713f75b 100644 --- a/contrib/llvm/include/llvm/ADT/TinyPtrVector.h +++ b/contrib/llvm/include/llvm/ADT/TinyPtrVector.h @@ -30,9 +30,9 @@ namespace llvm { template class TinyPtrVector { public: - typedef SmallVector VecTy; - typedef typename VecTy::value_type value_type; - typedef PointerUnion PtrUnion; + using VecTy = SmallVector; + using value_type = typename VecTy::value_type; + using PtrUnion = PointerUnion; private: PtrUnion Val; @@ -167,10 +167,10 @@ class TinyPtrVector { return Val.template get()->size(); } - typedef EltTy *iterator; - typedef const EltTy *const_iterator; - typedef std::reverse_iterator reverse_iterator; - typedef std::reverse_iterator const_reverse_iterator; + using iterator = EltTy *; + using const_iterator = const EltTy *; + using reverse_iterator = std::reverse_iterator; + using const_reverse_iterator = std::reverse_iterator; iterator begin() { if (Val.template is()) diff --git a/contrib/llvm/include/llvm/ADT/UniqueVector.h b/contrib/llvm/include/llvm/ADT/UniqueVector.h index e1ab4b56023f..b17fb2392baf 100644 --- a/contrib/llvm/include/llvm/ADT/UniqueVector.h +++ b/contrib/llvm/include/llvm/ADT/UniqueVector.h @@ -1,4 +1,4 @@ -//===-- llvm/ADT/UniqueVector.h ---------------------------------*- C++ -*-===// +//===- llvm/ADT/UniqueVector.h ----------------------------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // @@ -24,16 +24,15 @@ namespace llvm { /// Entries can be fetched using operator[] with the entry ID. template class UniqueVector { public: - typedef typename std::vector VectorType; - typedef typename VectorType::iterator iterator; - typedef typename VectorType::const_iterator const_iterator; + using VectorType = typename std::vector; + using iterator = typename VectorType::iterator; + using const_iterator = typename VectorType::const_iterator; private: // Map - Used to handle the correspondence of entry to ID. std::map Map; // Vector - ID ordered vector of entries. Entries can be indexed by ID - 1. - // VectorType Vector; public: @@ -68,7 +67,6 @@ template class UniqueVector { } /// operator[] - Returns a reference to the entry with the specified ID. 
- /// const T &operator[](unsigned ID) const { assert(ID-1 < size() && "ID is 0 or out of range!"); return Vector[ID - 1]; @@ -87,21 +85,18 @@ template class UniqueVector { const_iterator end() const { return Vector.end(); } /// size - Returns the number of entries in the vector. - /// size_t size() const { return Vector.size(); } /// empty - Returns true if the vector is empty. - /// bool empty() const { return Vector.empty(); } /// reset - Clears all the entries. - /// void reset() { Map.clear(); Vector.resize(0, 0); } }; -} // End of namespace llvm +} // end namespace llvm #endif // LLVM_ADT_UNIQUEVECTOR_H diff --git a/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h b/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h index c5f97083af4d..6aaabe1d1889 100644 --- a/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h +++ b/contrib/llvm/include/llvm/Analysis/ProfileSummaryInfo.h @@ -55,6 +55,21 @@ class ProfileSummaryInfo { ProfileSummaryInfo(ProfileSummaryInfo &&Arg) : M(Arg.M), Summary(std::move(Arg.Summary)) {} + /// \brief Returns true if profile summary is available. + bool hasProfileSummary() { return computeSummary(); } + + /// \brief Returns true if module \c M has sample profile. + bool hasSampleProfile() { + return hasProfileSummary() && + Summary->getKind() == ProfileSummary::PSK_Sample; + } + + /// \brief Returns true if module \c M has instrumentation profile. + bool hasInstrumentationProfile() { + return hasProfileSummary() && + Summary->getKind() == ProfileSummary::PSK_Instr; + } + /// Handle the invalidation of this information. /// /// When used as a result of \c ProfileSummaryAnalysis this method will be diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h index f3122f0bf7f0..6d9f345755ab 100644 --- a/contrib/llvm/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h +++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/CVTypeVisitor.h @@ -28,7 +28,7 @@ class CVTypeVisitor { Error visitTypeRecord(CVType &Record, TypeIndex Index); Error visitTypeRecord(CVType &Record); - Error visitMemberRecord(CVMemberRecord &Record); + Error visitMemberRecord(CVMemberRecord Record); /// Visits the type records in Data. Sets the error flag on parse failures. Error visitTypeStream(const CVTypeArray &Types); @@ -47,6 +47,36 @@ class CVTypeVisitor { TinyPtrVector Handlers; }; +enum VisitorDataSource { + VDS_BytesPresent, // The record bytes are passed into the the visitation + // function. The algorithm should first deserialize them + // before passing them on through the pipeline. + VDS_BytesExternal // The record bytes are not present, and it is the + // responsibility of the visitor callback interface to + // supply the bytes. 
+}; + +Error visitTypeRecord(CVType &Record, TypeIndex Index, + TypeVisitorCallbacks &Callbacks, + VisitorDataSource Source = VDS_BytesPresent, + TypeServerHandler *TS = nullptr); +Error visitTypeRecord(CVType &Record, TypeVisitorCallbacks &Callbacks, + VisitorDataSource Source = VDS_BytesPresent, + TypeServerHandler *TS = nullptr); + +Error visitMemberRecord(CVMemberRecord Record, TypeVisitorCallbacks &Callbacks, + VisitorDataSource Source = VDS_BytesPresent); +Error visitMemberRecord(TypeLeafKind Kind, ArrayRef Record, + TypeVisitorCallbacks &Callbacks); + +Error visitMemberRecordStream(ArrayRef FieldList, + TypeVisitorCallbacks &Callbacks); + +Error visitTypeStream(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks, + TypeServerHandler *TS = nullptr); +Error visitTypeStream(CVTypeRange Types, TypeVisitorCallbacks &Callbacks, + TypeServerHandler *TS = nullptr); + } // end namespace codeview } // end namespace llvm diff --git a/contrib/llvm/include/llvm/DebugInfo/CodeView/RandomAccessTypeVisitor.h b/contrib/llvm/include/llvm/DebugInfo/CodeView/RandomAccessTypeVisitor.h index 35a8010f1163..21288df89be2 100644 --- a/contrib/llvm/include/llvm/DebugInfo/CodeView/RandomAccessTypeVisitor.h +++ b/contrib/llvm/include/llvm/DebugInfo/CodeView/RandomAccessTypeVisitor.h @@ -11,13 +11,10 @@ #define LLVM_DEBUGINFO_CODEVIEW_RANDOMACCESSTYPEVISITOR_H #include "llvm/ADT/TinyPtrVector.h" -#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h" #include "llvm/DebugInfo/CodeView/TypeDatabase.h" #include "llvm/DebugInfo/CodeView/TypeDatabaseVisitor.h" -#include "llvm/DebugInfo/CodeView/TypeDeserializer.h" #include "llvm/DebugInfo/CodeView/TypeIndex.h" #include "llvm/DebugInfo/CodeView/TypeRecord.h" -#include "llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h" #include "llvm/Support/Error.h" namespace llvm { @@ -73,18 +70,6 @@ class RandomAccessTypeVisitor { /// The database visitor which adds new records to the database. TypeDatabaseVisitor DatabaseVisitor; - /// The deserializer which deserializes new records. - TypeDeserializer Deserializer; - - /// The visitation callback pipeline to use. By default this contains a - /// deserializer and a type database visitor. But the callback specified - /// in the constructor is also added. - TypeVisitorCallbackPipeline Pipeline; - - /// The visitor used to visit the internal pipeline for deserialization and - /// database maintenance. - CVTypeVisitor InternalVisitor; - /// A vector mapping type indices to type offset. For every record that has /// been visited, contains the absolute offset of that record in the record /// array. diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAttribute.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAttribute.h index 5919aaddea40..c3953b62d780 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAttribute.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFAttribute.h @@ -31,10 +31,10 @@ struct DWARFAttribute { dwarf::Attribute Attr; /// The form and value for this attribute. 
DWARFFormValue Value; - + DWARFAttribute(uint32_t O, dwarf::Attribute A = dwarf::Attribute(0), dwarf::Form F = dwarf::Form(0)) : Attr(A), Value(F) {} - + bool isValid() const { return Offset != 0 && Attr != dwarf::Attribute(0); } diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h index 40eb7e9a8836..2d82104ea098 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugArangeSet.h @@ -22,19 +22,19 @@ class raw_ostream; class DWARFDebugArangeSet { public: struct Header { - // The total length of the entries for that set, not including the length - // field itself. + /// The total length of the entries for that set, not including the length + /// field itself. uint32_t Length; - // The offset from the beginning of the .debug_info section of the - // compilation unit entry referenced by the table. + /// The offset from the beginning of the .debug_info section of the + /// compilation unit entry referenced by the table. uint32_t CuOffset; - // The DWARF version number. + /// The DWARF version number. uint16_t Version; - // The size in bytes of an address on the target architecture. For segmented - // addressing, this is the size of the offset portion of the address. + /// The size in bytes of an address on the target architecture. For segmented + /// addressing, this is the size of the offset portion of the address. uint8_t AddrSize; - // The size in bytes of a segment descriptor on the target architecture. - // If the target system uses a flat address space, this value is 0. + /// The size in bytes of a segment descriptor on the target architecture. + /// If the target system uses a flat address space, this value is 0. uint8_t SegSize; }; diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h index c06771d6afb4..2237aa361d18 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugAranges.h @@ -28,7 +28,7 @@ class DWARFDebugAranges { void clear(); void extract(DataExtractor DebugArangesData); - // Call appendRange multiple times and then call construct. + /// Call appendRange multiple times and then call construct. void appendRange(uint32_t CUOffset, uint64_t LowPC, uint64_t HighPC); void construct(); @@ -58,9 +58,9 @@ class DWARFDebugAranges { return LowPC < other.LowPC; } - uint64_t LowPC; // Start of address range. - uint32_t Length; // End of address range (not including this address). - uint32_t CUOffset; // Offset of the compile unit or die. + uint64_t LowPC; /// Start of address range. + uint32_t Length; /// End of address range (not including this address). + uint32_t CUOffset; /// Offset of the compile unit or die. }; struct RangeEndpoint { diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h index 23a573b7a9fa..95ec1be62a79 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDebugRangeList.h @@ -33,31 +33,31 @@ typedef std::vector DWARFAddressRangesVector; class DWARFDebugRangeList { public: struct RangeListEntry { - // A beginning address offset. 
This address offset has the size of an - // address and is relative to the applicable base address of the - // compilation unit referencing this range list. It marks the beginning - // of an address range. + /// A beginning address offset. This address offset has the size of an + /// address and is relative to the applicable base address of the + /// compilation unit referencing this range list. It marks the beginning + /// of an address range. uint64_t StartAddress; - // An ending address offset. This address offset again has the size of - // an address and is relative to the applicable base address of the - // compilation unit referencing this range list. It marks the first - // address past the end of the address range. The ending address must - // be greater than or equal to the beginning address. + /// An ending address offset. This address offset again has the size of + /// an address and is relative to the applicable base address of the + /// compilation unit referencing this range list. It marks the first + /// address past the end of the address range. The ending address must + /// be greater than or equal to the beginning address. uint64_t EndAddress; - // The end of any given range list is marked by an end of list entry, - // which consists of a 0 for the beginning address offset - // and a 0 for the ending address offset. + /// The end of any given range list is marked by an end of list entry, + /// which consists of a 0 for the beginning address offset + /// and a 0 for the ending address offset. bool isEndOfListEntry() const { return (StartAddress == 0) && (EndAddress == 0); } - // A base address selection entry consists of: - // 1. The value of the largest representable address offset - // (for example, 0xffffffff when the size of an address is 32 bits). - // 2. An address, which defines the appropriate base address for - // use in interpreting the beginning and ending address offsets of - // subsequent entries of the location list. + /// A base address selection entry consists of: + /// 1. The value of the largest representable address offset + /// (for example, 0xffffffff when the size of an address is 32 bits). + /// 2. An address, which defines the appropriate base address for + /// use in interpreting the beginning and ending address offsets of + /// subsequent entries of the location list. bool isBaseAddressSelectionEntry(uint8_t AddressSize) const { assert(AddressSize == 4 || AddressSize == 8); if (AddressSize == 4) @@ -68,7 +68,7 @@ class DWARFDebugRangeList { }; private: - // Offset in .debug_ranges section. + /// Offset in .debug_ranges section. uint32_t Offset; uint8_t AddressSize; std::vector Entries; diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h index ee06125ea278..ca94a90fabfc 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFDie.h @@ -24,10 +24,10 @@ #include namespace llvm { - + class DWARFUnit; class raw_ostream; - + //===----------------------------------------------------------------------===// /// Utility class that carries the DWARF compile/type unit and the debug info /// entry in an object. 
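The RangeListEntry rules converted to doxygen comments above encode two sentinel forms: an end-of-list entry is a (0, 0) offset pair, and a base-address selection entry carries the largest representable address offset for the unit's address size. Restated as standalone predicates (the helper names here are hypothetical, not part of the patch):

  #include <cstdint>

  bool isEndOfList(uint64_t Start, uint64_t End) {
    return Start == 0 && End == 0;
  }
  bool isBaseAddressSelection(uint64_t Start, uint8_t AddrSize) {
    // 0xffffffff for 4-byte addresses, 0xffffffffffffffff for 8-byte.
    return Start == (AddrSize == 4 ? 0xffffffffULL : ~0ULL);
  }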
@@ -47,7 +47,7 @@ class DWARFDie { public: DWARFDie() = default; DWARFDie(DWARFUnit *Unit, const DWARFDebugInfoEntry * D) : U(Unit), Die(D) {} - + bool isValid() const { return U && Die; } explicit operator bool() const { return isValid(); } const DWARFDebugInfoEntry *getDebugInfoEntry() const { return Die; } @@ -68,7 +68,7 @@ class DWARFDie { assert(isValid() && "must check validity prior to calling"); return Die->getOffset(); } - + dwarf::Tag getTag() const { auto AbbrevDecl = getAbbreviationDeclarationPtr(); if (AbbrevDecl) @@ -80,7 +80,7 @@ class DWARFDie { assert(isValid() && "must check validity prior to calling"); return Die->hasChildren(); } - + /// Returns true for a valid DIE that terminates a sibling chain. bool isNULL() const { return getAbbreviationDeclarationPtr() == nullptr; @@ -97,13 +97,13 @@ class DWARFDie { /// \returns a valid DWARFDie instance if this object has a parent or an /// invalid DWARFDie instance if it doesn't. DWARFDie getParent() const; - + /// Get the sibling of this DIE object. /// /// \returns a valid DWARFDie instance if this object has a sibling or an /// invalid DWARFDie instance if it doesn't. DWARFDie getSibling() const; - + /// Get the first child of this DIE object. /// /// \returns a valid DWARFDie instance if this object has children or an @@ -113,7 +113,7 @@ class DWARFDie { return DWARFDie(U, Die + 1); return DWARFDie(); } - + /// Dump the DIE and all of its attributes to the supplied stream. /// /// \param OS the stream to use for output. /// \param recurseDepth the depth to recurse to when dumping this DIE and its /// children. /// \param indent the number of characters to indent each line that is output. void dump(raw_ostream &OS, unsigned recurseDepth, unsigned indent = 0) const; - + /// Extract the specified attribute from this DIE. /// /// Extract an attribute value from this DIE only. This call doesn't look /// for the attribute value in any DW_AT_specification or /// DW_AT_abstract_origin referenced DIEs. /// /// \param Attr the attribute to extract. /// \returns an optional DWARFFormValue that will have the form value if the /// attribute was successfully extracted. Optional<DWARFFormValue> find(dwarf::Attribute Attr) const; - + /// Extract the first value of any attribute in Attrs from this DIE. /// /// Extract the first attribute that matches from this DIE only. This call @@ -180,7 +180,7 @@ class DWARFDie { /// /// \returns an optional absolute section offset value for the attribute. Optional<uint64_t> getRangesBaseAttribute() const; - + /// Get the DW_AT_high_pc attribute value as an address. /// /// In DWARF version 4 and later the high PC can be encoded as an offset from @@ -196,7 +196,7 @@ class DWARFDie { /// Retrieves DW_AT_low_pc and DW_AT_high_pc from CU. /// Returns true if both attributes are present. bool getLowAndHighPC(uint64_t &LowPC, uint64_t &HighPC) const; - + /// Get the address ranges for this DIE. /// /// Get the hi/low PC range if both attributes are available or extracts the @@ -208,7 +208,7 @@ class DWARFDie { /// \returns an address range vector that might be empty if no address range /// information is available. DWARFAddressRangesVector getAddressRanges() const; - + /// Get all address ranges for any DW_TAG_subprogram DIEs in this DIE or any /// of its children. /// @@ -218,19 +218,19 @@ class DWARFDie { /// /// \param Ranges the address range vector to fill in. void collectChildrenAddressRanges(DWARFAddressRangesVector &Ranges) const; - + bool addressRangeContainsAddress(const uint64_t Address) const; - + /// If a DIE represents a subprogram (or inlined subroutine), returns its /// mangled name (or short name, if mangled is missing).
This name may be /// fetched from specification or abstract origin for this subprogram. /// Returns null if no name is found. const char *getSubroutineName(DINameKind Kind) const; - + /// Return the DIE name resolving DW_AT_specification or DW_AT_abstract_origin /// references if necessary. Returns null if no name is found. const char *getName(DINameKind Kind) const; - + /// Returns the declaration line (start line) for a DIE, assuming it specifies /// a subprogram. This may be fetched from specification or abstract origin /// for this subprogram by resolving DW_AT_specification or @@ -251,21 +251,21 @@ class DWARFDie { /// there is no DW_AT_GNU_discriminator attribute in this DIE. void getCallerFrame(uint32_t &CallFile, uint32_t &CallLine, uint32_t &CallColumn, uint32_t &CallDiscriminator) const; - + class attribute_iterator; /// Get an iterator range to all attributes in the current DIE only. /// /// \returns an iterator range for the attributes of the current DIE. iterator_range<attribute_iterator> attributes() const; - + class iterator; - + iterator begin() const; iterator end() const; iterator_range<iterator> children() const; }; - + class DWARFDie::attribute_iterator : public iterator_facade_base<attribute_iterator, std::forward_iterator_tag, const DWARFAttribute> { @@ -275,7 +275,7 @@ class DWARFDie::attribute_iterator : DWARFAttribute AttrValue; /// The attribute index within the abbreviation declaration in Die. uint32_t Index; - + /// Update the attribute index and attempt to read the attribute value. If the /// attribute is able to be read, update AttrValue and the Index member /// variable. If the attribute value is not able to be read, an appropriate diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h index f3516ebdecba..a30e0be9c3c3 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFFormValue.h @@ -49,9 +49,9 @@ class DWARFFormValue { const uint8_t *data = nullptr; }; - dwarf::Form Form; // Form for this value. - ValueType Value; // Contains all data for the form. - const DWARFUnit *U = nullptr; // Remember the DWARFUnit at extract time. + dwarf::Form Form; /// Form for this value. + ValueType Value; /// Contains all data for the form. + const DWARFUnit *U = nullptr; /// Remember the DWARFUnit at extract time. public: DWARFFormValue(dwarf::Form F = dwarf::Form(0)) : Form(F) {} @@ -72,11 +72,14 @@ class DWARFFormValue { const DWARFUnit *getUnit() const { return U; } void dump(raw_ostream &OS) const; - /// \brief extracts a value in data at offset *offset_ptr. + /// Extracts a value in \p Data at offset \p *OffsetPtr. /// /// The passed DWARFUnit is allowed to be nullptr, in which /// case no relocation processing will be performed and some /// kinds of forms that depend on Unit information are disallowed. + /// \param Data The DataExtractor to use. + /// \param OffsetPtr The offset within DataExtractor where the data starts. + /// \param U The optional DWARFUnit supplying information for some forms. /// \returns whether the extraction succeeded.
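A hedged usage sketch for the extractValue() API documented above and declared just below (the section bytes, endianness, and address size here are hypothetical; the signature is the one in this header):

#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/Support/DataExtractor.h"

using namespace llvm;

bool readFormValue(StringRef DebugInfoBytes, dwarf::Form F) {
  DataExtractor Data(DebugInfoBytes, /*IsLittleEndian=*/true,
                     /*AddressSize=*/8);
  DWARFFormValue Val(F);
  uint32_t Offset = 0;
  // Passing a null DWARFUnit is allowed; unit-dependent forms then fail.
  return Val.extractValue(Data, &Offset, /*U=*/nullptr);
}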
bool extractValue(const DataExtractor &Data, uint32_t *OffsetPtr, const DWARFUnit *U); diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h index 7a52218663b9..8d1ac5c83c23 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFGdbIndex.h @@ -29,25 +29,25 @@ class DWARFGdbIndex { uint32_t ConstantPoolOffset; struct CompUnitEntry { - uint64_t Offset; // Offset of a CU in the .debug_info section. - uint64_t Length; // Length of that CU. + uint64_t Offset; /// Offset of a CU in the .debug_info section. + uint64_t Length; /// Length of that CU. }; SmallVector CuList; struct AddressEntry { - uint64_t LowAddress; // The low address. - uint64_t HighAddress; // The high address. - uint32_t CuIndex; // The CU index. + uint64_t LowAddress; /// The low address. + uint64_t HighAddress; /// The high address. + uint32_t CuIndex; /// The CU index. }; SmallVector AddressArea; struct SymTableEntry { - uint32_t NameOffset; // Offset of the symbol's name in the constant pool. - uint32_t VecOffset; // Offset of the CU vector in the constant pool. + uint32_t NameOffset; /// Offset of the symbol's name in the constant pool. + uint32_t VecOffset; /// Offset of the CU vector in the constant pool. }; SmallVector SymbolTable; - // Each value is CU index + attributes. + /// Each value is CU index + attributes. SmallVector<std::pair<uint32_t, SmallVector<uint32_t, 0>>, 0> ConstantPoolVectors; diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h index f1e03bb4c2e1..ec0397a0fb09 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFRelocMap.h @@ -17,15 +17,14 @@ namespace llvm { struct RelocAddrEntry { - uint8_t Width; int64_t Value; }; -// In place of applying the relocations to the data we've read from disk we use -// a separate mapping table to the side and checking that at locations in the -// dwarf where we expect relocated values. This adds a bit of complexity to the -// dwarf parsing/extraction at the benefit of not allocating memory for the -// entire size of the debug info sections. +/// In place of applying the relocations to the data we've read from disk, we +/// keep a separate mapping table to the side and check it at the locations in +/// the DWARF where we expect relocated values. This adds a bit of complexity +/// to the DWARF parsing/extraction, with the benefit of not allocating memory +/// for the entire size of the debug info sections. typedef DenseMap<uint64_t, RelocAddrEntry> RelocAddrMap; } // end namespace llvm diff --git a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h index 68e541bac73c..c15e27f36a8b 100644 --- a/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h +++ b/contrib/llvm/include/llvm/DebugInfo/DWARF/DWARFUnit.h @@ -111,7 +111,7 @@ class DWARFUnitSection final : public SmallVector<std::unique_ptr<UnitType>, 1>, class DWARFUnit { DWARFContext &Context; - // Section containing this DWARFUnit. + /// Section containing this DWARFUnit. const DWARFSection &InfoSection; const DWARFDebugAbbrev *Abbrev; @@ -133,12 +133,12 @@ class DWARFUnit { uint8_t UnitType; uint8_t AddrSize; uint64_t BaseAddr; - // The compile unit debug information entry items. + /// The compile unit debug information entry items. std::vector<DWARFDebugInfoEntry> DieArray; - // Map from range's start address to end address and corresponding DIE.
- // IntervalMap does not support range removal, as a result, we use the - // std::map::upper_bound for address range lookup. + /// Map from range's start address to end address and corresponding DIE. + /// IntervalMap does not support range removal, as a result, we use the + /// std::map::upper_bound for address range lookup. std::map> AddrDieMap; typedef iterator_range::iterator> die_iterator_range; @@ -189,7 +189,7 @@ class DWARFUnit { AddrOffsetSectionBase = Base; } - // Recursively update address to Die map. + /// Recursively update address to Die map. void updateAddressDieMap(DWARFDie Die); void setRangesSection(const DWARFSection *RS, uint32_t Base) { diff --git a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h index 4579cbf4227b..c5549983ed43 100644 --- a/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h +++ b/contrib/llvm/include/llvm/DebugInfo/PDB/Native/TpiStream.h @@ -51,6 +51,7 @@ class TpiStream { HashTable &getHashAdjusters(); codeview::CVTypeRange types(bool *HadError) const; + const codeview::CVTypeArray &typeArray() const { return TypeRecords; } Error commit(); diff --git a/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td index 64240a929782..6321bb81b8cb 100644 --- a/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td +++ b/contrib/llvm/include/llvm/IR/IntrinsicsPowerPC.td @@ -1132,4 +1132,6 @@ def int_ppc_tsuspend : GCCBuiltin<"__builtin_tsuspend">, def int_ppc_ttest : GCCBuiltin<"__builtin_ttest">, Intrinsic<[llvm_i64_ty], [], []>; + +def int_ppc_cfence : Intrinsic<[], [llvm_anyint_ty], []>; } diff --git a/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td b/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td index a06c67fe814c..071ec2edb538 100644 --- a/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td +++ b/contrib/llvm/include/llvm/Target/GlobalISel/SelectionDAGCompat.td @@ -62,6 +62,7 @@ def : GINodeEquiv; def : GINodeEquiv; def : GINodeEquiv; def : GINodeEquiv; +def : GINodeEquiv; def : GINodeEquiv; // Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern. diff --git a/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp index a4672efeedd6..e4d58bf1b4eb 100644 --- a/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp +++ b/contrib/llvm/lib/Analysis/DependenceAnalysis.cpp @@ -2984,7 +2984,7 @@ bool DependenceInfo::propagate(const SCEV *&Src, const SCEV *&Dst, SmallVectorImpl &Constraints, bool &Consistent) { bool Result = false; - for (int LI = Loops.find_first(); LI >= 0; LI = Loops.find_next(LI)) { + for (unsigned LI : Loops.set_bits()) { DEBUG(dbgs() << "\t Constraint[" << LI << "] is"); DEBUG(Constraints[LI].dump(dbgs())); if (Constraints[LI].isDistance()) @@ -3266,7 +3266,7 @@ bool DependenceInfo::tryDelinearize(Instruction *Src, Instruction *Dst, // For debugging purposes, dump a small bit vector to dbgs(). 
static void dumpSmallBitVector(SmallBitVector &BV) { dbgs() << "{"; - for (int VI = BV.find_first(); VI >= 0; VI = BV.find_next(VI)) { + for (unsigned VI : BV.set_bits()) { dbgs() << VI; if (BV.find_next(VI) >= 0) dbgs() << ' '; @@ -3506,7 +3506,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, NewConstraint.setAny(SE); // test separable subscripts - for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) { + for (unsigned SI : Separable.set_bits()) { DEBUG(dbgs() << "testing subscript " << SI); switch (Pair[SI].Classification) { case Subscript::ZIV: @@ -3545,14 +3545,14 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, SmallVector Constraints(MaxLevels + 1); for (unsigned II = 0; II <= MaxLevels; ++II) Constraints[II].setAny(SE); - for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) { + for (unsigned SI : Coupled.set_bits()) { DEBUG(dbgs() << "testing subscript group " << SI << " { "); SmallBitVector Group(Pair[SI].Group); SmallBitVector Sivs(Pairs); SmallBitVector Mivs(Pairs); SmallBitVector ConstrainedLevels(MaxLevels + 1); SmallVector PairsInGroup; - for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) { + for (unsigned SJ : Group.set_bits()) { DEBUG(dbgs() << SJ << " "); if (Pair[SJ].Classification == Subscript::SIV) Sivs.set(SJ); @@ -3564,7 +3564,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, DEBUG(dbgs() << "}\n"); while (Sivs.any()) { bool Changed = false; - for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) { + for (unsigned SJ : Sivs.set_bits()) { DEBUG(dbgs() << "testing subscript " << SJ << ", SIV\n"); // SJ is an SIV subscript that's part of the current coupled group unsigned Level; @@ -3588,7 +3588,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, DEBUG(dbgs() << " propagating\n"); DEBUG(dbgs() << "\tMivs = "); DEBUG(dumpSmallBitVector(Mivs)); - for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + for (unsigned SJ : Mivs.set_bits()) { // SJ is an MIV subscript that's part of the current coupled group DEBUG(dbgs() << "\tSJ = " << SJ << "\n"); if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, @@ -3622,7 +3622,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, } // test & propagate remaining RDIVs - for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + for (unsigned SJ : Mivs.set_bits()) { if (Pair[SJ].Classification == Subscript::RDIV) { DEBUG(dbgs() << "RDIV test\n"); if (testRDIV(Pair[SJ].Src, Pair[SJ].Dst, Result)) @@ -3635,7 +3635,7 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, // test remaining MIVs // This code is temporary. // Better to somehow test all remaining subscripts simultaneously. 
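Every loop rewrite in this file follows the same mechanical pattern; a minimal sketch of the before/after idiom, assuming only llvm/ADT/SmallBitVector.h:

#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Support/raw_ostream.h"

static void printSetBits(const llvm::SmallBitVector &BV) {
  // Old idiom:
  //   for (int I = BV.find_first(); I >= 0; I = BV.find_next(I)) ...
  // set_bits() visits the same indices in increasing order and avoids
  // mixing int and unsigned.
  for (unsigned I : BV.set_bits())
    llvm::outs() << I << ' ';
  llvm::outs() << '\n';
}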
- for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + for (unsigned SJ : Mivs.set_bits()) { if (Pair[SJ].Classification == Subscript::MIV) { DEBUG(dbgs() << "MIV test\n"); if (testMIV(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Result)) @@ -3647,9 +3647,8 @@ DependenceInfo::depends(Instruction *Src, Instruction *Dst, // update Result.DV from constraint vector DEBUG(dbgs() << " updating\n"); - for (int SJ = ConstrainedLevels.find_first(); SJ >= 0; - SJ = ConstrainedLevels.find_next(SJ)) { - if (SJ > (int)CommonLevels) + for (unsigned SJ : ConstrainedLevels.set_bits()) { + if (SJ > CommonLevels) break; updateDirection(Result.DV[SJ - 1], Constraints[SJ]); if (Result.DV[SJ - 1].Direction == Dependence::DVEntry::NONE) @@ -3859,7 +3858,7 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep, NewConstraint.setAny(SE); // test separable subscripts - for (int SI = Separable.find_first(); SI >= 0; SI = Separable.find_next(SI)) { + for (unsigned SI : Separable.set_bits()) { switch (Pair[SI].Classification) { case Subscript::SIV: { unsigned Level; @@ -3886,12 +3885,12 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep, SmallVector Constraints(MaxLevels + 1); for (unsigned II = 0; II <= MaxLevels; ++II) Constraints[II].setAny(SE); - for (int SI = Coupled.find_first(); SI >= 0; SI = Coupled.find_next(SI)) { + for (unsigned SI : Coupled.set_bits()) { SmallBitVector Group(Pair[SI].Group); SmallBitVector Sivs(Pairs); SmallBitVector Mivs(Pairs); SmallBitVector ConstrainedLevels(MaxLevels + 1); - for (int SJ = Group.find_first(); SJ >= 0; SJ = Group.find_next(SJ)) { + for (unsigned SJ : Group.set_bits()) { if (Pair[SJ].Classification == Subscript::SIV) Sivs.set(SJ); else @@ -3899,7 +3898,7 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep, } while (Sivs.any()) { bool Changed = false; - for (int SJ = Sivs.find_first(); SJ >= 0; SJ = Sivs.find_next(SJ)) { + for (unsigned SJ : Sivs.set_bits()) { // SJ is an SIV subscript that's part of the current coupled group unsigned Level; const SCEV *SplitIter = nullptr; @@ -3914,7 +3913,7 @@ const SCEV *DependenceInfo::getSplitIteration(const Dependence &Dep, } if (Changed) { // propagate, possibly creating new SIVs and ZIVs - for (int SJ = Mivs.find_first(); SJ >= 0; SJ = Mivs.find_next(SJ)) { + for (unsigned SJ : Mivs.set_bits()) { // SJ is an MIV subscript that's part of the current coupled group if (propagate(Pair[SJ].Src, Pair[SJ].Dst, Pair[SJ].Loops, Constraints, Result.Consistent)) { diff --git a/contrib/llvm/lib/Analysis/InlineCost.cpp b/contrib/llvm/lib/Analysis/InlineCost.cpp index 44c14cb17c22..4702569126c6 100644 --- a/contrib/llvm/lib/Analysis/InlineCost.cpp +++ b/contrib/llvm/lib/Analysis/InlineCost.cpp @@ -669,21 +669,33 @@ void CallAnalyzer::updateThreshold(CallSite CS, Function &Callee) { Threshold = MaxIfValid(Threshold, Params.HintThreshold); if (PSI) { BlockFrequencyInfo *CallerBFI = GetBFI ? &((*GetBFI)(*Caller)) : nullptr; - if (PSI->isHotCallSite(CS, CallerBFI)) { - DEBUG(dbgs() << "Hot callsite.\n"); - Threshold = Params.HotCallSiteThreshold.getValue(); - } else if (PSI->isFunctionEntryHot(&Callee)) { - DEBUG(dbgs() << "Hot callee.\n"); - // If callsite hotness can not be determined, we may still know - // that the callee is hot and treat it as a weaker hint for threshold - // increase. 
- Threshold = MaxIfValid(Threshold, Params.HintThreshold); - } else if (PSI->isColdCallSite(CS, CallerBFI)) { - DEBUG(dbgs() << "Cold callsite.\n"); - Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold); - } else if (PSI->isFunctionEntryCold(&Callee)) { - DEBUG(dbgs() << "Cold callee.\n"); - Threshold = MinIfValid(Threshold, Params.ColdThreshold); + // FIXME: After switching to the new pass manager, simplify the logic below + // by checking only the callsite hotness/coldness. The check for CallerBFI + // exists only because we do not have BFI available with the old PM. + // + // Use callee's hotness information only if we have no way of determining + // callsite's hotness information. Callsite hotness can be determined if + // sample profile is used (which adds hotness metadata to calls) or if + // caller's BlockFrequencyInfo is available. + if (CallerBFI || PSI->hasSampleProfile()) { + if (PSI->isHotCallSite(CS, CallerBFI)) { + DEBUG(dbgs() << "Hot callsite.\n"); + Threshold = Params.HotCallSiteThreshold.getValue(); + } else if (PSI->isColdCallSite(CS, CallerBFI)) { + DEBUG(dbgs() << "Cold callsite.\n"); + Threshold = MinIfValid(Threshold, Params.ColdCallSiteThreshold); + } + } else { + if (PSI->isFunctionEntryHot(&Callee)) { + DEBUG(dbgs() << "Hot callee.\n"); + // If callsite hotness cannot be determined, we may still know + // that the callee is hot and treat it as a weaker hint for threshold + // increase. + Threshold = MaxIfValid(Threshold, Params.HintThreshold); + } else if (PSI->isFunctionEntryCold(&Callee)) { + DEBUG(dbgs() << "Cold callee.\n"); + Threshold = MinIfValid(Threshold, Params.ColdThreshold); + } } } } diff --git a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp index 5728887cc1e9..5652248a60ce 100644 --- a/contrib/llvm/lib/Analysis/InstructionSimplify.cpp +++ b/contrib/llvm/lib/Analysis/InstructionSimplify.cpp @@ -1752,6 +1752,24 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q, (A == Op0 || B == Op0)) return Op0; + // A mask that only clears known zeros of a shifted value is a no-op. + Value *X; + const APInt *Mask; + const APInt *ShAmt; + if (match(Op1, m_APInt(Mask))) { + // If all bits in the inverted and shifted mask are clear: + // and (shl X, ShAmt), Mask --> shl X, ShAmt + if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) && + (~(*Mask)).lshr(*ShAmt).isNullValue()) + return Op0; + + // If all bits in the inverted and shifted mask are clear: + // and (lshr X, ShAmt), Mask --> lshr X, ShAmt + if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) && + (~(*Mask)).shl(*ShAmt).isNullValue()) + return Op0; + } + // A & (-A) = A if A is a power of two or zero. if (match(Op0, m_Neg(m_Specific(Op1))) || match(Op1, m_Neg(m_Specific(Op0)))) { diff --git a/contrib/llvm/lib/Analysis/ProfileSummaryInfo.cpp b/contrib/llvm/lib/Analysis/ProfileSummaryInfo.cpp index 502f4205b689..12b86daa602b 100644 --- a/contrib/llvm/lib/Analysis/ProfileSummaryInfo.cpp +++ b/contrib/llvm/lib/Analysis/ProfileSummaryInfo.cpp @@ -75,7 +75,7 @@ ProfileSummaryInfo::getProfileCount(const Instruction *Inst, return None; assert((isa<CallInst>(Inst) || isa<InvokeInst>(Inst)) && "We can only get profile count for call/invoke instruction."); - if (computeSummary() && Summary->getKind() == ProfileSummary::PSK_Sample) { + if (hasSampleProfile()) { // In sample PGO mode, check if there is profile metadata on the // instruction.
If it is present, determine hotness solely based on that, // since the sampled entry count may not be accurate. diff --git a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp index 800354d2f5b4..a746ddfd7a63 100644 --- a/contrib/llvm/lib/Analysis/ScalarEvolution.cpp +++ b/contrib/llvm/lib/Analysis/ScalarEvolution.cpp @@ -629,19 +629,19 @@ static int CompareSCEVComplexity( const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS); const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS); - // If there is a dominance relationship between the loops, sort by the - // dominance. Otherwise, sort by depth. We require such order in getAddExpr. + // There is always a dominance relationship between two recs that are used + // by one SCEV, so we can safely sort recs by loop header dominance. We + // require such order in getAddExpr. const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop(); if (LLoop != RLoop) { const BasicBlock *LHead = LLoop->getHeader(), *RHead = RLoop->getHeader(); assert(LHead != RHead && "Two loops share the same header?"); if (DT.dominates(LHead, RHead)) return 1; - else if (DT.dominates(RHead, LHead)) - return -1; - unsigned LDepth = LLoop->getLoopDepth(), RDepth = RLoop->getLoopDepth(); - if (LDepth != RDepth) - return (int)LDepth - (int)RDepth; + else + assert(DT.dominates(RHead, LHead) && + "No dominance between recurrences used by one SCEV?"); + return -1; } // Addrec complexity grows with operand count. @@ -2512,22 +2512,23 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops, SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(), AddRec->op_end()); for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]); - ++OtherIdx) - if (const auto *OtherAddRec = dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx])) - if (OtherAddRec->getLoop() == AddRecLoop) { - for (unsigned i = 0, e = OtherAddRec->getNumOperands(); - i != e; ++i) { - if (i >= AddRecOps.size()) { - AddRecOps.append(OtherAddRec->op_begin()+i, - OtherAddRec->op_end()); - break; - } - SmallVector<const SCEV *, 2> TwoOps = { - AddRecOps[i], OtherAddRec->getOperand(i)}; - AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); + ++OtherIdx) { + const auto *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]); + if (OtherAddRec->getLoop() == AddRecLoop) { + for (unsigned i = 0, e = OtherAddRec->getNumOperands(); + i != e; ++i) { + if (i >= AddRecOps.size()) { + AddRecOps.append(OtherAddRec->op_begin()+i, + OtherAddRec->op_end()); + break; } - Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; + SmallVector<const SCEV *, 2> TwoOps = { + AddRecOps[i], OtherAddRec->getOperand(i)}; + AddRecOps[i] = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1); } + Ops.erase(Ops.begin() + OtherIdx); --OtherIdx; + } + } // Step size has changed, so we cannot guarantee no self-wraparound.
Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap); return getAddExpr(Ops, SCEV::FlagAnyWrap, Depth + 1); diff --git a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp index 3a57772cc7f5..43b245c66400 100644 --- a/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp +++ b/contrib/llvm/lib/CodeGen/AggressiveAntiDepBreaker.cpp @@ -128,8 +128,7 @@ AggressiveAntiDepBreaker::AggressiveAntiDepBreaker( } DEBUG(dbgs() << "AntiDep Critical-Path Registers:"); - DEBUG(for (int r = CriticalPathSet.find_first(); r != -1; - r = CriticalPathSet.find_next(r)) + DEBUG(for (unsigned r : CriticalPathSet.set_bits()) dbgs() << " " << TRI->getName(r)); DEBUG(dbgs() << '\n'); } @@ -571,7 +570,7 @@ bool AggressiveAntiDepBreaker::FindSuitableFreeRegisters( DEBUG({ dbgs() << " ::"; - for (int r = BV.find_first(); r != -1; r = BV.find_next(r)) + for (unsigned r : BV.set_bits()) dbgs() << " " << TRI->getName(r); dbgs() << "\n"; }); diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp index 98163bffb60b..7d945690e9c3 100644 --- a/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp +++ b/contrib/llvm/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp @@ -501,7 +501,7 @@ void CodeViewDebug::emitTypeInformation() { Error E = Reader.readArray(Types, Reader.getLength()); if (!E) { TypeVisitorCallbacks C; - E = CVTypeVisitor(C).visitTypeStream(Types); + E = codeview::visitTypeStream(Types, C); } if (E) { logAllUnhandledErrors(std::move(E), errs(), "error: "); diff --git a/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp b/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp index 22fd7bb46056..20e1467b30c3 100644 --- a/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp +++ b/contrib/llvm/lib/CodeGen/AsmPrinter/DbgValueHistoryCalculator.cpp @@ -209,8 +209,7 @@ void llvm::calculateDbgValueHistory(const MachineFunction *MF, } else if (MO.isRegMask()) { // If this is a register mask operand, clobber all debug values in // non-CSRs. - for (int I = ChangingRegs.find_first(); I != -1; - I = ChangingRegs.find_next(I)) { + for (unsigned I : ChangingRegs.set_bits()) { // Don't consider SP to be clobbered by register masks. if (unsigned(I) != SP && TRI->isPhysicalRegister(I) && MO.clobbersPhysReg(I)) { diff --git a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp index 811858f136eb..77dfb13ac1f2 100644 --- a/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/contrib/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -1129,6 +1129,11 @@ void IRTranslator::finalizeFunction() { ValToVReg.clear(); FrameIndices.clear(); MachinePreds.clear(); + // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. 
Clear it + // to avoid accessing freed memory (in runOnMachineFunction) and to avoid + // destroying it twice (in ~IRTranslator() and ~LLVMContext()) + EntryBuilder = MachineIRBuilder(); + CurBuilder = MachineIRBuilder(); } bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) { diff --git a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp index ab433273b189..b53b002f55a6 100644 --- a/contrib/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/contrib/llvm/lib/CodeGen/MachineVerifier.cpp @@ -760,7 +760,7 @@ MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) { const MachineFrameInfo &MFI = MF->getFrameInfo(); BitVector PR = MFI.getPristineRegs(*MF); - for (int I = PR.find_first(); I>0; I = PR.find_next(I)) { + for (unsigned I : PR.set_bits()) { for (MCSubRegIterator SubRegs(I, TRI, /*IncludeSelf=*/true); SubRegs.isValid(); ++SubRegs) regsLive.insert(*SubRegs); diff --git a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp index 06500289c971..47d726f6da7a 100644 --- a/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp +++ b/contrib/llvm/lib/CodeGen/RegAllocGreedy.cpp @@ -285,8 +285,7 @@ class RAGreedy : public MachineFunctionPass, // Set B[i] = C for every live bundle where B[i] was NoCand. unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) { unsigned Count = 0; - for (int i = LiveBundles.find_first(); i >= 0; - i = LiveBundles.find_next(i)) + for (unsigned i : LiveBundles.set_bits()) if (B[i] == NoCand) { B[i] = C; Count++; @@ -1162,9 +1161,8 @@ bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) { } DEBUG({ - for (int i = Cand.LiveBundles.find_first(); i>=0; - i = Cand.LiveBundles.find_next(i)) - dbgs() << " EB#" << i; + for (int i : Cand.LiveBundles.set_bits()) + dbgs() << " EB#" << i; dbgs() << ".\n"; }); return true; @@ -1482,8 +1480,7 @@ unsigned RAGreedy::calculateRegionSplitCost(LiveInterval &VirtReg, DEBUG({ dbgs() << ", total = "; MBFI->printBlockFreq(dbgs(), Cost) << " with bundles"; - for (int i = Cand.LiveBundles.find_first(); i>=0; - i = Cand.LiveBundles.find_next(i)) + for (int i : Cand.LiveBundles.set_bits()) dbgs() << " EB#" << i; dbgs() << ".\n"; }); diff --git a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp index caf5cb497a71..0ccee175abfb 100644 --- a/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp +++ b/contrib/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp @@ -13087,14 +13087,28 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) { } } - // If this is a store followed by a store with the same value to the same - // location, then the store is dead/noop. if (StoreSDNode *ST1 = dyn_cast<StoreSDNode>(Chain)) { - if (ST1->getBasePtr() == Ptr && ST->getMemoryVT() == ST1->getMemoryVT() && - ST1->getValue() == Value && ST->isUnindexed() && !ST->isVolatile() && - ST1->isUnindexed() && !ST1->isVolatile()) { - // The store is dead, remove it. - return Chain; + if (ST->isUnindexed() && !ST->isVolatile() && ST1->isUnindexed() && + !ST1->isVolatile() && ST1->getBasePtr() == Ptr && + ST->getMemoryVT() == ST1->getMemoryVT()) { + // If this is a store followed by a store with the same value to the same + // location, then the store is dead/noop. + if (ST1->getValue() == Value) { + // The store is dead, remove it. + return Chain; + } + + // If this store is preceded by a store to the same location and no + // other node is chained to that earlier store, we can effectively + // drop the earlier store.
Do not remove stores to undef as they may be used as + // data sinks. + if (OptLevel != CodeGenOpt::None && ST1->hasOneUse() && + !ST1->getBasePtr().isUndef()) { + // ST1 is fully overwritten and can be elided. Combine with its chain + // value. + CombineTo(ST1, ST1->getChain()); + return SDValue(); + } } } diff --git a/contrib/llvm/lib/CodeGen/SpillPlacement.cpp b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp index f10c98ef4e50..43cbf4add0f8 100644 --- a/contrib/llvm/lib/CodeGen/SpillPlacement.cpp +++ b/contrib/llvm/lib/CodeGen/SpillPlacement.cpp @@ -310,7 +310,7 @@ void SpillPlacement::addLinks(ArrayRef<unsigned> Links) { bool SpillPlacement::scanActiveBundles() { RecentPositive.clear(); - for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) { + for (unsigned n : ActiveNodes->set_bits()) { update(n); // A node that must spill, or a node without any links is not going to // change its value ever again, so exclude it from iterations. @@ -365,7 +365,7 @@ SpillPlacement::finish() { // Write preferences back to ActiveNodes. bool Perfect = true; - for (int n = ActiveNodes->find_first(); n>=0; n = ActiveNodes->find_next(n)) + for (unsigned n : ActiveNodes->set_bits()) if (!nodes[n].preferReg()) { ActiveNodes->reset(n); Perfect = false; diff --git a/contrib/llvm/lib/CodeGen/StackColoring.cpp b/contrib/llvm/lib/CodeGen/StackColoring.cpp index f51d959a089a..86a16187fcb6 100644 --- a/contrib/llvm/lib/CodeGen/StackColoring.cpp +++ b/contrib/llvm/lib/CodeGen/StackColoring.cpp @@ -703,12 +703,10 @@ void StackColoring::calculateLiveIntervals(unsigned NumSlots) { // Create the interval of the blocks that we previously found to be 'alive'. BlockLifetimeInfo &MBBLiveness = BlockLiveness[&MBB]; - for (int pos = MBBLiveness.LiveIn.find_first(); pos != -1; - pos = MBBLiveness.LiveIn.find_next(pos)) { + for (unsigned pos : MBBLiveness.LiveIn.set_bits()) { Starts[pos] = Indexes->getMBBStartIdx(&MBB); } - for (int pos = MBBLiveness.LiveOut.find_first(); pos != -1; - pos = MBBLiveness.LiveOut.find_next(pos)) { + for (unsigned pos : MBBLiveness.LiveOut.set_bits()) { Finishes[pos] = Indexes->getMBBEndIdx(&MBB); } diff --git a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp index 39aa946fa840..5f63fd4320bb 100644 --- a/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/contrib/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -1312,7 +1312,7 @@ TargetLoweringBase::findRepresentativeClass(const TargetRegisterInfo *TRI, // Find the first legal register class with the largest spill size. const TargetRegisterClass *BestRC = RC; - for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) { + for (unsigned i : SuperRegRC.set_bits()) { const TargetRegisterClass *SuperRC = TRI->getRegClass(i); // We want the largest possible spill size. if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC)) diff --git a/contrib/llvm/lib/CodeGen/TargetPassConfig.cpp b/contrib/llvm/lib/CodeGen/TargetPassConfig.cpp index e6c5d8753b83..9724cb074584 100644 --- a/contrib/llvm/lib/CodeGen/TargetPassConfig.cpp +++ b/contrib/llvm/lib/CodeGen/TargetPassConfig.cpp @@ -564,6 +564,14 @@ void TargetPassConfig::addISelPrepare() { addPass(createVerifierPass()); } +/// -regalloc=... command line option.
+static FunctionPass *useDefaultRegisterAllocator() { return nullptr; } +static cl::opt<RegisterRegAlloc::FunctionPassCtor, false, RegisterPassParser<RegisterRegAlloc>> +RegAlloc("regalloc", + cl::init(&useDefaultRegisterAllocator), + cl::desc("Register allocator to use")); + /// Add the complete set of target-independent postISel code generator passes. /// /// This can be read as the standard order of major LLVM CodeGen stages. Stages @@ -625,8 +633,12 @@ void TargetPassConfig::addMachinePasses() { // including phi elimination and scheduling. if (getOptimizeRegAlloc()) addOptimizedRegAlloc(createRegAllocPass(true)); - else + else { + if (RegAlloc != &useDefaultRegisterAllocator && + RegAlloc != &createFastRegisterAllocator) + report_fatal_error("Must use fast (default) register allocator for unoptimized regalloc."); addFastRegAlloc(createRegAllocPass(false)); + } // Run post-ra passes. addPostRegAlloc(); @@ -759,19 +771,12 @@ MachinePassRegistry RegisterRegAlloc::Registry; /// A dummy default pass factory indicates whether the register allocator is /// overridden on the command line. static llvm::once_flag InitializeDefaultRegisterAllocatorFlag; -static FunctionPass *useDefaultRegisterAllocator() { return nullptr; } + static RegisterRegAlloc defaultRegAlloc("default", "pick register allocator based on -O option", useDefaultRegisterAllocator); -/// -regalloc=... command line option. -static cl::opt<RegisterRegAlloc::FunctionPassCtor, false, RegisterPassParser<RegisterRegAlloc>> -RegAlloc("regalloc", - cl::init(&useDefaultRegisterAllocator), - cl::desc("Register allocator to use")); - static void initializeDefaultRegisterAllocatorOnce() { RegisterRegAlloc::FunctionPassCtor Ctor = RegisterRegAlloc::getDefault(); @@ -781,7 +786,6 @@ static void initializeDefaultRegisterAllocatorOnce() { } } - /// Instantiate the default register allocator pass for this target for either /// the optimized or unoptimized allocation path. This will be added to the pass /// manager by addFastRegAlloc in the unoptimized case or addOptimizedRegAlloc diff --git a/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp b/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp index f6e4c17d514c..41ec082a24cf 100644 --- a/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp +++ b/contrib/llvm/lib/CodeGen/TargetRegisterInfo.cpp @@ -50,8 +50,7 @@ bool TargetRegisterInfo::checkAllSuperRegsMarked(const BitVector &RegisterSet, ArrayRef<MCPhysReg> Exceptions) const { // Check that all super registers of reserved regs are reserved as well.
BitVector Checked(getNumRegs()); - for (int Reg = RegisterSet.find_first(); Reg>=0; - Reg = RegisterSet.find_next(Reg)) { + for (unsigned Reg : RegisterSet.set_bits()) { if (Checked[Reg]) continue; for (MCSuperRegIterator SR(Reg, this); SR.isValid(); ++SR) { diff --git a/contrib/llvm/lib/DebugInfo/CodeView/CVTypeDumper.cpp b/contrib/llvm/lib/DebugInfo/CodeView/CVTypeDumper.cpp index bcc8218d9446..02e1682f76e7 100644 --- a/contrib/llvm/lib/DebugInfo/CodeView/CVTypeDumper.cpp +++ b/contrib/llvm/lib/DebugInfo/CodeView/CVTypeDumper.cpp @@ -11,7 +11,6 @@ #include "llvm/DebugInfo/CodeView/CVTypeVisitor.h" #include "llvm/DebugInfo/CodeView/TypeDatabase.h" #include "llvm/DebugInfo/CodeView/TypeDatabaseVisitor.h" -#include "llvm/DebugInfo/CodeView/TypeDeserializer.h" #include "llvm/DebugInfo/CodeView/TypeRecord.h" #include "llvm/DebugInfo/CodeView/TypeVisitorCallbackPipeline.h" #include "llvm/Support/BinaryByteStream.h" @@ -21,38 +20,23 @@ using namespace llvm::codeview; Error CVTypeDumper::dump(const CVType &Record, TypeVisitorCallbacks &Dumper) { TypeDatabaseVisitor DBV(TypeDB); - TypeDeserializer Deserializer; TypeVisitorCallbackPipeline Pipeline; - Pipeline.addCallbackToPipeline(Deserializer); Pipeline.addCallbackToPipeline(DBV); Pipeline.addCallbackToPipeline(Dumper); - CVTypeVisitor Visitor(Pipeline); - if (Handler) - Visitor.addTypeServerHandler(*Handler); - CVType RecordCopy = Record; - if (auto EC = Visitor.visitTypeRecord(RecordCopy)) - return EC; - return Error::success(); + return codeview::visitTypeRecord(RecordCopy, Pipeline, VDS_BytesPresent, + Handler); } Error CVTypeDumper::dump(const CVTypeArray &Types, TypeVisitorCallbacks &Dumper) { TypeDatabaseVisitor DBV(TypeDB); - TypeDeserializer Deserializer; TypeVisitorCallbackPipeline Pipeline; - Pipeline.addCallbackToPipeline(Deserializer); Pipeline.addCallbackToPipeline(DBV); Pipeline.addCallbackToPipeline(Dumper); - CVTypeVisitor Visitor(Pipeline); - if (Handler) - Visitor.addTypeServerHandler(*Handler); - - if (auto EC = Visitor.visitTypeStream(Types)) - return EC; - return Error::success(); + return codeview::visitTypeStream(Types, Pipeline, Handler); } Error CVTypeDumper::dump(ArrayRef Data, TypeVisitorCallbacks &Dumper) { diff --git a/contrib/llvm/lib/DebugInfo/CodeView/CVTypeVisitor.cpp b/contrib/llvm/lib/DebugInfo/CodeView/CVTypeVisitor.cpp index b6ed0453d9c4..0f7f5f667790 100644 --- a/contrib/llvm/lib/DebugInfo/CodeView/CVTypeVisitor.cpp +++ b/contrib/llvm/lib/DebugInfo/CodeView/CVTypeVisitor.cpp @@ -59,13 +59,8 @@ static Expected deserializeTypeServerRecord(CVType &Record) { }; TypeServer2Record R(TypeRecordKind::TypeServer2); - TypeDeserializer Deserializer; StealTypeServerVisitor Thief(R); - TypeVisitorCallbackPipeline Pipeline; - Pipeline.addCallbackToPipeline(Deserializer); - Pipeline.addCallbackToPipeline(Thief); - CVTypeVisitor Visitor(Pipeline); - if (auto EC = Visitor.visitTypeRecord(Record)) + if (auto EC = visitTypeRecord(Record, Thief)) return std::move(EC); return R; @@ -178,7 +173,7 @@ static Error visitMemberRecord(CVMemberRecord &Record, return Error::success(); } -Error CVTypeVisitor::visitMemberRecord(CVMemberRecord &Record) { +Error CVTypeVisitor::visitMemberRecord(CVMemberRecord Record) { return ::visitMemberRecord(Record, Callbacks); } @@ -224,3 +219,93 @@ Error CVTypeVisitor::visitFieldListMemberStream(ArrayRef Data) { BinaryStreamReader SR(S); return visitFieldListMemberStream(SR); } + +namespace { +struct FieldListVisitHelper { + FieldListVisitHelper(TypeVisitorCallbacks &Callbacks, ArrayRef Data, + 
VisitorDataSource Source) + : Stream(Data, llvm::support::little), Reader(Stream), + Deserializer(Reader), + Visitor((Source == VDS_BytesPresent) ? Pipeline : Callbacks) { + if (Source == VDS_BytesPresent) { + Pipeline.addCallbackToPipeline(Deserializer); + Pipeline.addCallbackToPipeline(Callbacks); + } + } + + BinaryByteStream Stream; + BinaryStreamReader Reader; + FieldListDeserializer Deserializer; + TypeVisitorCallbackPipeline Pipeline; + CVTypeVisitor Visitor; +}; + +struct VisitHelper { + VisitHelper(TypeVisitorCallbacks &Callbacks, VisitorDataSource Source, + TypeServerHandler *TS) + : Visitor((Source == VDS_BytesPresent) ? Pipeline : Callbacks) { + if (TS) + Visitor.addTypeServerHandler(*TS); + if (Source == VDS_BytesPresent) { + Pipeline.addCallbackToPipeline(Deserializer); + Pipeline.addCallbackToPipeline(Callbacks); + } + } + + TypeDeserializer Deserializer; + TypeVisitorCallbackPipeline Pipeline; + CVTypeVisitor Visitor; +}; +} + +Error llvm::codeview::visitTypeRecord(CVType &Record, TypeIndex Index, + TypeVisitorCallbacks &Callbacks, + VisitorDataSource Source, + TypeServerHandler *TS) { + VisitHelper Helper(Callbacks, Source, TS); + return Helper.Visitor.visitTypeRecord(Record, Index); +} + +Error llvm::codeview::visitTypeRecord(CVType &Record, + TypeVisitorCallbacks &Callbacks, + VisitorDataSource Source, + TypeServerHandler *TS) { + VisitHelper Helper(Callbacks, Source, TS); + return Helper.Visitor.visitTypeRecord(Record); +} + +Error llvm::codeview::visitMemberRecordStream(ArrayRef FieldList, + TypeVisitorCallbacks &Callbacks) { + CVTypeVisitor Visitor(Callbacks); + return Visitor.visitFieldListMemberStream(FieldList); +} + +Error llvm::codeview::visitMemberRecord(CVMemberRecord Record, + TypeVisitorCallbacks &Callbacks, + VisitorDataSource Source) { + FieldListVisitHelper Helper(Callbacks, Record.Data, Source); + return Helper.Visitor.visitMemberRecord(Record); +} + +Error llvm::codeview::visitMemberRecord(TypeLeafKind Kind, + ArrayRef Record, + TypeVisitorCallbacks &Callbacks) { + CVMemberRecord R; + R.Data = Record; + R.Kind = Kind; + return visitMemberRecord(R, Callbacks, VDS_BytesPresent); +} + +Error llvm::codeview::visitTypeStream(const CVTypeArray &Types, + TypeVisitorCallbacks &Callbacks, + TypeServerHandler *TS) { + VisitHelper Helper(Callbacks, VDS_BytesPresent, TS); + return Helper.Visitor.visitTypeStream(Types); +} + +Error llvm::codeview::visitTypeStream(CVTypeRange Types, + TypeVisitorCallbacks &Callbacks, + TypeServerHandler *TS) { + VisitHelper Helper(Callbacks, VDS_BytesPresent, TS); + return Helper.Visitor.visitTypeStream(Types); +} diff --git a/contrib/llvm/lib/DebugInfo/CodeView/RandomAccessTypeVisitor.cpp b/contrib/llvm/lib/DebugInfo/CodeView/RandomAccessTypeVisitor.cpp index 4cb9acbe07d9..704d1131108a 100644 --- a/contrib/llvm/lib/DebugInfo/CodeView/RandomAccessTypeVisitor.cpp +++ b/contrib/llvm/lib/DebugInfo/CodeView/RandomAccessTypeVisitor.cpp @@ -9,6 +9,7 @@ #include "llvm/DebugInfo/CodeView/RandomAccessTypeVisitor.h" +#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h" #include "llvm/DebugInfo/CodeView/TypeDatabase.h" #include "llvm/DebugInfo/CodeView/TypeServerHandler.h" #include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h" @@ -20,9 +21,7 @@ RandomAccessTypeVisitor::RandomAccessTypeVisitor( const CVTypeArray &Types, uint32_t NumRecords, PartialOffsetArray PartialOffsets) : Database(NumRecords), Types(Types), DatabaseVisitor(Database), - InternalVisitor(Pipeline), PartialOffsets(PartialOffsets) { - Pipeline.addCallbackToPipeline(Deserializer); 
- Pipeline.addCallbackToPipeline(DatabaseVisitor); + PartialOffsets(PartialOffsets) { KnownOffsets.resize(Database.capacity()); } @@ -38,8 +37,7 @@ Error RandomAccessTypeVisitor::visitTypeIndex(TypeIndex TI, assert(Database.contains(TI)); auto &Record = Database.getTypeRecord(TI); - CVTypeVisitor V(Callbacks); - return V.visitTypeRecord(Record, TI); + return codeview::visitTypeRecord(Record, TI, Callbacks); } Error RandomAccessTypeVisitor::visitRangeForType(TypeIndex TI) { @@ -78,7 +76,7 @@ Error RandomAccessTypeVisitor::visitRange(TypeIndex Begin, uint32_t BeginOffset, while (Begin != End) { assert(!Database.contains(Begin)); - if (auto EC = InternalVisitor.visitTypeRecord(*RI, Begin)) + if (auto EC = codeview::visitTypeRecord(*RI, Begin, DatabaseVisitor)) return EC; KnownOffsets[Begin.toArrayIndex()] = BeginOffset; diff --git a/contrib/llvm/lib/DebugInfo/CodeView/TypeDumpVisitor.cpp b/contrib/llvm/lib/DebugInfo/CodeView/TypeDumpVisitor.cpp index 27a6e0987886..9485c9cfedff 100644 --- a/contrib/llvm/lib/DebugInfo/CodeView/TypeDumpVisitor.cpp +++ b/contrib/llvm/lib/DebugInfo/CodeView/TypeDumpVisitor.cpp @@ -216,8 +216,7 @@ Error TypeDumpVisitor::visitMemberEnd(CVMemberRecord &Record) { Error TypeDumpVisitor::visitKnownRecord(CVType &CVR, FieldListRecord &FieldList) { - CVTypeVisitor Visitor(*this); - if (auto EC = Visitor.visitFieldListMemberStream(FieldList.Data)) + if (auto EC = codeview::visitMemberRecordStream(FieldList.Data, *this)) return EC; return Error::success(); diff --git a/contrib/llvm/lib/DebugInfo/CodeView/TypeStreamMerger.cpp b/contrib/llvm/lib/DebugInfo/CodeView/TypeStreamMerger.cpp index aad20ae6dda1..51f24fa3f135 100644 --- a/contrib/llvm/lib/DebugInfo/CodeView/TypeStreamMerger.cpp +++ b/contrib/llvm/lib/DebugInfo/CodeView/TypeStreamMerger.cpp @@ -361,8 +361,7 @@ Error TypeStreamMerger::visitKnownRecord(CVType &, FieldListRecord &R) { // Visit the members inside the field list. HadUntranslatedMember = false; FieldListBuilder.begin(); - CVTypeVisitor Visitor(*this); - if (auto EC = Visitor.visitFieldListMemberStream(R.Data)) + if (auto EC = codeview::visitMemberRecordStream(R.Data, *this)) return EC; // Write the record if we translated all field list members. 
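The change here and in the surrounding files is one pattern: the ad-hoc TypeDeserializer / TypeVisitorCallbackPipeline / CVTypeVisitor wiring is replaced by the free functions added in CVTypeVisitor.cpp above. A hedged sketch of a caller after this change (visitAll is hypothetical; any TypeVisitorCallbacks implementation works):

#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"

using namespace llvm;
using namespace llvm::codeview;

Error visitAll(const CVTypeArray &Types, TypeVisitorCallbacks &Callbacks) {
  // The VDS_BytesPresent default implies deserialization, so no explicit
  // TypeDeserializer or pipeline wiring is needed at the call site.
  return visitTypeStream(Types, Callbacks, /*TS=*/nullptr);
}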
@@ -440,18 +439,9 @@ Error TypeStreamMerger::visitUnknownType(CVType &Rec) { Error TypeStreamMerger::mergeStream(const CVTypeArray &Types) { assert(IndexMap.empty()); - TypeVisitorCallbackPipeline Pipeline; LastError = Error::success(); - TypeDeserializer Deserializer; - Pipeline.addCallbackToPipeline(Deserializer); - Pipeline.addCallbackToPipeline(*this); - - CVTypeVisitor Visitor(Pipeline); - if (Handler) - Visitor.addTypeServerHandler(*Handler); - - if (auto EC = Visitor.visitTypeStream(Types)) + if (auto EC = codeview::visitTypeStream(Types, *this, Handler)) return EC; // If we found bad indices but no other errors, try doing another pass and see @@ -466,7 +456,8 @@ Error TypeStreamMerger::mergeStream(const CVTypeArray &Types) { IsSecondPass = true; NumBadIndices = 0; CurIndex = TypeIndex(TypeIndex::FirstNonSimpleIndex); - if (auto EC = Visitor.visitTypeStream(Types)) + + if (auto EC = codeview::visitTypeStream(Types, *this, Handler)) return EC; assert(NumBadIndices <= BadIndicesRemaining && diff --git a/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp b/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp index 59a060d143ff..61e75a2b56ab 100644 --- a/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp +++ b/contrib/llvm/lib/DebugInfo/DWARF/DWARFContext.cpp @@ -1086,49 +1086,32 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj, continue; } + if (Section.relocation_begin() == Section.relocation_end()) + continue; + std::map AddrCache; - if (Section.relocation_begin() != Section.relocation_end()) { - uint64_t SectionSize = RelocatedSection->getSize(); - for (const RelocationRef &Reloc : Section.relocations()) { - // FIXME: it's not clear how to correctly handle scattered - // relocations. - if (isRelocScattered(Obj, Reloc)) - continue; + for (const RelocationRef &Reloc : Section.relocations()) { + // FIXME: it's not clear how to correctly handle scattered + // relocations. 
+ if (isRelocScattered(Obj, Reloc)) + continue; - Expected SymAddrOrErr = - getSymbolAddress(Obj, Reloc, L, AddrCache); - if (!SymAddrOrErr) { - errs() << toString(SymAddrOrErr.takeError()) << '\n'; - continue; - } - - object::RelocVisitor V(Obj); - object::RelocToApply R(V.visit(Reloc.getType(), Reloc, *SymAddrOrErr)); - if (V.error()) { - SmallString<32> Name; - Reloc.getTypeName(Name); - errs() << "error: failed to compute relocation: " - << Name << "\n"; - continue; - } - uint64_t Address = Reloc.getOffset(); - if (Address + R.Width > SectionSize) { - errs() << "error: " << R.Width << "-byte relocation starting " - << Address << " bytes into section " << name << " which is " - << SectionSize << " bytes long.\n"; - continue; - } - if (R.Width > 8) { - errs() << "error: can't handle a relocation of more than 8 bytes at " - "a time.\n"; - continue; - } - DEBUG(dbgs() << "Writing " << format("%p", R.Value) - << " at " << format("%p", Address) - << " with width " << format("%d", R.Width) - << "\n"); - Map->insert({Address, {(uint8_t)R.Width, R.Value}}); + Expected SymAddrOrErr = + getSymbolAddress(Obj, Reloc, L, AddrCache); + if (!SymAddrOrErr) { + errs() << toString(SymAddrOrErr.takeError()) << '\n'; + continue; } + + object::RelocVisitor V(Obj); + object::RelocToApply R(V.visit(Reloc.getType(), Reloc, *SymAddrOrErr)); + if (V.error()) { + SmallString<32> Name; + Reloc.getTypeName(Name); + errs() << "error: failed to compute relocation: " << Name << "\n"; + continue; + } + Map->insert({Reloc.getOffset(), {R.Value}}); } } } diff --git a/contrib/llvm/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp b/contrib/llvm/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp index 629f3e80b0ed..cb783cf4fea7 100644 --- a/contrib/llvm/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp +++ b/contrib/llvm/lib/DebugInfo/PDB/Native/PDBTypeServerHandler.cpp @@ -55,9 +55,8 @@ PDBTypeServerHandler::handleInternal(PDBFile &File, auto ExpectedTpi = File.getPDBTpiStream(); if (!ExpectedTpi) return ExpectedTpi.takeError(); - CVTypeVisitor Visitor(Callbacks); - if (auto EC = Visitor.visitTypeStream(ExpectedTpi->types(nullptr))) + if (auto EC = codeview::visitTypeStream(ExpectedTpi->typeArray(), Callbacks)) return std::move(EC); return true; diff --git a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp index e9a4b71c903d..ab86e5d6a0fd 100644 --- a/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp +++ b/contrib/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp @@ -705,7 +705,7 @@ RuntimeDyldImpl::emitSection(const ObjectFile &Obj, unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL; unsigned PaddingSize = 0; unsigned StubBufSize = 0; - bool IsRequired = isRequiredForExecution(Section) || ProcessAllSections; + bool IsRequired = isRequiredForExecution(Section); bool IsVirtual = Section.isVirtual(); bool IsZeroInit = isZeroInit(Section); bool IsReadOnly = isReadOnlyData(Section); @@ -745,8 +745,8 @@ RuntimeDyldImpl::emitSection(const ObjectFile &Obj, Alignment = std::max(Alignment, getStubAlignment()); // Some sections, such as debug info, don't need to be loaded for execution. - // Leave those where they are. - if (IsRequired) { + // Process those only if explicitly requested. 
+ if (IsRequired || ProcessAllSections) { Allocate = DataSize + PaddingSize + StubBufSize; if (!Allocate) Allocate = 1; @@ -790,6 +790,10 @@ RuntimeDyldImpl::emitSection(const ObjectFile &Obj, Sections.push_back( SectionEntry(Name, Addr, DataSize, Allocate, (uintptr_t)pData)); + // Debug info sections are linked as if their load address was zero + if (!IsRequired) + Sections.back().setLoadAddress(0); + if (Checker) Checker->registerSection(Obj.getFileName(), SectionID); diff --git a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp index 98865f5e065e..bd38dd88201f 100644 --- a/contrib/llvm/lib/Support/CrashRecoveryContext.cpp +++ b/contrib/llvm/lib/Support/CrashRecoveryContext.cpp @@ -78,6 +78,9 @@ static bool gCrashRecoveryEnabled = false; static ManagedStatic> tlIsRecoveringFromCrash; +static void installExceptionOrSignalHandlers(); +static void uninstallExceptionOrSignalHandlers(); + CrashRecoveryContextCleanup::~CrashRecoveryContextCleanup() {} CrashRecoveryContext::~CrashRecoveryContext() { @@ -113,6 +116,23 @@ CrashRecoveryContext *CrashRecoveryContext::GetCurrent() { return CRCI->CRC; } +void CrashRecoveryContext::Enable() { + sys::ScopedLock L(*gCrashRecoveryContextMutex); + // FIXME: Shouldn't this be a refcount or something? + if (gCrashRecoveryEnabled) + return; + gCrashRecoveryEnabled = true; + installExceptionOrSignalHandlers(); +} + +void CrashRecoveryContext::Disable() { + sys::ScopedLock L(*gCrashRecoveryContextMutex); + if (!gCrashRecoveryEnabled) + return; + gCrashRecoveryEnabled = false; + uninstallExceptionOrSignalHandlers(); +} + void CrashRecoveryContext::registerCleanup(CrashRecoveryContextCleanup *cleanup) { if (!cleanup) @@ -140,30 +160,70 @@ CrashRecoveryContext::unregisterCleanup(CrashRecoveryContextCleanup *cleanup) { delete cleanup; } -#ifdef LLVM_ON_WIN32 +#if defined(_MSC_VER) +// If _MSC_VER is defined, we must have SEH. Use it if it's available. It's way +// better than VEH. Vectored exception handling catches all exceptions happening +// on the thread with installed exception handlers, so it can interfere with +// internal exception handling of other libraries on that thread. SEH works +// exactly as you would expect normal exception handling to work: it only +// catches exceptions if they would bubble out from the stack frame with __try / +// __except. -#include "Windows/WindowsSupport.h" +static void installExceptionOrSignalHandlers() {} +static void uninstallExceptionOrSignalHandlers() {} -// On Windows, we can make use of vectored exception handling to -// catch most crashing situations. Note that this does mean -// we will be alerted of exceptions *before* structured exception -// handling has the opportunity to catch it. But that isn't likely -// to cause problems because nowhere in the project is SEH being -// used. +bool CrashRecoveryContext::RunSafely(function_ref Fn) { + if (!gCrashRecoveryEnabled) { + Fn(); + return true; + } + + bool Result = true; + __try { + Fn(); + } __except (1) { // Catch any exception. + Result = false; + } + return Result; +} + +#else // !_MSC_VER + +#if defined(LLVM_ON_WIN32) +// This is a non-MSVC compiler, probably mingw gcc or clang without +// -fms-extensions. Use vectored exception handling (VEH). // -// Vectored exception handling is built on top of SEH, and so it -// works on a per-thread basis. +// On Windows, we can make use of vectored exception handling to catch most +// crashing situations. 
Note that this does mean we will be alerted of +// exceptions *before* structured exception handling has the opportunity to +// catch it. Unfortunately, this causes problems in practice with other code +// running on threads with LLVM crash recovery contexts, so we would like to +// eventually move away from VEH. +// +// Vectored works on a per-thread basis, which is an advantage over +// SetUnhandledExceptionFilter. SetUnhandledExceptionFilter also doesn't have +// any native support for chaining exception handlers, but VEH allows more than +// one. // // The vectored exception handler functionality was added in Windows // XP, so if support for older versions of Windows is required, // it will have to be added. -// -// If we want to support as far back as Win2k, we could use the -// SetUnhandledExceptionFilter API, but there's a risk of that -// being entirely overwritten (it's not a chain). + +#include "Windows/WindowsSupport.h" static LONG CALLBACK ExceptionHandler(PEXCEPTION_POINTERS ExceptionInfo) { + // DBG_PRINTEXCEPTION_WIDE_C is not properly defined on all supported + // compilers and platforms, so we define it manually. + constexpr ULONG DbgPrintExceptionWideC = 0x4001000AL; + switch (ExceptionInfo->ExceptionRecord->ExceptionCode) + { + case DBG_PRINTEXCEPTION_C: + case DbgPrintExceptionWideC: + case 0x406D1388: // set debugger thread name + return EXCEPTION_CONTINUE_EXECUTION; + } + // Lookup the current thread local recovery object. const CrashRecoveryContextImpl *CRCI = CurrentContext->get(); @@ -192,14 +252,7 @@ static LONG CALLBACK ExceptionHandler(PEXCEPTION_POINTERS ExceptionInfo) // non-NULL, valid VEH handles, or NULL. static sys::ThreadLocal sCurrentExceptionHandle; -void CrashRecoveryContext::Enable() { - sys::ScopedLock L(*gCrashRecoveryContextMutex); - - if (gCrashRecoveryEnabled) - return; - - gCrashRecoveryEnabled = true; - +static void installExceptionOrSignalHandlers() { // We can set up vectored exception handling now. We will install our // handler as the front of the list, though there's no assurances that // it will remain at the front (another call could install itself before @@ -208,14 +261,7 @@ void CrashRecoveryContext::Enable() { sCurrentExceptionHandle.set(handle); } -void CrashRecoveryContext::Disable() { - sys::ScopedLock L(*gCrashRecoveryContextMutex); - - if (!gCrashRecoveryEnabled) - return; - - gCrashRecoveryEnabled = false; - +static void uninstallExceptionOrSignalHandlers() { PVOID currentHandle = const_cast(sCurrentExceptionHandle.get()); if (currentHandle) { // Now we can remove the vectored exception handler from the chain @@ -226,7 +272,7 @@ void CrashRecoveryContext::Disable() { } } -#else +#else // !LLVM_ON_WIN32 // Generic POSIX implementation. // @@ -278,14 +324,7 @@ static void CrashRecoverySignalHandler(int Signal) { const_cast(CRCI)->HandleCrash(); } -void CrashRecoveryContext::Enable() { - sys::ScopedLock L(*gCrashRecoveryContextMutex); - - if (gCrashRecoveryEnabled) - return; - - gCrashRecoveryEnabled = true; - +static void installExceptionOrSignalHandlers() { // Setup the signal handler. struct sigaction Handler; Handler.sa_handler = CrashRecoverySignalHandler; @@ -297,20 +336,13 @@ void CrashRecoveryContext::Enable() { } } -void CrashRecoveryContext::Disable() { - sys::ScopedLock L(*gCrashRecoveryContextMutex); - - if (!gCrashRecoveryEnabled) - return; - - gCrashRecoveryEnabled = false; - +static void uninstallExceptionOrSignalHandlers() { // Restore the previous signal handlers. 
for (unsigned i = 0; i != NumSignals; ++i) sigaction(Signals[i], &PrevActions[i], nullptr); } -#endif +#endif // !LLVM_ON_WIN32 bool CrashRecoveryContext::RunSafely(function_ref<void()> Fn) { // If crash recovery is disabled, do nothing. @@ -328,6 +360,8 @@ bool CrashRecoveryContext::RunSafely(function_ref<void()> Fn) { return true; } +#endif // !_MSC_VER + void CrashRecoveryContext::HandleCrash() { CrashRecoveryContextImpl *CRCI = (CrashRecoveryContextImpl *) Impl; assert(CRCI && "Crash recovery context never initialized!"); diff --git a/contrib/llvm/lib/Support/Unix/Path.inc b/contrib/llvm/lib/Support/Unix/Path.inc index cdea09be41e0..fa28ba1b6ab6 100644 --- a/contrib/llvm/lib/Support/Unix/Path.inc +++ b/contrib/llvm/lib/Support/Unix/Path.inc @@ -103,16 +103,13 @@ #define STATVFS_F_FLAG(vfs) (vfs).f_flags #endif -#if defined(__FreeBSD__) || defined(__NetBSD__) -#include <sys/sysctl.h> -#endif - using namespace llvm; namespace llvm { namespace sys { namespace fs { -#if defined(__Bitrig__) || defined(__OpenBSD__) || defined(__minix) || \ +#if defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \ + defined(__OpenBSD__) || defined(__minix) || defined(__FreeBSD_kernel__) || \ defined(__linux__) || defined(__CYGWIN__) || defined(__DragonFly__) || \ defined(_AIX) static int @@ -167,7 +164,7 @@ getprogpath(char ret[PATH_MAX], const char *bin) free(pv); return nullptr; } -#endif // Bitrig || OpenBSD || minix || linux || CYGWIN || DragonFly || AIX +#endif // __FreeBSD__ || __NetBSD__ || __FreeBSD_kernel__ /// GetMainExecutable - Return the path to the main executable, given the /// value of argv[0] from program startup. @@ -183,24 +180,9 @@ std::string getMainExecutable(const char *argv0, void *MainAddr) { if (realpath(exe_path, link_path)) return link_path; } -#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) - int mib[4]; - mib[0] = CTL_KERN; -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) - mib[1] = KERN_PROC; - mib[2] = KERN_PROC_PATHNAME; - mib[3] = -1; -#else - mib[1] = KERN_PROC_ARGS; - mib[2] = -1; - mib[3] = KERN_PROC_PATHNAME; -#endif - char exe_path[PATH_MAX]; - size_t cb = sizeof(exe_path); - if (sysctl(mib, 4, exe_path, &cb, NULL, 0) == 0) - return exe_path; -#elif defined(__Bitrig__) || defined(__OpenBSD__) || defined(__minix) || \ - defined(__DragonFly__) || defined(_AIX) +#elif defined(__FreeBSD__) || defined (__NetBSD__) || defined(__Bitrig__) || \ + defined(__OpenBSD__) || defined(__minix) || defined(__DragonFly__) || \ + defined(__FreeBSD_kernel__) || defined(_AIX) char exe_path[PATH_MAX]; if (getprogpath(exe_path, argv0) != NULL) diff --git a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp index dc916c034661..1aec602a2a36 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp +++ b/contrib/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp @@ -1158,8 +1158,7 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF, } DEBUG(dbgs() << "*** determineCalleeSaves\nUsed CSRs:"; - for (int Reg = SavedRegs.find_first(); Reg != -1; - Reg = SavedRegs.find_next(Reg)) + for (unsigned Reg : SavedRegs.set_bits()) dbgs() << ' ' << PrintReg(Reg, RegInfo); dbgs() << "\n";); diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 4f7c2e122390..1af36086ad90 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -553,7
+553,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setTargetDAGCombine(ISD::INTRINSIC_VOID); setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN); setTargetDAGCombine(ISD::INSERT_VECTOR_ELT); - setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT); MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 8; MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 4; @@ -659,6 +658,19 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM, setOperationAction(ISD::MUL, MVT::v4i32, Custom); setOperationAction(ISD::MUL, MVT::v2i64, Custom); + // Vector reductions + for (MVT VT : MVT::integer_valuetypes()) { + setOperationAction(ISD::VECREDUCE_ADD, VT, Custom); + setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom); + setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom); + setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom); + setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom); + } + for (MVT VT : MVT::fp_valuetypes()) { + setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom); + setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom); + } + setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal); setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand); // Likewise, narrowing and extending vector loads/stores aren't handled @@ -2606,6 +2618,14 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op, return LowerMUL(Op, DAG); case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); + case ISD::VECREDUCE_ADD: + case ISD::VECREDUCE_SMAX: + case ISD::VECREDUCE_SMIN: + case ISD::VECREDUCE_UMAX: + case ISD::VECREDUCE_UMIN: + case ISD::VECREDUCE_FMAX: + case ISD::VECREDUCE_FMIN: + return LowerVECREDUCE(Op, DAG); } } @@ -7128,6 +7148,47 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op, return Cmp; } +static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp, + SelectionDAG &DAG) { + SDValue VecOp = ScalarOp.getOperand(0); + auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp); + return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx, + DAG.getConstant(0, DL, MVT::i64)); +} + +SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op, + SelectionDAG &DAG) const { + SDLoc dl(Op); + switch (Op.getOpcode()) { + case ISD::VECREDUCE_ADD: + return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG); + case ISD::VECREDUCE_SMAX: + return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG); + case ISD::VECREDUCE_SMIN: + return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG); + case ISD::VECREDUCE_UMAX: + return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG); + case ISD::VECREDUCE_UMIN: + return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG); + case ISD::VECREDUCE_FMAX: { + assert(Op->getFlags().hasNoNaNs() && "fmax vector reduction needs NoNaN flag"); + return DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), + DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32), + Op.getOperand(0)); + } + case ISD::VECREDUCE_FMIN: { + assert(Op->getFlags().hasNoNaNs() && "fmin vector reduction needs NoNaN flag"); + return DAG.getNode( + ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), + DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32), + Op.getOperand(0)); + } + default: + llvm_unreachable("Unhandled reduction"); + } +} + /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment /// specified in the intrinsic calls. 
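The lowering added above turns each VECREDUCE_* node into a single across-lane op (UADDV, SMAXV, SMINV, ...) followed by an extract of lane 0. A minimal scalar model of that contract, in plain C++ with hypothetical helper names that are not part of the patch:

#include <array>
#include <cstddef>

// Models getReductionSDNode: the across-lane node folds every lane into a
// single value, and the caller reads it back as extract_vector_elt(Rdx, 0).
template <typename T, std::size_t N, typename Fold>
T modelAcrossLaneReduce(const std::array<T, N> &Lanes, Fold Combine) {
  T Rdx = Lanes[0];
  for (std::size_t I = 1; I < N; ++I)
    Rdx = Combine(Rdx, Lanes[I]); // a single machine op on real hardware
  return Rdx;
}

// Usage: modelAcrossLaneReduce(V, [](int A, int B) { return A > B ? A : B; })
// mirrors VECREDUCE_SMAX lowered through AArch64ISD::SMAXV.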
@@ -9490,266 +9551,6 @@ static SDValue performSTORECombine(SDNode *N, return SDValue(); } -/// This function handles the log2-shuffle pattern produced by the -/// LoopVectorizer for the across vector reduction. It consists of -/// log2(NumVectorElements) steps and, in each step, 2^(s) elements -/// are reduced, where s is an induction variable from 0 to -/// log2(NumVectorElements). -static SDValue tryMatchAcrossLaneShuffleForReduction(SDNode *N, SDValue OpV, - unsigned Op, - SelectionDAG &DAG) { - EVT VTy = OpV->getOperand(0).getValueType(); - if (!VTy.isVector()) - return SDValue(); - - int NumVecElts = VTy.getVectorNumElements(); - if (Op == ISD::FMAXNUM || Op == ISD::FMINNUM) { - if (NumVecElts != 4) - return SDValue(); - } else { - if (NumVecElts != 4 && NumVecElts != 8 && NumVecElts != 16) - return SDValue(); - } - - int NumExpectedSteps = APInt(8, NumVecElts).logBase2(); - SDValue PreOp = OpV; - // Iterate over each step of the across vector reduction. - for (int CurStep = 0; CurStep != NumExpectedSteps; ++CurStep) { - SDValue CurOp = PreOp.getOperand(0); - SDValue Shuffle = PreOp.getOperand(1); - if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE) { - // Try to swap the 1st and 2nd operand as add and min/max instructions - // are commutative. - CurOp = PreOp.getOperand(1); - Shuffle = PreOp.getOperand(0); - if (Shuffle.getOpcode() != ISD::VECTOR_SHUFFLE) - return SDValue(); - } - - // Check if the input vector is fed by the operator we want to handle, - // except the last step; the very first input vector is not necessarily - // the same operator we are handling. - if (CurOp.getOpcode() != Op && (CurStep != (NumExpectedSteps - 1))) - return SDValue(); - - // Check if it forms one step of the across vector reduction. - // E.g., - // %cur = add %1, %0 - // %shuffle = vector_shuffle %cur, <2, 3, u, u> - // %pre = add %cur, %shuffle - if (Shuffle.getOperand(0) != CurOp) - return SDValue(); - - int NumMaskElts = 1 << CurStep; - ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Shuffle)->getMask(); - // Check mask values in each step. - // We expect the shuffle mask in each step follows a specific pattern - // denoted here by the <M, U> form, where M is a sequence of integers - // starting from NumMaskElts, increasing by 1, and the number integers - // in M should be NumMaskElts. U is a sequence of UNDEFs and the number - // of undef in U should be NumVecElts - NumMaskElts. - // E.g., for <8 x i16>, mask values in each step should be : - // step 0 : <1,u,u,u,u,u,u,u> - // step 1 : <2,3,u,u,u,u,u,u> - // step 2 : <4,5,6,7,u,u,u,u> - for (int i = 0; i < NumVecElts; ++i) - if ((i < NumMaskElts && Mask[i] != (NumMaskElts + i)) || - (i >= NumMaskElts && !(Mask[i] < 0))) - return SDValue(); - - PreOp = CurOp; - } - unsigned Opcode; - bool IsIntrinsic = false; - - switch (Op) { - default: - llvm_unreachable("Unexpected operator for across vector reduction"); - case ISD::ADD: - Opcode = AArch64ISD::UADDV; - break; - case ISD::SMAX: - Opcode = AArch64ISD::SMAXV; - break; - case ISD::UMAX: - Opcode = AArch64ISD::UMAXV; - break; - case ISD::SMIN: - Opcode = AArch64ISD::SMINV; - break; - case ISD::UMIN: - Opcode = AArch64ISD::UMINV; - break; - case ISD::FMAXNUM: - Opcode = Intrinsic::aarch64_neon_fmaxnmv; - IsIntrinsic = true; - break; - case ISD::FMINNUM: - Opcode = Intrinsic::aarch64_neon_fminnmv; - IsIntrinsic = true; - break; - } - SDLoc DL(N); - - return IsIntrinsic - ?
DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, N->getValueType(0), - DAG.getConstant(Opcode, DL, MVT::i32), PreOp) - : DAG.getNode( - ISD::EXTRACT_VECTOR_ELT, DL, N->getValueType(0), - DAG.getNode(Opcode, DL, PreOp.getSimpleValueType(), PreOp), - DAG.getConstant(0, DL, MVT::i64)); -} - -/// Target-specific DAG combine for the across vector min/max reductions. -/// This function specifically handles the final clean-up step of the vector -/// min/max reductions produced by the LoopVectorizer. It is the log2-shuffle -/// pattern, which narrows down and finds the final min/max value from all -/// elements of the vector. -/// For example, for a <16 x i8> vector : -/// svn0 = vector_shuffle %0, undef<8,9,10,11,12,13,14,15,u,u,u,u,u,u,u,u> -/// %smax0 = smax %arr, svn0 -/// %svn1 = vector_shuffle %smax0, undef<4,5,6,7,u,u,u,u,u,u,u,u,u,u,u,u> -/// %smax1 = smax %smax0, %svn1 -/// %svn2 = vector_shuffle %smax1, undef<2,3,u,u,u,u,u,u,u,u,u,u,u,u,u,u> -/// %smax2 = smax %smax1, svn2 -/// %svn3 = vector_shuffle %smax2, undef<1,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u> -/// %sc = setcc %smax2, %svn3, gt -/// %n0 = extract_vector_elt %sc, #0 -/// %n1 = extract_vector_elt %smax2, #0 -/// %n2 = extract_vector_elt $smax2, #1 -/// %result = select %n0, %n1, n2 -/// becomes : -/// %1 = smaxv %0 -/// %result = extract_vector_elt %1, 0 -static SDValue -performAcrossLaneMinMaxReductionCombine(SDNode *N, SelectionDAG &DAG, - const AArch64Subtarget *Subtarget) { - if (!Subtarget->hasNEON()) - return SDValue(); - - SDValue N0 = N->getOperand(0); - SDValue IfTrue = N->getOperand(1); - SDValue IfFalse = N->getOperand(2); - - // Check if the SELECT merges up the final result of the min/max - // from a vector. - if (N0.getOpcode() != ISD::EXTRACT_VECTOR_ELT || - IfTrue.getOpcode() != ISD::EXTRACT_VECTOR_ELT || - IfFalse.getOpcode() != ISD::EXTRACT_VECTOR_ELT) - return SDValue(); - - // Expect N0 is fed by SETCC. - SDValue SetCC = N0.getOperand(0); - EVT SetCCVT = SetCC.getValueType(); - if (SetCC.getOpcode() != ISD::SETCC || !SetCCVT.isVector() || - SetCCVT.getVectorElementType() != MVT::i1) - return SDValue(); - - SDValue VectorOp = SetCC.getOperand(0); - unsigned Op = VectorOp->getOpcode(); - // Check if the input vector is fed by the operator we want to handle. - if (Op != ISD::SMAX && Op != ISD::UMAX && Op != ISD::SMIN && - Op != ISD::UMIN && Op != ISD::FMAXNUM && Op != ISD::FMINNUM) - return SDValue(); - - EVT VTy = VectorOp.getValueType(); - if (!VTy.isVector()) - return SDValue(); - - if (VTy.getSizeInBits() < 64) - return SDValue(); - - EVT EltTy = VTy.getVectorElementType(); - if (Op == ISD::FMAXNUM || Op == ISD::FMINNUM) { - if (EltTy != MVT::f32) - return SDValue(); - } else { - if (EltTy != MVT::i32 && EltTy != MVT::i16 && EltTy != MVT::i8) - return SDValue(); - } - - // Check if extracting from the same vector. - // For example, - // %sc = setcc %vector, %svn1, gt - // %n0 = extract_vector_elt %sc, #0 - // %n1 = extract_vector_elt %vector, #0 - // %n2 = extract_vector_elt $vector, #1 - if (!(VectorOp == IfTrue->getOperand(0) && - VectorOp == IfFalse->getOperand(0))) - return SDValue(); - - // Check if the condition code is matched with the operator type. 
- ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get(); - if ((Op == ISD::SMAX && CC != ISD::SETGT && CC != ISD::SETGE) || - (Op == ISD::UMAX && CC != ISD::SETUGT && CC != ISD::SETUGE) || - (Op == ISD::SMIN && CC != ISD::SETLT && CC != ISD::SETLE) || - (Op == ISD::UMIN && CC != ISD::SETULT && CC != ISD::SETULE) || - (Op == ISD::FMAXNUM && CC != ISD::SETOGT && CC != ISD::SETOGE && - CC != ISD::SETUGT && CC != ISD::SETUGE && CC != ISD::SETGT && - CC != ISD::SETGE) || - (Op == ISD::FMINNUM && CC != ISD::SETOLT && CC != ISD::SETOLE && - CC != ISD::SETULT && CC != ISD::SETULE && CC != ISD::SETLT && - CC != ISD::SETLE)) - return SDValue(); - - // Expect to check only lane 0 from the vector SETCC. - if (!isNullConstant(N0.getOperand(1))) - return SDValue(); - - // Expect to extract the true value from lane 0. - if (!isNullConstant(IfTrue.getOperand(1))) - return SDValue(); - - // Expect to extract the false value from lane 1. - if (!isOneConstant(IfFalse.getOperand(1))) - return SDValue(); - - return tryMatchAcrossLaneShuffleForReduction(N, SetCC, Op, DAG); -} - -/// Target-specific DAG combine for the across vector add reduction. -/// This function specifically handles the final clean-up step of the vector -/// add reduction produced by the LoopVectorizer. It is the log2-shuffle -/// pattern, which adds all elements of a vector together. -/// For example, for a <4 x i32> vector : -/// %1 = vector_shuffle %0, <2,3,u,u> -/// %2 = add %0, %1 -/// %3 = vector_shuffle %2, <1,u,u,u> -/// %4 = add %2, %3 -/// %result = extract_vector_elt %4, 0 -/// becomes : -/// %0 = uaddv %0 -/// %result = extract_vector_elt %0, 0 -static SDValue -performAcrossLaneAddReductionCombine(SDNode *N, SelectionDAG &DAG, - const AArch64Subtarget *Subtarget) { - if (!Subtarget->hasNEON()) - return SDValue(); - SDValue N0 = N->getOperand(0); - SDValue N1 = N->getOperand(1); - - // Check if the input vector is fed by the ADD. - if (N0->getOpcode() != ISD::ADD) - return SDValue(); - - // The vector extract idx must constant zero because we only expect the final - // result of the reduction is placed in lane 0. - if (!isNullConstant(N1)) - return SDValue(); - - EVT VTy = N0.getValueType(); - if (!VTy.isVector()) - return SDValue(); - - EVT EltTy = VTy.getVectorElementType(); - if (EltTy != MVT::i32 && EltTy != MVT::i16 && EltTy != MVT::i8) - return SDValue(); - - if (VTy.getSizeInBits() < 64) - return SDValue(); - - return tryMatchAcrossLaneShuffleForReduction(N, N0, ISD::ADD, DAG); -} /// Target-specific DAG combine function for NEON load/store intrinsics /// to merge base address updates.
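The two combines deleted above pattern-matched the halving shuffle-and-op ladder that the LoopVectorizer used to emit; the VECREDUCE lowering added earlier in this patch replaces them. A plain C++ sketch of the matched ladder, for illustration only:

#include <array>
#include <cstddef>

// Each step folds the upper half of the live lanes into the lower half, so
// after log2(N) steps lane 0 holds the reduction of all N lanes.
template <std::size_t N>
int log2ShuffleAddReduce(std::array<int, N> V) {
  static_assert(N != 0 && (N & (N - 1)) == 0, "power-of-two lane count assumed");
  for (std::size_t Half = N / 2; Half != 0; Half /= 2)
    for (std::size_t I = 0; I != Half; ++I)
      V[I] += V[I + Half]; // the vector_shuffle + add pair in the matched DAG
  return V[0];             // extract_vector_elt %4, 0
}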
@@ -10428,12 +10229,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, return performBitcastCombine(N, DCI, DAG); case ISD::CONCAT_VECTORS: return performConcatVectorsCombine(N, DCI, DAG); - case ISD::SELECT: { - SDValue RV = performSelectCombine(N, DCI); - if (!RV.getNode()) - RV = performAcrossLaneMinMaxReductionCombine(N, DAG, Subtarget); - return RV; - } + case ISD::SELECT: + return performSelectCombine(N, DCI); case ISD::VSELECT: return performVSelectCombine(N, DCI.DAG); case ISD::LOAD: @@ -10455,8 +10252,6 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, return performNVCASTCombine(N); case ISD::INSERT_VECTOR_ELT: return performPostLD1Combine(N, DCI, true); - case ISD::EXTRACT_VECTOR_ELT: - return performAcrossLaneAddReductionCombine(N, DAG, Subtarget); case ISD::INTRINSIC_VOID: case ISD::INTRINSIC_W_CHAIN: switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { @@ -10676,6 +10471,14 @@ void AArch64TargetLowering::ReplaceNodeResults( case ISD::BITCAST: ReplaceBITCASTResults(N, Results, DAG); return; + case ISD::VECREDUCE_ADD: + case ISD::VECREDUCE_SMAX: + case ISD::VECREDUCE_SMIN: + case ISD::VECREDUCE_UMAX: + case ISD::VECREDUCE_UMIN: + Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG)); + return; + case AArch64ISD::SADDV: ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV); return; diff --git a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h index 89db566c219c..ecc2517fb288 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/contrib/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -568,6 +568,7 @@ class AArch64TargetLowering : public TargetLowering { SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const; SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG, std::vector<SDNode *> *Created) const override; diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp index 7c6f55c06bce..43569af04347 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp +++ b/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp @@ -769,3 +769,28 @@ unsigned AArch64TTIImpl::getMinPrefetchStride() { unsigned AArch64TTIImpl::getMaxPrefetchIterationsAhead() { return ST->getMaxPrefetchIterationsAhead(); } + +bool AArch64TTIImpl::useReductionIntrinsic(unsigned Opcode, Type *Ty, + TTI::ReductionFlags Flags) const { + assert(isa<VectorType>(Ty) && "Expected Ty to be a vector type"); + unsigned ScalarBits = Ty->getScalarSizeInBits(); + switch (Opcode) { + case Instruction::FAdd: + case Instruction::FMul: + case Instruction::And: + case Instruction::Or: + case Instruction::Xor: + case Instruction::Mul: + return false; + case Instruction::Add: + return ScalarBits * Ty->getVectorNumElements() >= 128; + case Instruction::ICmp: + return (ScalarBits < 64) && + (ScalarBits * Ty->getVectorNumElements() >= 128); + case Instruction::FCmp: + return Flags.NoNaN; + default: + llvm_unreachable("Unhandled reduction opcode"); + } + return false; +} diff --git a/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h index 280d97f3c502..d0299149c38c 100644 --- a/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h +++
b/contrib/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h @@ -145,6 +145,9 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> { bool shouldExpandReduction(const IntrinsicInst *II) const { return false; } + + bool useReductionIntrinsic(unsigned Opcode, Type *Ty, + TTI::ReductionFlags Flags) const; /// @} }; diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp index 7c99752b881f..c3ac796a0a44 100644 --- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp +++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp @@ -1707,10 +1707,38 @@ bool AMDGPUDAGToDAGISel::SelectVOP3PMods(SDValue In, SDValue &Src, // FIXME: Look for on separate components if (Src.getOpcode() == ISD::FNEG) { - Mods |= (SISrcMods::NEG | SISrcMods::NEG_HI); + Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); Src = Src.getOperand(0); } + if (Src.getOpcode() == ISD::BUILD_VECTOR) { + unsigned VecMods = Mods; + + SDValue Lo = Src.getOperand(0); + SDValue Hi = Src.getOperand(1); + + if (Lo.getOpcode() == ISD::FNEG) { + Lo = Lo.getOperand(0); + Mods ^= SISrcMods::NEG; + } + + if (Hi.getOpcode() == ISD::FNEG) { + Hi = Hi.getOperand(0); + Mods ^= SISrcMods::NEG_HI; + } + + if (Lo == Hi && !isInlineImmediate(Lo.getNode())) { + // Really a scalar input. Just select from the low half of the register to + // avoid packing. + + Src = Lo; + SrcMods = CurDAG->getTargetConstant(Mods, SDLoc(In), MVT::i32); + return true; + } + + Mods = VecMods; + } + // Packed instructions do not have abs modifiers. // FIXME: Handle abs/neg of individual components. diff --git a/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h index bed7d326b3dd..e543cae07ada 100644 --- a/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h +++ b/contrib/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h @@ -289,6 +289,10 @@ class AMDGPUSubtarget : public AMDGPUGenSubtargetInfo { return getGeneration() >= GFX9; } + bool hasMin3Max3_16() const { + return getGeneration() >= GFX9; + } + bool hasCARRY() const { return (getGeneration() >= EVERGREEN); } diff --git a/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp index 48a14e4dbea2..286be355bc14 100644 --- a/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp +++ b/contrib/llvm/lib/Target/AMDGPU/SIISelLowering.cpp @@ -4491,7 +4491,8 @@ SDValue SITargetLowering::performMinMaxCombine(SDNode *N, if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY && - VT != MVT::f64) { + VT != MVT::f64 && + ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) { // max(max(a, b), c) -> max3(a, b, c) // min(min(a, b), c) -> min3(a, b, c) if (Op0.getOpcode() == Opc && Op0.hasOneUse()) { diff --git a/contrib/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp b/contrib/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp index 065fd09eb356..38a16b525a75 100644 --- a/contrib/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp +++ b/contrib/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp @@ -765,7 +765,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, .addFrameIndex(FrameIndex) // addr .addMemOperand(MMO) .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) - .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit); + .addReg(MFI->getFrameOffsetReg(), RegState::Implicit); // Add the scratch resource registers as implicit uses because we may end up // needing them, and need to ensure that the reserved registers are // correctly handled.
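The SelectVOP3PMods change above replaces '|=' with '^=' because packed-f16 source modifiers are per-half sign toggles, so folding a second fneg has to cancel the first rather than leave the bits stuck set. A tiny model with stand-in bit values (the real encodings live in SISrcMods):

#include <cassert>

constexpr unsigned kNeg = 1u << 0;   // stands in for SISrcMods::NEG
constexpr unsigned kNegHi = 1u << 1; // stands in for SISrcMods::NEG_HI

// Folding fneg over a packed value toggles both halves' sign bits.
constexpr unsigned foldPackedFNeg(unsigned Mods) {
  return Mods ^ (kNeg | kNegHi);
}

int main() {
  assert(foldPackedFNeg(foldPackedFNeg(0)) == 0); // fneg(fneg(x)) cancels
  assert(foldPackedFNeg(kNeg) == kNegHi);         // low half off, high half on
  return 0;
}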
@@ -796,7 +796,7 @@ void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, .addReg(SrcReg, getKillRegState(isKill)) // data .addFrameIndex(FrameIndex) // addr .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc - .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset + .addReg(MFI->getFrameOffsetReg()) // scratch_offset .addImm(0) // offset .addMemOperand(MMO); } @@ -869,7 +869,7 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, .addFrameIndex(FrameIndex) // addr .addMemOperand(MMO) .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) - .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit); + .addReg(MFI->getFrameOffsetReg(), RegState::Implicit); if (ST.hasScalarStores()) { // m0 is used for offset to scalar stores if used to spill. @@ -892,10 +892,10 @@ void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize); BuildMI(MBB, MI, DL, get(Opcode), DestReg) - .addFrameIndex(FrameIndex) // vaddr - .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc - .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset - .addImm(0) // offset + .addFrameIndex(FrameIndex) // vaddr + .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc + .addReg(MFI->getFrameOffsetReg()) // scratch_offset + .addImm(0) // offset .addMemOperand(MMO); } diff --git a/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp b/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp index 8820e294562b..06cfc95be96a 100644 --- a/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp +++ b/contrib/llvm/lib/Target/AMDGPU/SIRegisterInfo.cpp @@ -654,11 +654,11 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i); if (Offset != 0) { BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg) - .addReg(MFI->getScratchWaveOffsetReg()) + .addReg(MFI->getFrameOffsetReg()) .addImm(Offset); } else { BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg) - .addReg(MFI->getScratchWaveOffsetReg()); + .addReg(MFI->getFrameOffsetReg()); } BuildMI(*MBB, MI, DL, TII->get(ScalarStoreOp)) @@ -715,11 +715,11 @@ bool SIRegisterInfo::spillSGPR(MachineBasicBlock::iterator MI, = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore, EltSize, MinAlign(Align, EltSize * i)); BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_SAVE)) - .addReg(TmpReg, RegState::Kill) // src - .addFrameIndex(Index) // vaddr - .addReg(MFI->getScratchRSrcReg()) // srrsrc - .addReg(MFI->getScratchWaveOffsetReg()) // soffset - .addImm(i * 4) // offset + .addReg(TmpReg, RegState::Kill) // src + .addFrameIndex(Index) // vaddr + .addReg(MFI->getScratchRSrcReg()) // srrsrc + .addReg(MFI->getFrameOffsetReg()) // soffset + .addImm(i * 4) // offset .addMemOperand(MMO); } } @@ -806,11 +806,11 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, int64_t Offset = (ST.getWavefrontSize() * FrOffset) + (EltSize * i); if (Offset != 0) { BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_ADD_U32), OffsetReg) - .addReg(MFI->getScratchWaveOffsetReg()) + .addReg(MFI->getFrameOffsetReg()) .addImm(Offset); } else { BuildMI(*MBB, MI, DL, TII->get(AMDGPU::S_MOV_B32), OffsetReg) - .addReg(MFI->getScratchWaveOffsetReg()); + .addReg(MFI->getFrameOffsetReg()); } auto MIB = @@ -853,10 +853,10 @@ bool SIRegisterInfo::restoreSGPR(MachineBasicBlock::iterator MI, MinAlign(Align, EltSize * i)); BuildMI(*MBB, MI, DL, TII->get(AMDGPU::SI_SPILL_V32_RESTORE), TmpReg) - .addFrameIndex(Index) // vaddr - .addReg(MFI->getScratchRSrcReg()) // srsrc - 
.addReg(MFI->getScratchWaveOffsetReg()) // soffset - .addImm(i * 4) // offset + .addFrameIndex(Index) // vaddr + .addReg(MFI->getScratchRSrcReg()) // srsrc + .addReg(MFI->getFrameOffsetReg()) // soffset + .addImm(i * 4) // offset .addMemOperand(MMO); auto MIB = diff --git a/contrib/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/contrib/llvm/lib/Target/AMDGPU/VOP3Instructions.td index ffa6c60d6b1f..c0b5069948fb 100644 --- a/contrib/llvm/lib/Target/AMDGPU/VOP3Instructions.td +++ b/contrib/llvm/lib/Target/AMDGPU/VOP3Instructions.td @@ -300,10 +300,19 @@ def V_AND_OR_B32 : VOP3Inst <"v_and_or_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>; def V_OR3_B32 : VOP3Inst <"v_or3_b32", VOP3_Profile<VOP_I32_I32_I32_I32>>; def V_XAD_U32 : VOP3Inst <"v_xad_u32", VOP3_Profile<VOP_I32_I32_I32_I32>>; + def V_MED3_F16 : VOP3Inst <"v_med3_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, AMDGPUfmed3>; def V_MED3_I16 : VOP3Inst <"v_med3_i16", VOP3_Profile<VOP_I16_I16_I16_I16>, AMDGPUsmed3>; def V_MED3_U16 : VOP3Inst <"v_med3_u16", VOP3_Profile<VOP_I16_I16_I16_I16>, AMDGPUumed3>; -} + +def V_MIN3_F16 : VOP3Inst <"v_min3_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, AMDGPUfmin3>; +def V_MIN3_I16 : VOP3Inst <"v_min3_i16", VOP3_Profile<VOP_I16_I16_I16_I16>, AMDGPUsmin3>; +def V_MIN3_U16 : VOP3Inst <"v_min3_u16", VOP3_Profile<VOP_I16_I16_I16_I16>, AMDGPUumin3>; + +def V_MAX3_F16 : VOP3Inst <"v_max3_f16", VOP3_Profile<VOP_F16_F16_F16_F16>, AMDGPUfmax3>; +def V_MAX3_I16 : VOP3Inst <"v_max3_i16", VOP3_Profile<VOP_I16_I16_I16_I16>, AMDGPUsmax3>; +def V_MAX3_U16 : VOP3Inst <"v_max3_u16", VOP3_Profile<VOP_I16_I16_I16_I16>, AMDGPUumax3>; +} // End SubtargetPredicate = isGFX9 //===----------------------------------------------------------------------===// @@ -509,6 +518,15 @@ defm V_OR3_B32 : VOP3_Real_vi <0x202>; defm V_PACK_B32_F16 : VOP3_Real_vi <0x2a0>; defm V_XAD_U32 : VOP3_Real_vi <0x1f3>; + +defm V_MIN3_F16 : VOP3_Real_vi <0x1f4>; +defm V_MIN3_I16 : VOP3_Real_vi <0x1f5>; +defm V_MIN3_U16 : VOP3_Real_vi <0x1f6>; + +defm V_MAX3_F16 : VOP3_Real_vi <0x1f7>; +defm V_MAX3_I16 : VOP3_Real_vi <0x1f8>; +defm V_MAX3_U16 : VOP3_Real_vi <0x1f9>; + defm V_MED3_F16 : VOP3_Real_vi <0x1fa>; defm V_MED3_I16 : VOP3_Real_vi <0x1fb>; defm V_MED3_U16 : VOP3_Real_vi <0x1fc>; diff --git a/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp b/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp index 8c680cdf9b47..b1f059835ff5 100644 --- a/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp +++ b/contrib/llvm/lib/Target/ARM/ARMInstructionSelector.cpp @@ -345,25 +345,10 @@ bool ARMInstructionSelector::select(MachineInstr &I) const { I.setDesc(TII.get(COPY)); return selectCopy(I, TII, MRI, TRI, RBI); } - case G_ADD: case G_GEP: I.setDesc(TII.get(ARM::ADDrr)); MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); break; - case G_SUB: - I.setDesc(TII.get(ARM::SUBrr)); - MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); - break; - case G_MUL: - if (TII.getSubtarget().hasV6Ops()) { - I.setDesc(TII.get(ARM::MUL)); - } else { - assert(TII.getSubtarget().useMulOps() && "Unsupported target"); - I.setDesc(TII.get(ARM::MULv5)); - MIB->getOperand(0).setIsEarlyClobber(true); - } - MIB.add(predOps(ARMCC::AL)).add(condCodeOp()); - break; case G_FRAME_INDEX: // Add 0 to the given frame index and hope it will eventually be folded into // the user(s).
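The Thumb1, Mips, and WebAssembly hunks below repeat the cleanup already applied to AArch64FrameLowering: the find_first()/find_next() walk over a BitVector becomes a range-for over set_bits(). A minimal before/after, assuming LLVM's ADT headers are available:

#include "llvm/ADT/BitVector.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::BitVector BV(8);
  BV.set(1);
  BV.set(5);

  // Old idiom, removed throughout this patch:
  for (int R = BV.find_first(); R != -1; R = BV.find_next(R))
    llvm::outs() << R << ' ';
  llvm::outs() << '\n';

  // New idiom: iterate the same set bits as unsigned values.
  for (unsigned R : BV.set_bits())
    llvm::outs() << R << ' ';
  llvm::outs() << '\n';
  return 0;
}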
diff --git a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp index d0fd366ab9ed..1a17d4e33e4f 100644 --- a/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp +++ b/contrib/llvm/lib/Target/ARM/Thumb1FrameLowering.cpp @@ -571,8 +571,7 @@ bool Thumb1FrameLowering::emitPopSpecialFixUp(MachineBasicBlock &MBB, GPRsNoLRSP.reset(ARM::LR); GPRsNoLRSP.reset(ARM::SP); GPRsNoLRSP.reset(ARM::PC); - for (int Register = GPRsNoLRSP.find_first(); Register != -1; - Register = GPRsNoLRSP.find_next(Register)) { + for (unsigned Register : GPRsNoLRSP.set_bits()) { if (!UsedRegs.contains(Register)) { // Remember the first pop-friendly register and exit. if (PopFriendly.test(Register)) { diff --git a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp index ae58c26e145a..1597057ad63f 100644 --- a/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp +++ b/contrib/llvm/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -386,7 +386,7 @@ void RegDefsUses::setCallerSaved(const MachineInstr &MI) { void RegDefsUses::setUnallocatableRegs(const MachineFunction &MF) { BitVector AllocSet = TRI.getAllocatableSet(MF); - for (int R = AllocSet.find_first(); R != -1; R = AllocSet.find_next(R)) + for (unsigned R : AllocSet.set_bits()) for (MCRegAliasIterator AI(R, &TRI, false); AI.isValid(); ++AI) AllocSet.set(*AI); diff --git a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp index 40bfe3a449f7..57a1d373c88c 100644 --- a/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp +++ b/contrib/llvm/lib/Target/PowerPC/PPCFrameLowering.cpp @@ -1765,31 +1765,36 @@ void PPCFrameLowering::processFunctionBeforeFrameFinalized(MachineFunction &MF, // Check whether the frame pointer register is allocated. If so, make sure it // is spilled to the correct offset. if (needsFP(MF)) { - HasGPSaveArea = true; - int FI = PFI->getFramePointerSaveIndex(); assert(FI && "No Frame Pointer Save Slot!"); - MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI)); + // FP is R31/X31, so no need to update MinGPR/MinG8R. + HasGPSaveArea = true; } if (PFI->usesPICBase()) { - HasGPSaveArea = true; - int FI = PFI->getPICBasePointerSaveIndex(); assert(FI && "No PIC Base Pointer Save Slot!"); - MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI)); + + MinGPR = std::min<unsigned>(MinGPR, PPC::R30); + HasGPSaveArea = true; } const PPCRegisterInfo *RegInfo = Subtarget.getRegisterInfo(); if (RegInfo->hasBasePointer(MF)) { - HasGPSaveArea = true; - int FI = PFI->getBasePointerSaveIndex(); assert(FI && "No Base Pointer Save Slot!"); - MFI.setObjectOffset(FI, LowerBound + MFI.getObjectOffset(FI)); + + unsigned BP = RegInfo->getBaseRegister(MF); + if (PPC::G8RCRegClass.contains(BP)) { + MinG8R = std::min<unsigned>(MinG8R, BP); + HasG8SaveArea = true; + } else if (PPC::GPRCRegClass.contains(BP)) { + MinGPR = std::min<unsigned>(MinGPR, BP); + HasGPSaveArea = true; + } } // General register save area starts right below the Floating-point diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 17bdd595da10..144aea850833 100644 --- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -410,6 +410,11 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM, // To handle counter-based loop conditions.
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i1, Custom); + setOperationAction(ISD::INTRINSIC_VOID, MVT::i8, Custom); + setOperationAction(ISD::INTRINSIC_VOID, MVT::i16, Custom); + setOperationAction(ISD::INTRINSIC_VOID, MVT::i32, Custom); + setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom); + // Comparisons that require checking two conditions. setCondCodeAction(ISD::SETULT, MVT::f32, Expand); setCondCodeAction(ISD::SETULT, MVT::f64, Expand); @@ -8184,6 +8189,26 @@ SDValue PPCTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, return Flags; } +SDValue PPCTargetLowering::LowerINTRINSIC_VOID(SDValue Op, + SelectionDAG &DAG) const { + // SelectionDAGBuilder::visitTargetIntrinsic may insert one extra chain to + // the beginning of the argument list. + int ArgStart = isa<ConstantSDNode>(Op.getOperand(0)) ? 0 : 1; + SDLoc DL(Op); + switch (cast<ConstantSDNode>(Op.getOperand(ArgStart))->getZExtValue()) { + case Intrinsic::ppc_cfence: { + assert(Subtarget.isPPC64() && "Only 64-bit is supported for now."); + return SDValue(DAG.getMachineNode(PPC::CFENCE8, DL, MVT::Other, + DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, + Op.getOperand(ArgStart + 1))), + 0); + } + default: + break; + } + return SDValue(); +} + SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const { SDLoc dl(Op); @@ -8649,6 +8674,9 @@ SDValue PPCTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { // Frame & Return address. case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG); case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG); + + case ISD::INTRINSIC_VOID: + return LowerINTRINSIC_VOID(Op, DAG); } } @@ -8753,12 +8781,19 @@ Instruction *PPCTargetLowering::emitLeadingFence(IRBuilder<> &Builder, Instruction *PPCTargetLowering::emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst, AtomicOrdering Ord) const { - if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) + if (Inst->hasAtomicLoad() && isAcquireOrStronger(Ord)) { + // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and + // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html + // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification. + if (isa<LoadInst>(Inst) && Subtarget.isPPC64()) + return Builder.CreateCall( + Intrinsic::getDeclaration( + Builder.GetInsertBlock()->getParent()->getParent(), + Intrinsic::ppc_cfence, {Inst->getType()}), + {Inst}); + // FIXME: Can use isync for rmw operation. return callIntrinsic(Builder, Intrinsic::ppc_lwsync); - // FIXME: this is too conservative, a dependent branch + isync is enough. - // See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html and - // http://www.rdrop.com/users/paulmck/scalability/paper/N2745r.2011.03.04a.html - // and http://www.cl.cam.ac.uk/~pes20/cppppc/ for justification.
+ } return nullptr; } diff --git a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h index 4fc744257262..acb77943b118 100644 --- a/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h +++ b/contrib/llvm/lib/Target/PowerPC/PPCISelLowering.h @@ -905,6 +905,7 @@ namespace llvm { SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const; SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const; diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td index a8433919f0f3..a3f894c81a01 100644 --- a/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td +++ b/contrib/llvm/lib/Target/PowerPC/PPCInstr64Bit.td @@ -983,6 +983,10 @@ def LDgotTprelL: Pseudo<(outs g8rc:$rD), (ins s16imm64:$disp, g8rc_nox0:$reg), [(set i64:$rD, (PPCldGotTprelL tglobaltlsaddr:$disp, i64:$reg))]>, isPPC64; + +let isBarrier = 1, isPseudo = 1, Defs = [CR7], Itinerary = IIC_LdStSync in +def CFENCE8 : Pseudo<(outs), (ins g8rc:$cr), "#CFENCE8", []>; + def : Pat<(PPCaddTls i64:$in, tglobaltlsaddr:$g), (ADD8TLS $in, tglobaltlsaddr:$g)>; def ADDIStlsgdHA: Pseudo<(outs g8rc:$rD), (ins g8rc_nox0:$reg, s16imm64:$disp), diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp index 790a8902b3d2..3afcec1248d5 100644 --- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp +++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp @@ -1873,6 +1873,8 @@ PPCInstrInfo::getSerializableBitmaskMachineOperandTargetFlags() const { } bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { + auto &MBB = *MI.getParent(); + auto DL = MI.getDebugLoc(); switch (MI.getOpcode()) { case TargetOpcode::LOAD_STACK_GUARD: { assert(Subtarget.isTargetLinux() && @@ -1920,6 +1922,17 @@ bool PPCInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { MI.setDesc(get(Opcode)); return true; } + case PPC::CFENCE8: { + auto Val = MI.getOperand(0).getReg(); + BuildMI(MBB, MI, DL, get(PPC::CMPW), PPC::CR7).addReg(Val).addReg(Val); + BuildMI(MBB, MI, DL, get(PPC::CTRL_DEP)) + .addImm(PPC::PRED_NE_MINUS) + .addReg(PPC::CR7) + .addImm(1); + MI.setDesc(get(PPC::ISYNC)); + MI.RemoveOperand(0); + return true; + } } return false; } diff --git a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td index 1af5e7f28342..0766cfe4a987 100644 --- a/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td +++ b/contrib/llvm/lib/Target/PowerPC/PPCInstrInfo.td @@ -1223,9 +1223,15 @@ let isBranch = 1, isTerminator = 1, hasCtrlDep = 1, PPC970_Unit = 7 in { // FIXME: should be able to write a pattern for PPCcondbranch, but can't use // a two-value operand where a dag node expects two operands. :( let isCodeGenOnly = 1 in { - def BCC : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst), - "b${cond:cc}${cond:pm} ${cond:reg}, $dst" - /*[(PPCcondbranch crrc:$crS, imm:$opc, bb:$dst)]*/>; + class BCC_class : BForm<16, 0, 0, (outs), (ins pred:$cond, condbrtarget:$dst), + "b${cond:cc}${cond:pm} ${cond:reg}, $dst" + /*[(PPCcondbranch crrc:$crS, imm:$opc, bb:$dst)]*/>; + def BCC : BCC_class; + + // The same as BCC, except that it's not a terminator. 
Used for introducing + // control flow dependency without creating new blocks. + let isTerminator = 0 in def CTRL_DEP : BCC_class; + def BCCA : BForm<16, 1, 0, (outs), (ins pred:$cond, abscondbrtarget:$dst), "b${cond:cc}a${cond:pm} ${cond:reg}, $dst">; diff --git a/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp b/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp index f56b238f91e6..6a3dc6799c43 100644 --- a/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp +++ b/contrib/llvm/lib/Target/SystemZ/SystemZTargetTransformInfo.cpp @@ -325,6 +325,30 @@ int SystemZTTIImpl::getArithmeticInstrCost( unsigned ScalarBits = Ty->getScalarSizeInBits(); + // Div with a constant which is a power of 2 will be converted by + // DAGCombiner to use shifts. With vector shift-element instructions, a + // vector sdiv costs about as much as a scalar one. + const unsigned SDivCostEstimate = 4; + bool SDivPow2 = false; + bool UDivPow2 = false; + if ((Opcode == Instruction::SDiv || Opcode == Instruction::UDiv) && + Args.size() == 2) { + const ConstantInt *CI = nullptr; + if (const Constant *C = dyn_cast<Constant>(Args[1])) { + if (C->getType()->isVectorTy()) + CI = dyn_cast_or_null<const ConstantInt>(C->getSplatValue()); + else + CI = dyn_cast<const ConstantInt>(C); + } + if (CI != nullptr && + (CI->getValue().isPowerOf2() || (-CI->getValue()).isPowerOf2())) { + if (Opcode == Instruction::SDiv) + SDivPow2 = true; + else + UDivPow2 = true; + } + } + if (Ty->isVectorTy()) { assert (ST->hasVector() && "getArithmeticInstrCost() called with vector type."); unsigned VF = Ty->getVectorNumElements(); @@ -333,10 +357,13 @@ int SystemZTTIImpl::getArithmeticInstrCost( // These vector operations are custom handled, but are still supported // with one instruction per vector, regardless of element size. if (Opcode == Instruction::Shl || Opcode == Instruction::LShr || - Opcode == Instruction::AShr) { + Opcode == Instruction::AShr || UDivPow2) { return NumVectors; } + if (SDivPow2) + return (NumVectors * SDivCostEstimate); + // These FP operations are supported with a single vector instruction for // double (base implementation assumes float generally costs 2). For // FP128, the scalar cost is 1, and there is no overhead since the values @@ -395,6 +422,11 @@ int SystemZTTIImpl::getArithmeticInstrCost( // 2 * ipm sequences ; xor ; shift ; compare return 7; + if (UDivPow2) + return 1; + if (SDivPow2) + return SDivCostEstimate; + // An extra extension for narrow types is needed. if ((Opcode == Instruction::SDiv || Opcode == Instruction::SRem)) // sext of op(s) for narrow types diff --git a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp index 5fd4a8d1949e..ba39b6cdb568 100644 --- a/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp +++ b/contrib/llvm/lib/Target/WebAssembly/WebAssemblyRegColoring.cpp @@ -140,8 +140,7 @@ bool WebAssemblyRegColoring::runOnMachineFunction(MachineFunction &MF) { // Check if it's possible to reuse any of the used colors.
if (!MRI->isLiveIn(Old)) - for (int C(UsedColors.find_first()); C != -1; - C = UsedColors.find_next(C)) { + for (unsigned C : UsedColors.set_bits()) { if (MRI->getRegClass(SortedIntervals[C]->reg) != RC) continue; for (LiveInterval *OtherLI : Assignments[C]) diff --git a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt index 8e8e5fd1eff1..54619589c341 100644 --- a/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt +++ b/contrib/llvm/lib/Target/WebAssembly/known_gcc_test_failures.txt @@ -33,9 +33,6 @@ built-in-setjmp.c pr60003.c # Error in the program / unsupported by Clang. -scal-to-vec1.c -scal-to-vec2.c -scal-to-vec3.c 20000822-1.c 20010209-1.c 20010605-1.c diff --git a/contrib/llvm/lib/Target/X86/X86.td b/contrib/llvm/lib/Target/X86/X86.td index 3a421fe77392..784c3a6557ff 100644 --- a/contrib/llvm/lib/Target/X86/X86.td +++ b/contrib/llvm/lib/Target/X86/X86.td @@ -235,8 +235,6 @@ def FeatureLEAUsesAG : SubtargetFeature<"lea-uses-ag", "LEAUsesAG", "true", "LEA instruction needs inputs at AG stage">; def FeatureSlowLEA : SubtargetFeature<"slow-lea", "SlowLEA", "true", "LEA instruction with certain arguments is slow">; -def FeatureSlow3OpsLEA : SubtargetFeature<"slow-3ops-lea", "Slow3OpsLEA", "true", - "LEA instruction with 3 ops or certain registers is slow">; def FeatureSlowIncDec : SubtargetFeature<"slow-incdec", "SlowIncDec", "true", "INC and DEC instructions are slower than ADD and SUB">; def FeatureSoftFloat @@ -482,7 +480,6 @@ def SNBFeatures : ProcessorFeatures<[], [ FeatureXSAVE, FeatureXSAVEOPT, FeatureLAHFSAHF, - FeatureSlow3OpsLEA, FeatureFastScalarFSQRT, FeatureFastSHLDRotate ]>; diff --git a/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp b/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp index 9f649dad8bc0..2cd4c1a3e7b3 100644 --- a/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp +++ b/contrib/llvm/lib/Target/X86/X86FixupLEAs.cpp @@ -27,26 +27,20 @@ #include "llvm/Target/TargetInstrInfo.h" using namespace llvm; -namespace llvm { -void initializeFixupLEAPassPass(PassRegistry &); -} - -#define FIXUPLEA_DESC "X86 LEA Fixup" -#define FIXUPLEA_NAME "x86-fixup-LEAs" - -#define DEBUG_TYPE FIXUPLEA_NAME +#define DEBUG_TYPE "x86-fixup-LEAs" STATISTIC(NumLEAs, "Number of LEA instructions created"); namespace { class FixupLEAPass : public MachineFunctionPass { enum RegUsageState { RU_NotUsed, RU_Write, RU_Read }; - + static char ID; /// \brief Loop over all of the instructions in the basic block /// replacing applicable instructions with LEA instructions, /// where appropriate. bool processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI); + StringRef getPassName() const override { return "X86 LEA Fixup"; } /// \brief Given a machine register, look for the instruction /// which writes it in the current basic block. If found, @@ -68,22 +62,6 @@ class FixupLEAPass : public MachineFunctionPass { void processInstructionForSLM(MachineBasicBlock::iterator &I, MachineFunction::iterator MFI); - - /// \brief Given a LEA instruction which is unprofitable - /// on SNB+ try to replace it with other instructions. 
- /// According to Intel's Optimization Reference Manual: - /// " For LEA instructions with three source operands and some specific - /// situations, instruction latency has increased to 3 cycles, and must - /// dispatch via port 1: - /// - LEA that has all three source operands: base, index, and offset - /// - LEA that uses base and index registers where the base is EBP, RBP, - /// or R13 - /// - LEA that uses RIP relative addressing mode - /// - LEA that uses 16-bit addressing mode " - /// This function currently handles the first 2 cases only. - MachineInstr *processInstrForSlow3OpLEA(MachineInstr &MI, - MachineFunction::iterator MFI); - /// \brief Look for LEAs that add 1 to reg or subtract 1 from reg /// and convert them to INC or DEC respectively. bool fixupIncDec(MachineBasicBlock::iterator &I, @@ -107,13 +85,7 @@ class FixupLEAPass : public MachineFunctionPass { MachineBasicBlock::iterator &MBBI) const; public: - static char ID; - - StringRef getPassName() const override { return FIXUPLEA_DESC; } - - FixupLEAPass() : MachineFunctionPass(ID) { - initializeFixupLEAPassPass(*PassRegistry::getPassRegistry()); - } + FixupLEAPass() : MachineFunctionPass(ID) {} /// \brief Loop over all of the basic blocks, /// replacing instructions by equivalent LEA instructions @@ -132,11 +104,8 @@ class FixupLEAPass : public MachineFunctionPass { bool OptIncDec; bool OptLEA; }; -} - char FixupLEAPass::ID = 0; - -INITIALIZE_PASS(FixupLEAPass, FIXUPLEA_NAME, FIXUPLEA_DESC, false, false) +} MachineInstr * FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI, @@ -199,7 +168,7 @@ bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) { MF = &Func; const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>(); OptIncDec = !ST.slowIncDec() || Func.getFunction()->optForMinSize(); - OptLEA = ST.LEAusesAG() || ST.slowLEA() || ST.slow3OpsLEA(); + OptLEA = ST.LEAusesAG() || ST.slowLEA(); if (!OptLEA && !OptIncDec) return false; @@ -273,64 +242,9 @@ FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I, return MachineBasicBlock::iterator(); } -static inline bool isLEA(const int Opcode) { - return Opcode == X86::LEA16r || Opcode == X86::LEA32r || - Opcode == X86::LEA64r || Opcode == X86::LEA64_32r; -} - -static inline bool isInefficientLEAReg(unsigned int Reg) { - return Reg == X86::EBP || Reg == X86::RBP || Reg == X86::R13; -} - -static inline bool isRegOperand(const MachineOperand &Op) { - return Op.isReg() && Op.getReg() != X86::NoRegister; -} -/// hasIneffecientLEARegs - LEA that uses base and index registers -/// where the base is EBP, RBP, or R13 -static inline bool hasInefficientLEABaseReg(const MachineOperand &Base, - const MachineOperand &Index) { - return Base.isReg() && isInefficientLEAReg(Base.getReg()) && - isRegOperand(Index); -} - -static inline bool hasLEAOffset(const MachineOperand &Offset) { - return (Offset.isImm() && Offset.getImm() != 0) || Offset.isGlobal(); -} - -// LEA instruction that has all three operands: offset, base and index -static inline bool isThreeOperandsLEA(const MachineOperand &Base, - const MachineOperand &Index, - const MachineOperand &Offset) { - return isRegOperand(Base) && isRegOperand(Index) && hasLEAOffset(Offset); -} - -static inline int getADDrrFromLEA(int LEAOpcode) { - switch (LEAOpcode) { - default: - llvm_unreachable("Unexpected LEA instruction"); - case X86::LEA16r: - return X86::ADD16rr; - case X86::LEA32r: - return X86::ADD32rr; - case X86::LEA64_32r: - case X86::LEA64r: - return X86::ADD64rr; - } -} - -static inline int
getADDriFromLEA(int LEAOpcode, const MachineOperand &Offset) { - bool IsInt8 = Offset.isImm() && isInt<8>(Offset.getImm()); - switch (LEAOpcode) { - default: - llvm_unreachable("Unexpected LEA instruction"); - case X86::LEA16r: - return IsInt8 ? X86::ADD16ri8 : X86::ADD16ri; - case X86::LEA32r: - case X86::LEA64_32r: - return IsInt8 ? X86::ADD32ri8 : X86::ADD32ri; - case X86::LEA64r: - return IsInt8 ? X86::ADD64ri8 : X86::ADD64ri32; - } +static inline bool isLEA(const int opcode) { + return opcode == X86::LEA16r || opcode == X86::LEA32r || + opcode == X86::LEA64r || opcode == X86::LEA64_32r; } /// isLEASimpleIncOrDec - Does this LEA have one these forms: @@ -423,8 +337,8 @@ void FixupLEAPass::seekLEAFixup(MachineOperand &p, void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I, MachineFunction::iterator MFI) { MachineInstr &MI = *I; - const int Opcode = MI.getOpcode(); - if (!isLEA(Opcode)) + const int opcode = MI.getOpcode(); + if (!isLEA(opcode)) return; if (MI.getOperand(5).getReg() != 0 || !MI.getOperand(4).isImm() || !TII->isSafeToClobberEFLAGS(*MFI, I)) @@ -436,144 +350,55 @@ void FixupLEAPass::processInstructionForSLM(MachineBasicBlock::iterator &I, return; if (MI.getOperand(2).getImm() > 1) return; + int addrr_opcode, addri_opcode; + switch (opcode) { + default: + llvm_unreachable("Unexpected LEA instruction"); + case X86::LEA16r: + addrr_opcode = X86::ADD16rr; + addri_opcode = X86::ADD16ri; + break; + case X86::LEA32r: + addrr_opcode = X86::ADD32rr; + addri_opcode = X86::ADD32ri; + break; + case X86::LEA64_32r: + case X86::LEA64r: + addrr_opcode = X86::ADD64rr; + addri_opcode = X86::ADD64ri32; + break; + } DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump();); DEBUG(dbgs() << "FixLEA: Replaced by: ";); MachineInstr *NewMI = nullptr; + const MachineOperand &Dst = MI.getOperand(0); // Make ADD instruction for two registers writing to LEA's destination if (SrcR1 != 0 && SrcR2 != 0) { - const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(Opcode)); - const MachineOperand &Src = MI.getOperand(SrcR1 == DstR ? 3 : 1); - NewMI = - BuildMI(*MFI, I, MI.getDebugLoc(), ADDrr, DstR).addReg(DstR).add(Src); + const MachineOperand &Src1 = MI.getOperand(SrcR1 == DstR ? 1 : 3); + const MachineOperand &Src2 = MI.getOperand(SrcR1 == DstR ? 3 : 1); + NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addrr_opcode)) + .add(Dst) + .add(Src1) + .add(Src2); + MFI->insert(I, NewMI); DEBUG(NewMI->dump();); } // Make ADD instruction for immediate if (MI.getOperand(4).getImm() != 0) { - const MCInstrDesc &ADDri = - TII->get(getADDriFromLEA(Opcode, MI.getOperand(4))); const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 
1 : 3); - NewMI = BuildMI(*MFI, I, MI.getDebugLoc(), ADDri, DstR) + NewMI = BuildMI(*MF, MI.getDebugLoc(), TII->get(addri_opcode)) + .add(Dst) .add(SrcR) .addImm(MI.getOperand(4).getImm()); + MFI->insert(I, NewMI); DEBUG(NewMI->dump();); } if (NewMI) { MFI->erase(I); - I = NewMI; + I = static_cast<MachineBasicBlock::iterator>(NewMI); } } -MachineInstr * -FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI, - MachineFunction::iterator MFI) { - - const int LEAOpcode = MI.getOpcode(); - if (!isLEA(LEAOpcode)) - return nullptr; - - const MachineOperand &Dst = MI.getOperand(0); - const MachineOperand &Base = MI.getOperand(1); - const MachineOperand &Scale = MI.getOperand(2); - const MachineOperand &Index = MI.getOperand(3); - const MachineOperand &Offset = MI.getOperand(4); - const MachineOperand &Segment = MI.getOperand(5); - - if (!(isThreeOperandsLEA(Base, Index, Offset) || - hasInefficientLEABaseReg(Base, Index)) || - !TII->isSafeToClobberEFLAGS(*MFI, MI) || - Segment.getReg() != X86::NoRegister) - return nullptr; - - unsigned int DstR = Dst.getReg(); - unsigned int BaseR = Base.getReg(); - unsigned int IndexR = Index.getReg(); - unsigned SSDstR = - (LEAOpcode == X86::LEA64_32r) ? getX86SubSuperRegister(DstR, 64) : DstR; - bool IsScale1 = Scale.getImm() == 1; - bool IsInefficientBase = isInefficientLEAReg(BaseR); - bool IsInefficientIndex = isInefficientLEAReg(IndexR); - - // Skip these cases since it takes more than 2 instructions - // to replace the LEA instruction. - if (IsInefficientBase && SSDstR == BaseR && !IsScale1) - return nullptr; - if (LEAOpcode == X86::LEA64_32r && IsInefficientBase && - (IsInefficientIndex || !IsScale1)) - return nullptr; - - const DebugLoc DL = MI.getDebugLoc(); - const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(LEAOpcode)); - const MCInstrDesc &ADDri = TII->get(getADDriFromLEA(LEAOpcode, Offset)); - - DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MI.dump();); - DEBUG(dbgs() << "FixLEA: Replaced by: ";); - - // First try to replace LEA with one or two (for the 3-op LEA case) - // add instructions: - // 1.lea (%base,%index,1), %base => add %index,%base - // 2.lea (%base,%index,1), %index => add %base,%index - if (IsScale1 && (DstR == BaseR || DstR == IndexR)) { - const MachineOperand &Src = DstR == BaseR ? Index : Base; - MachineInstr *NewMI = - BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Src); - DEBUG(NewMI->dump();); - // Create ADD instruction for the Offset in case of 3-Ops LEA. - if (hasLEAOffset(Offset)) { - NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset); - DEBUG(NewMI->dump();); - } - return NewMI; - } - // If the base is inefficient try switching the index and base operands, - // otherwise just break the 3-Ops LEA inst into 2-Ops LEA + ADD instruction: - // lea offset(%base,%index,scale),%dst => - // lea (%base,%index,scale); add offset,%dst - if (!IsInefficientBase || (!IsInefficientIndex && IsScale1)) { - MachineInstr *NewMI = BuildMI(*MFI, MI, DL, TII->get(LEAOpcode)) - .add(Dst) - .add(IsInefficientBase ? Index : Base) - .add(Scale) - .add(IsInefficientBase ? Base : Index) - .addImm(0) - .add(Segment); - DEBUG(NewMI->dump();); - // Create ADD instruction for the Offset in case of 3-Ops LEA.
- if (hasLEAOffset(Offset)) { - NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset); - DEBUG(NewMI->dump();); - } - return NewMI; - } - // Handle the rest of the cases with inefficient base register: - assert(SSDstR != BaseR && "SSDstR == BaseR should be handled already!"); - assert(IsInefficientBase && "efficient base should be handled already!"); - - // lea (%base,%index,1), %dst => mov %base,%dst; add %index,%dst - if (IsScale1 && !hasLEAOffset(Offset)) { - TII->copyPhysReg(*MFI, MI, DL, DstR, BaseR, Base.isKill()); - DEBUG(MI.getPrevNode()->dump();); - - MachineInstr *NewMI = - BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Index); - DEBUG(NewMI->dump();); - return NewMI; - } - // lea offset(%base,%index,scale), %dst => - // lea offset( ,%index,scale), %dst; add %base,%dst - MachineInstr *NewMI = BuildMI(*MFI, MI, DL, TII->get(LEAOpcode)) - .add(Dst) - .addReg(0) - .add(Scale) - .add(Index) - .add(Offset) - .add(Segment); - DEBUG(NewMI->dump();); - - NewMI = BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Base); - DEBUG(NewMI->dump();); - return NewMI; -} - bool FixupLEAPass::processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI) { @@ -585,16 +410,8 @@ bool FixupLEAPass::processBasicBlock(MachineFunction &MF, if (OptLEA) { if (MF.getSubtarget<X86Subtarget>().isSLM()) processInstructionForSLM(I, MFI); - - else { - if (MF.getSubtarget<X86Subtarget>().slow3OpsLEA()) { - if (auto *NewMI = processInstrForSlow3OpLEA(*I, MFI)) { - MFI->erase(I); - I = NewMI; - } - } else - processInstruction(I, MFI); - } + else + processInstruction(I, MFI); } } return false; diff --git a/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp b/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp index de58d719acb4..5eb5ad52840a 100644 --- a/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp +++ b/contrib/llvm/lib/Target/X86/X86InstructionSelector.cpp @@ -19,6 +19,7 @@ #include "X86Subtarget.h" #include "X86TargetMachine.h" #include "llvm/CodeGen/GlobalISel/InstructionSelector.h" +#include "llvm/CodeGen/GlobalISel/Utils.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstr.h" @@ -72,6 +73,9 @@ class X86InstructionSelector : public InstructionSelector { bool selectCmp(MachineInstr &I, MachineRegisterInfo &MRI, MachineFunction &MF) const; + bool selectUadde(MachineInstr &I, MachineRegisterInfo &MRI, + MachineFunction &MF) const; + const X86TargetMachine &TM; const X86Subtarget &STI; const X86InstrInfo &TII; @@ -243,6 +247,8 @@ bool X86InstructionSelector::select(MachineInstr &I) const { return true; if (selectCmp(I, MRI, MF)) return true; + if (selectUadde(I, MRI, MF)) + return true; return false; } @@ -564,6 +570,66 @@ bool X86InstructionSelector::selectCmp(MachineInstr &I, return true; } +bool X86InstructionSelector::selectUadde(MachineInstr &I, + MachineRegisterInfo &MRI, + MachineFunction &MF) const { + if (I.getOpcode() != TargetOpcode::G_UADDE) + return false; + + const unsigned DstReg = I.getOperand(0).getReg(); + const unsigned CarryOutReg = I.getOperand(1).getReg(); + const unsigned Op0Reg = I.getOperand(2).getReg(); + const unsigned Op1Reg = I.getOperand(3).getReg(); + unsigned CarryInReg = I.getOperand(4).getReg(); + + const LLT DstTy = MRI.getType(DstReg); + + if (DstTy != LLT::scalar(32)) + return false; + + // find CarryIn def instruction.
+ MachineInstr *Def = MRI.getVRegDef(CarryInReg); + while (Def->getOpcode() == TargetOpcode::G_TRUNC) { + CarryInReg = Def->getOperand(1).getReg(); + Def = MRI.getVRegDef(CarryInReg); + } + + unsigned Opcode; + if (Def->getOpcode() == TargetOpcode::G_UADDE) { + // carry set by prev ADD. + + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), X86::EFLAGS) + .addReg(CarryInReg); + + if (!RBI.constrainGenericRegister(CarryInReg, X86::GR32RegClass, MRI)) + return false; + + Opcode = X86::ADC32rr; + } else if (auto val = getConstantVRegVal(CarryInReg, MRI)) { + // carry is constant, support only 0. + if (*val != 0) + return false; + + Opcode = X86::ADD32rr; + } else + return false; + + MachineInstr &AddInst = + *BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode), DstReg) + .addReg(Op0Reg) + .addReg(Op1Reg); + + BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(X86::COPY), CarryOutReg) + .addReg(X86::EFLAGS); + + if (!constrainSelectedInstRegOperands(AddInst, TII, TRI, RBI) || + !RBI.constrainGenericRegister(CarryOutReg, X86::GR32RegClass, MRI)) + return false; + + I.eraseFromParent(); + return true; +} + InstructionSelector * llvm::createX86InstructionSelector(const X86TargetMachine &TM, X86Subtarget &Subtarget, diff --git a/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp b/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp index cf26238c0239..8ce240714f17 100644 --- a/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp +++ b/contrib/llvm/lib/Target/X86/X86LegalizerInfo.cpp @@ -59,6 +59,11 @@ void X86LegalizerInfo::setLegalizerInfo32bit() { for (auto Ty : {s8, s16, s32}) setAction({BinOp, Ty}, Legal); + for (unsigned Op : {G_UADDE}) { + setAction({Op, s32}, Legal); + setAction({Op, 1, s1}, Legal); + } + for (unsigned MemOp : {G_LOAD, G_STORE}) { for (auto Ty : {s8, s16, s32, p0}) setAction({MemOp, Ty}, Legal); diff --git a/contrib/llvm/lib/Target/X86/X86Subtarget.h b/contrib/llvm/lib/Target/X86/X86Subtarget.h index 02be95e2e556..de1514243aeb 100644 --- a/contrib/llvm/lib/Target/X86/X86Subtarget.h +++ b/contrib/llvm/lib/Target/X86/X86Subtarget.h @@ -253,11 +253,6 @@ class X86Subtarget final : public X86GenSubtargetInfo { /// True if the LEA instruction with certain arguments is slow bool SlowLEA; - /// True if the LEA instruction has all three source operands: base, index, - /// and offset or if the LEA instruction uses base and index registers where - /// the base is EBP, RBP,or R13 - bool Slow3OpsLEA; - /// True if INC and DEC instructions are slow when writing to flags bool SlowIncDec; @@ -495,7 +490,6 @@ class X86Subtarget final : public X86GenSubtargetInfo { bool callRegIndirect() const { return CallRegIndirect; } bool LEAusesAG() const { return LEAUsesAG; } bool slowLEA() const { return SlowLEA; } - bool slow3OpsLEA() const { return Slow3OpsLEA; } bool slowIncDec() const { return SlowIncDec; } bool hasCDI() const { return HasCDI; } bool hasPFI() const { return HasPFI; } diff --git a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp index c6a90725d89c..9a82e6e50463 100644 --- a/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp +++ b/contrib/llvm/lib/Target/X86/X86TargetMachine.cpp @@ -61,7 +61,6 @@ static cl::opt EnableMachineCombinerPass("x86-machine-combiner", namespace llvm { void initializeWinEHStatePassPass(PassRegistry &); -void initializeFixupLEAPassPass(PassRegistry &); void initializeX86ExecutionDepsFixPass(PassRegistry &); } // end namespace llvm @@ -76,7 +75,6 @@ extern "C" void LLVMInitializeX86Target() { 
   initializeWinEHStatePassPass(PR);
   initializeFixupBWInstPassPass(PR);
   initializeEvexToVexInstPassPass(PR);
-  initializeFixupLEAPassPass(PR);
   initializeX86ExecutionDepsFixPass(PR);
 }
 
diff --git a/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index 80e18161a94b..8566bd91c89e 100644
--- a/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -1392,6 +1392,16 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
   // CTLZ: llvm\test\CodeGen\X86\vector-lzcnt-*.ll
   // CTPOP: llvm\test\CodeGen\X86\vector-popcnt-*.ll
   // CTTZ: llvm\test\CodeGen\X86\vector-tzcnt-*.ll
+  static const CostTblEntry AVX512BWCostTbl[] = {
+    { ISD::BITREVERSE, MVT::v8i64,   5 },
+    { ISD::BITREVERSE, MVT::v16i32,  5 },
+    { ISD::BITREVERSE, MVT::v32i16,  5 },
+    { ISD::BITREVERSE, MVT::v64i8,   5 },
+  };
+  static const CostTblEntry AVX512CostTbl[] = {
+    { ISD::BITREVERSE, MVT::v8i64,  36 },
+    { ISD::BITREVERSE, MVT::v16i32, 24 },
+  };
   static const CostTblEntry XOPCostTbl[] = {
     { ISD::BITREVERSE, MVT::v4i64,   4 },
     { ISD::BITREVERSE, MVT::v8i32,   4 },
@@ -1550,6 +1560,14 @@ int X86TTIImpl::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
   MVT MTy = LT.second;
 
   // Attempt to lookup cost.
+  if (ST->hasBWI())
+    if (const auto *Entry = CostTableLookup(AVX512BWCostTbl, ISD, MTy))
+      return LT.first * Entry->Cost;
+
+  if (ST->hasAVX512())
+    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
+      return LT.first * Entry->Cost;
+
   if (ST->hasXOP())
     if (const auto *Entry = CostTableLookup(XOPCostTbl, ISD, MTy))
       return LT.first * Entry->Cost;
 
diff --git a/contrib/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/contrib/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 4480220f2cd4..417d57f7625b 100644
--- a/contrib/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/contrib/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -347,6 +347,27 @@ static StructType *buildFrameType(Function &F, coro::Shape &Shape,
   return FrameTy;
 }
 
+// We need to make room to insert a spill after the initial PHIs, but before
+// the catchswitch instruction. Placing it earlier violates the requirement
+// that a catchswitch, like all other EH pads, must be the first non-PHI in a block.
+//
+// Split away the catchswitch into a separate block and insert in its place:
+//
+//    cleanuppad <InsertPt> cleanupret.
+//
+// The cleanupret instruction will act as an insert point for the spill.
+static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
+  BasicBlock *CurrentBlock = CatchSwitch->getParent();
+  BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
+  CurrentBlock->getTerminator()->eraseFromParent();
+
+  auto *CleanupPad =
+      CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
+  auto *CleanupRet =
+      CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
+  return CleanupRet;
+}
+
 // Replace all alloca and SSA values that are accessed across suspend points
 // with GetElementPointer from coroutine frame + loads and stores. Create an
 // AllocaSpillBB that will become the new entry block for the resume parts of
@@ -437,8 +458,11 @@ static Instruction *insertSpills(SpillInfo &Spills, coro::Shape &Shape) {
         InsertPt = NewBB->getTerminator();
       } else if (dyn_cast<PHINode>(CurrentValue)) {
         // Skip the PHINodes and EH pads instructions.
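The G_UADDE support added above (legality for s32 plus selection to ADD32rr/ADC32rr) exists to build wide adds out of 32-bit pieces: the low add's carry-out feeds the high add's carry-in. A freestanding sketch of the arithmetic such a pair computes (illustrative only, not part of the patch):

#include <cassert>
#include <cstdint>

// A 64-bit add built from two 32-bit adds: the low half is a G_UADDE with a
// constant-zero carry-in (selected to ADD32rr); the high half consumes the
// low half's carry-out (selected to ADC32rr).
int main() {
  uint64_t A = 0x1ffffffffULL, B = 0x300000001ULL;
  uint32_t ALo = (uint32_t)A, AHi = (uint32_t)(A >> 32);
  uint32_t BLo = (uint32_t)B, BHi = (uint32_t)(B >> 32);
  uint32_t Lo = ALo + BLo;        // low add, carry-in == 0
  uint32_t Carry = Lo < ALo;      // carry-out of the low add
  uint32_t Hi = AHi + BHi + Carry; // high add consumes the carry
  assert((((uint64_t)Hi << 32) | Lo) == A + B);
  return 0;
}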
-      InsertPt =
-          &*cast<Instruction>(E.def())->getParent()->getFirstInsertionPt();
+      BasicBlock *DefBlock = cast<Instruction>(E.def())->getParent();
+      if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
+        InsertPt = splitBeforeCatchSwitch(CSI);
+      else
+        InsertPt = &*DefBlock->getFirstInsertionPt();
     } else {
       // For all other values, the spill is placed immediately after
       // the definition.
 
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/contrib/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 1424f61fe701..f88a2c6acc3f 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -74,6 +74,27 @@ static inline unsigned getComplexity(Value *V) {
   return isa<Constant>(V) ? (isa<UndefValue>(V) ? 0 : 1) : 2;
 }
 
+/// Predicate canonicalization reduces the number of patterns that need to be
+/// matched by other transforms. For example, we may swap the operands of a
+/// conditional branch or select to create a compare with a canonical (inverted)
+/// predicate which is then more likely to be matched with other values.
+static inline bool isCanonicalPredicate(CmpInst::Predicate Pred) {
+  switch (Pred) {
+  case CmpInst::ICMP_NE:
+  case CmpInst::ICMP_ULE:
+  case CmpInst::ICMP_SLE:
+  case CmpInst::ICMP_UGE:
+  case CmpInst::ICMP_SGE:
+  // TODO: There are 16 FCMP predicates. Should others be (not) canonical?
+  case CmpInst::FCMP_ONE:
+  case CmpInst::FCMP_OLE:
+  case CmpInst::FCMP_OGE:
+    return false;
+  default:
+    return true;
+  }
+}
+
 /// \brief Add one to a Constant
 static inline Constant *AddOne(Constant *C) {
   return ConstantExpr::getAdd(C, ConstantInt::get(C->getType(), 1));
 
diff --git a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 65b1148cb03b..7ed9fd566b37 100644
--- a/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/contrib/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2210,37 +2210,17 @@ Instruction *InstCombiner::visitBranchInst(BranchInst &BI) {
     return &BI;
   }
 
-  // Canonicalize fcmp_one -> fcmp_oeq
-  FCmpInst::Predicate FPred; Value *Y;
-  if (match(&BI, m_Br(m_OneUse(m_FCmp(FPred, m_Value(X), m_Value(Y))),
-                      TrueDest, FalseDest))) {
-    // TODO: Why are we only transforming these 3 predicates?
-    if (FPred == FCmpInst::FCMP_ONE || FPred == FCmpInst::FCMP_OLE ||
-        FPred == FCmpInst::FCMP_OGE) {
-      FCmpInst *Cond = cast<FCmpInst>(BI.getCondition());
-      Cond->setPredicate(FCmpInst::getInversePredicate(FPred));
-
-      // Swap Destinations and condition.
-      BI.swapSuccessors();
-      Worklist.Add(Cond);
-      return &BI;
-    }
-  }
-
-  // Canonicalize icmp_ne -> icmp_eq
-  ICmpInst::Predicate IPred;
-  if (match(&BI, m_Br(m_OneUse(m_ICmp(IPred, m_Value(X), m_Value(Y))),
-                      TrueDest, FalseDest))) {
-    if (IPred == ICmpInst::ICMP_NE || IPred == ICmpInst::ICMP_ULE ||
-        IPred == ICmpInst::ICMP_SLE || IPred == ICmpInst::ICMP_UGE ||
-        IPred == ICmpInst::ICMP_SGE) {
-      ICmpInst *Cond = cast<ICmpInst>(BI.getCondition());
-      Cond->setPredicate(ICmpInst::getInversePredicate(IPred));
-      // Swap Destinations and condition.
-      BI.swapSuccessors();
-      Worklist.Add(Cond);
-      return &BI;
-    }
+  // Canonicalize, for example, icmp_ne -> icmp_eq or fcmp_one -> fcmp_oeq.
+  CmpInst::Predicate Pred;
+  if (match(&BI, m_Br(m_OneUse(m_Cmp(Pred, m_Value(), m_Value())), TrueDest,
+                      FalseDest)) &&
+      !isCanonicalPredicate(Pred)) {
+    // Swap destinations and condition.
+    CmpInst *Cond = cast<CmpInst>(BI.getCondition());
+    Cond->setPredicate(CmpInst::getInversePredicate(Pred));
+    BI.swapSuccessors();
+    Worklist.Add(Cond);
+    return &BI;
   }
 
   return nullptr;
@@ -3053,7 +3033,10 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB, const DataLayout &DL,
         }
       }
 
-      InstrsForInstCombineWorklist.push_back(Inst);
+      // Skip processing debug intrinsics in InstCombine. Processing these call
+      // instructions consumes a non-trivial amount of time and provides no value for the optimization.
+      if (!isa<DbgInfoIntrinsic>(Inst))
+        InstrsForInstCombineWorklist.push_back(Inst);
     }
 
     // Recursively visit successors.  If this is a branch or switch on a
 
diff --git a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
index 340c81fed0fd..37b9c4b1094e 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LICM.cpp
@@ -546,7 +546,7 @@ static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
     // If there are escaping uses of the invariant.start instruction, the load
     // may be non-invariant.
     if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
-        II->hasNUsesOrMore(1))
+        !II->use_empty())
       continue;
     unsigned InvariantSizeInBits =
         cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
 
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index 6693a26e8890..cb6223b070a6 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1292,13 +1292,15 @@ bool LoopIdiomRecognize::recognizeAndInsertCTLZ() {
   BasicBlock *PH = CurLoop->getLoopPreheader();
   Value *InitX = PhiX->getIncomingValueForBlock(PH);
   // If we check X != 0 before entering the loop we don't need a zero
-  // check in CTLZ intrinsic.
-  if (BasicBlock *PreCondBB = PH->getSinglePredecessor())
-    if (BranchInst *PreCondBr =
-        dyn_cast<BranchInst>(PreCondBB->getTerminator())) {
-      if (matchCondition(PreCondBr, PH) == InitX)
-        ZeroCheck = true;
-    }
+  // check in the CTLZ intrinsic, but only if the Cnt Phi is not used outside
+  // of the loop (if it is used, we count CTLZ(X >> 1)).
+  if (!IsCntPhiUsedOutsideLoop)
+    if (BasicBlock *PreCondBB = PH->getSinglePredecessor())
+      if (BranchInst *PreCondBr =
+          dyn_cast<BranchInst>(PreCondBB->getTerminator())) {
+        if (matchCondition(PreCondBr, PH) == InitX)
+          ZeroCheck = true;
+      }
 
   // Check if CTLZ intrinsic is profitable. Assume it is always profitable
   // if we delete the loop (the loop has only 6 instructions):
 
diff --git a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index ccedb98d7fa1..bd1f21c69eba 100644
--- a/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -3902,8 +3902,7 @@ void LSRInstance::GenerateCrossUseConstantOffsets() {
     // Compute the difference between the two.
     int64_t Imm = (uint64_t)JImm - M->first;
-    for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
-         LUIdx = UsedByIndices.find_next(LUIdx))
+    for (unsigned LUIdx : UsedByIndices.set_bits())
       // Make a memo of this use, offset, and register tuple.
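For reference, the shift-until-zero idiom that recognizeAndInsertCTLZ() above rewrites relates its trip count to a count-leading-zeros intrinsic, which is why the transform needs either the X != 0 guard or a zero check inside the intrinsic. A freestanding sketch using the GCC/Clang builtin (illustrative only):

#include <cassert>
#include <cstdint>

// The loop being recognized: for a nonzero 32-bit X, the number of times X
// can be right-shifted before reaching zero equals 32 - ctlz(X).
// __builtin_clz(0) is undefined, hence the guard/zero-check discussion above.
int main() {
  uint32_t X = 0x00f0;
  int Cnt = 0;
  for (uint32_t V = X; V != 0; V >>= 1)
    ++Cnt;                                 // the original counting loop
  assert(Cnt == 32 - __builtin_clz(X));    // the recognized closed form
  return 0;
}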
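Likewise, the visitBranchInst() canonicalization above is sound because inverting the predicate and swapping the successors cancel out. A brute-force check over a small domain (illustrative only, not part of the patch):

#include <cassert>

// br (a != b), T, F behaves exactly like br (a == b), F, T. The same holds
// for the other non-canonical predicates listed in isCanonicalPredicate().
int main() {
  for (int A = 0; A < 4; ++A)
    for (int B = 0; B < 4; ++B) {
      char Original = (A != B) ? 'T' : 'F';
      char Canonical = (A == B) ? 'F' : 'T'; // inverted predicate, swapped arms
      assert(Original == Canonical);
    }
  return 0;
}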
if (UniqueItems.insert(std::make_pair(LUIdx, Imm)).second) WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg)); diff --git a/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp b/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp index 5e0a705782ea..0e7572f8d2e5 100644 --- a/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp +++ b/contrib/llvm/lib/Transforms/Scalar/NewGVN.cpp @@ -642,6 +642,7 @@ class NewGVN { void updateProcessedCount(Value *V); void verifyMemoryCongruency() const; void verifyIterationSettled(Function &F); + void verifyStoreExpressions() const; bool singleReachablePHIPath(const MemoryAccess *, const MemoryAccess *) const; BasicBlock *getBlockForValue(Value *V) const; void deleteExpression(const Expression *E) const; @@ -2003,7 +2004,6 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, // If it's not a memory use, set the MemoryAccess equivalence auto *InstMA = dyn_cast_or_null(MSSA->getMemoryAccess(I)); - bool InstWasMemoryLeader = InstMA && OldClass->getMemoryLeader() == InstMA; if (InstMA) moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass); ValueToClass[I] = NewClass; @@ -2029,31 +2029,6 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, if (OldClass->getStoredValue()) OldClass->setStoredValue(nullptr); } - // If we destroy the old access leader and it's a store, we have to - // effectively destroy the congruence class. When it comes to scalars, - // anything with the same value is as good as any other. That means that - // one leader is as good as another, and as long as you have some leader for - // the value, you are good.. When it comes to *memory states*, only one - // particular thing really represents the definition of a given memory - // state. Once it goes away, we need to re-evaluate which pieces of memory - // are really still equivalent. The best way to do this is to re-value - // number things. The only way to really make that happen is to destroy the - // rest of the class. In order to effectively destroy the class, we reset - // ExpressionToClass for each by using the ValueToExpression mapping. The - // members later get marked as touched due to the leader change. We will - // create new congruence classes, and the pieces that are still equivalent - // will end back together in a new class. If this becomes too expensive, it - // is possible to use a versioning scheme for the congruence classes to - // avoid the expressions finding this old class. Note that the situation is - // different for memory phis, becuase they are evaluated anew each time, and - // they become equal not by hashing, but by seeing if all operands are the - // same (or only one is reachable). - if (OldClass->getStoreCount() > 0 && InstWasMemoryLeader) { - DEBUG(dbgs() << "Kicking everything out of class " << OldClass->getID() - << " because MemoryAccess leader changed"); - for (auto Member : *OldClass) - ExpressionToClass.erase(ValueToExpression.lookup(Member)); - } OldClass->setLeader(getNextValueLeader(OldClass)); OldClass->resetNextLeader(); markValueLeaderChangeTouched(OldClass); @@ -2062,7 +2037,6 @@ void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, // Perform congruence finding on a given value numbering expression. void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) { - ValueToExpression[I] = E; // This is guaranteed to return something, since it will at least find // TOP. 
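The NewGVN hunks below erase stale store expressions from the expression table when a store changes congruence class. The invariant can be pictured with an ordinary hash map standing in for ExpressionToClass; a toy model (all names hypothetical, illustrative only):

#include <cassert>
#include <string>
#include <unordered_map>

// Toy model of the table hygiene: once a store is re-value-numbered (e.g.
// its memory leader changed), the old expression key must be erased, or a
// later lookup (such as one coming from a load) observes the stale class.
int main() {
  std::unordered_map<std::string, int> ExprToClass;
  ExprToClass["store v, p @leader1"] = 1;   // old store expression -> class 1
  ExprToClass.erase("store v, p @leader1"); // store moved classes; drop it
  ExprToClass["store v, p @leader2"] = 2;   // latest expression -> class 2
  assert(ExprToClass.count("store v, p @leader1") == 0);
  return 0;
}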
@@ -2132,6 +2106,18 @@ void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) {
     if (auto *CI = dyn_cast<CmpInst>(I))
       markPredicateUsersTouched(CI);
   }
+  // If we changed the class of the store, we want to ensure nothing finds the
+  // old store expression. In particular, loads do not compare against the
+  // stored value, so they will find old store expressions (and associated
+  // class mappings) if we leave them in the table.
+  if (ClassChanged && isa<StoreExpression>(E)) {
+    auto *OldE = ValueToExpression.lookup(I);
+    // It could just be that the old class died. We don't want to erase it if
+    // we just moved classes.
+    if (OldE && isa<StoreExpression>(OldE) && !OldE->equals(*E))
+      ExpressionToClass.erase(OldE);
+  }
+  ValueToExpression[I] = E;
 }
 
 // Process the fact that Edge (from, to) is reachable, including marking
@@ -2651,6 +2637,30 @@ void NewGVN::verifyIterationSettled(Function &F) {
 #endif
 }
 
+// Verify that for each store expression in the expression-to-class mapping,
+// only the latest one appears, and stale duplicates do not appear.
+// Because loads do not use the stored value when doing equality with stores,
+// if we don't erase the old store expressions from the table, a load can find
+// a no-longer valid StoreExpression.
+void NewGVN::verifyStoreExpressions() const {
+#ifndef NDEBUG
+  DenseSet<std::pair<const Value *, const MemoryAccess *>> StoreExpressionSet;
+  for (const auto &KV : ExpressionToClass) {
+    if (auto *SE = dyn_cast<StoreExpression>(KV.first)) {
+      // Make sure a version that will conflict with loads is not already there.
+      auto Res =
+          StoreExpressionSet.insert({SE->getOperand(0), SE->getMemoryLeader()});
+      assert(Res.second &&
+             "Stored expression conflict exists in expression table");
+      auto *ValueExpr = ValueToExpression.lookup(SE->getStoreInst());
+      assert(ValueExpr && ValueExpr->equals(*SE) &&
+             "StoreExpression in ExpressionToClass is not latest "
+             "StoreExpression for value");
+    }
+  }
+#endif
+}
+
 // This is the main value numbering loop, it iterates over the initial touched
 // instruction set, propagating value numbers, marking things touched, etc,
 // until the set of touched instructions is completely empty.
@@ -2668,8 +2678,7 @@ void NewGVN::iterateTouchedInstructions() {
   // TODO: As we hit a new block, we should push and pop equalities into a
   // table lookupOperandLeader can use, to catch things PredicateInfo
   // might miss, like edge-only equivalences.
-  for (int InstrNum = TouchedInstructions.find_first(); InstrNum != -1;
-       InstrNum = TouchedInstructions.find_next(InstrNum)) {
+  for (unsigned InstrNum : TouchedInstructions.set_bits()) {
 
     // This instruction was found to be dead. We don't bother looking
     // at it again.
@@ -2776,6 +2785,7 @@ bool NewGVN::runGVN() {
   iterateTouchedInstructions();
   verifyMemoryCongruency();
   verifyIterationSettled(F);
+  verifyStoreExpressions();
 
   Changed |= eliminateInstructions(F);
 
diff --git a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
index ef29d4141600..53320bff0883 100644
--- a/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
+++ b/contrib/llvm/lib/Transforms/Scalar/Reassociate.cpp
@@ -1922,7 +1922,7 @@ Instruction *ReassociatePass::canonicalizeNegConstExpr(Instruction *I) {
   // User must be a binary operator with one or more uses.
Instruction *User = I->user_back(); - if (!isa(User) || !User->hasNUsesOrMore(1)) + if (!isa(User) || User->use_empty()) return nullptr; unsigned UserOpcode = User->getOpcode(); diff --git a/contrib/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp b/contrib/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp index 4f608c97147d..b32a61a7e8f8 100644 --- a/contrib/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp +++ b/contrib/llvm/lib/Transforms/Scalar/SimpleLoopUnswitch.cpp @@ -1,4 +1,4 @@ -//===-- SimpleLoopUnswitch.cpp - Hoist loop-invariant control flow --------===// +//===- SimpleLoopUnswitch.cpp - Hoist loop-invariant control flow ---------===// // // The LLVM Compiler Infrastructure // @@ -7,25 +7,41 @@ // //===----------------------------------------------------------------------===// -#include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h" -#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/Sequence.h" +#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" +#include "llvm/ADT/STLExtras.h" +#include "llvm/ADT/Twine.h" #include "llvm/Analysis/AssumptionCache.h" +#include "llvm/Analysis/LoopAnalysisManager.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" +#include "llvm/IR/BasicBlock.h" +#include "llvm/IR/Constant.h" #include "llvm/IR/Constants.h" #include "llvm/IR/Dominators.h" #include "llvm/IR/Function.h" +#include "llvm/IR/InstrTypes.h" +#include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" -#include "llvm/Support/CommandLine.h" +#include "llvm/IR/Use.h" +#include "llvm/IR/Value.h" +#include "llvm/Pass.h" +#include "llvm/Support/Casting.h" #include "llvm/Support/Debug.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/GenericDomTree.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h" -#include "llvm/Transforms/Utils/Cloning.h" -#include "llvm/Transforms/Utils/Local.h" -#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/LoopUtils.h" +#include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h" +#include +#include +#include +#include #define DEBUG_TYPE "simple-loop-unswitch" @@ -174,7 +190,7 @@ static void rewritePHINodesForUnswitchedExitBlock(BasicBlock &UnswitchedBB, // When the loop exit is directly unswitched we just need to update the // incoming basic block. We loop to handle weird cases with repeated // incoming blocks, but expect to typically only have one operand here. 
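The PHI rewrite in the hunk just below simply redirects every incoming entry that names the old exiting block, including the "weird" case where the same incoming block appears more than once. A toy model of that loop (names hypothetical, illustrative only):

#include <cassert>
#include <string>
#include <vector>

// Each PHI operand pairs an incoming block with a value; after unswitching,
// every entry for the old exiting block is retargeted to the old preheader.
struct PhiEntry { std::string IncomingBlock; int Value; };

int main() {
  std::vector<PhiEntry> Phi = {{"OldExitingBB", 1}, {"OldExitingBB", 2}};
  for (auto &E : Phi) {
    assert(E.IncomingBlock == "OldExitingBB" &&
           "Found incoming block different from unique predecessor!");
    E.IncomingBlock = "OldPH";
  }
  for (const auto &E : Phi)
    assert(E.IncomingBlock == "OldPH");
  return 0;
}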
- for (auto i : llvm::seq(0, PN->getNumOperands())) { + for (auto i : seq(0, PN->getNumOperands())) { assert(PN->getIncomingBlock(i) == &OldExitingBB && "Found incoming block different from unique predecessor!"); PN->setIncomingBlock(i, &OldPH); @@ -688,9 +704,11 @@ PreservedAnalyses SimpleLoopUnswitchPass::run(Loop &L, LoopAnalysisManager &AM, } namespace { + class SimpleLoopUnswitchLegacyPass : public LoopPass { public: static char ID; // Pass ID, replacement for typeid + explicit SimpleLoopUnswitchLegacyPass() : LoopPass(ID) { initializeSimpleLoopUnswitchLegacyPassPass( *PassRegistry::getPassRegistry()); @@ -703,7 +721,8 @@ class SimpleLoopUnswitchLegacyPass : public LoopPass { getLoopAnalysisUsage(AU); } }; -} // namespace + +} // end anonymous namespace bool SimpleLoopUnswitchLegacyPass::runOnLoop(Loop *L, LPPassManager &LPM) { if (skipLoop(L)) diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h index facef8e55f7a..4f8042ac9291 100644 --- a/contrib/llvm/tools/clang/include/clang/AST/Decl.h +++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h @@ -301,16 +301,6 @@ class NamedDecl : public Decl { using Decl::isModulePrivate; using Decl::setModulePrivate; - /// \brief Determine whether this declaration is hidden from name lookup. - bool isHidden() const { return Hidden; } - - /// \brief Set whether this declaration is hidden from name lookup. - void setHidden(bool Hide) { - assert((!Hide || isFromASTFile() || hasLocalOwningModuleStorage()) && - "declaration with no owning module can't be hidden"); - Hidden = Hide; - } - /// \brief Determine whether this declaration is a C++ class member. bool isCXXClassMember() const { const DeclContext *DC = getDeclContext(); diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h index 15ac11a5a777..08879b36cce5 100644 --- a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h +++ b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h @@ -706,6 +706,20 @@ class LLVM_ALIGNAS(/*alignof(uint64_t)*/ 8) Decl { reinterpret_cast(this)[-1] = M; } + Module *getOwningModule() const { + return isFromASTFile() ? getImportedOwningModule() : getLocalOwningModule(); + } + + /// \brief Determine whether this declaration is hidden from name lookup. + bool isHidden() const { return Hidden; } + + /// \brief Set whether this declaration is hidden from name lookup. + void setHidden(bool Hide) { + assert((!Hide || isFromASTFile() || hasLocalOwningModuleStorage()) && + "declaration with no owning module can't be hidden"); + Hidden = Hide; + } + unsigned getIdentifierNamespace() const { return IdentifierNamespace; } diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h index 20a0e5845602..ceaedf58574f 100644 --- a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h +++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h @@ -166,6 +166,11 @@ class LangOptions : public LangOptionsBase { return getCompilingModule() != CMK_None; } + /// Do we need to track the owning module for a local declaration? 
+ bool trackLocalOwningModule() const { + return ModulesLocalVisibility; + } + bool isSignedOverflowDefined() const { return getSignedOverflowBehavior() == SOB_Defined; } diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h index c8fe2ab90c29..6960ea690b91 100644 --- a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h +++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h @@ -865,7 +865,7 @@ class SourceManager : public RefCountedBase { const FileEntry *NewFile); /// \brief Returns true if the file contents have been overridden. - bool isFileOverridden(const FileEntry *File) { + bool isFileOverridden(const FileEntry *File) const { if (OverriddenFilesInfo) { if (OverriddenFilesInfo->OverriddenFilesWithBuffer.count(File)) return true; diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h index 2a8df1b7b9ae..46395cf6e861 100644 --- a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h +++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h @@ -419,7 +419,6 @@ class ASTUnit : public ModuleLoader { explicit ASTUnit(bool MainFileIsAST); - void CleanTemporaryFiles(); bool Parse(std::shared_ptr PCHContainerOps, std::unique_ptr OverrideMainBuffer); @@ -530,11 +529,6 @@ class ASTUnit : public ModuleLoader { ASTMutationListener *getASTMutationListener(); ASTDeserializationListener *getDeserializationListener(); - /// \brief Add a temporary file that the ASTUnit depends on. - /// - /// This file will be erased when the ASTUnit is destroyed. - void addTemporaryFile(StringRef TempFile); - bool getOnlyLocalDecls() const { return OnlyLocalDecls; } bool getOwnsRemappedFileBuffers() const { return OwnsRemappedFileBuffers; } diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h index e910be14f969..ba2da92c5be1 100644 --- a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h +++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h @@ -1467,11 +1467,9 @@ class Sema { VisibleModuleSet VisibleModules; - Module *CachedFakeTopLevelModule; - public: /// \brief Get the module owning an entity. - Module *getOwningModule(Decl *Entity); + Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); } /// \brief Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. 
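The trackLocalOwningModule() hook above gates Decl's prefix-slot trick seen in the DeclBase.h and DeclBase.cpp hunks nearby: operator new over-allocates by one Module* stored directly in front of the object, and the accessor reads it back with a [-1] index. A freestanding toy version of that layout (illustrative only, not the actual Decl code):

#include <cstdlib>
#include <new>

// Over-allocate, stash an owner pointer in the first slot, placement-new the
// object right after it, and recover the owner via negative indexing.
struct Module {};

struct Obj {
  Module *getLocalOwningModule() const {
    return reinterpret_cast<Module *const *>(this)[-1];
  }
};

int main() {
  static Module M;
  void *Raw = std::malloc(sizeof(Module *) + sizeof(Obj));
  auto **Slot = static_cast<Module **>(Raw);
  *Slot = &M;                  // the hidden prefix slot
  Obj *O = new (Slot + 1) Obj; // the object lives just past the slot
  bool OK = (O->getLocalOwningModule() == &M);
  O->~Obj();
  std::free(Raw);
  return OK ? 0 : 1;
}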
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp index ef491ab06f8c..d89be0d9e6fa 100644 --- a/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ASTDumper.cpp @@ -1038,10 +1038,10 @@ void ASTDumper::dumpDecl(const Decl *D) { dumpSourceRange(D->getSourceRange()); OS << ' '; dumpLocation(D->getLocation()); - if (Module *M = D->getImportedOwningModule()) + if (D->isFromASTFile()) + OS << " imported"; + if (Module *M = D->getOwningModule()) OS << " in " << M->getFullModuleName(); - else if (Module *M = D->getLocalOwningModule()) - OS << " in (local) " << M->getFullModuleName(); if (auto *ND = dyn_cast(D)) for (Module *M : D->getASTContext().getModulesWithMergedDefinition( const_cast(ND))) diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp index 0f2558e24ba5..a1342f477b68 100644 --- a/contrib/llvm/tools/clang/lib/AST/Decl.cpp +++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp @@ -47,9 +47,7 @@ bool Decl::isOutOfLine() const { TranslationUnitDecl::TranslationUnitDecl(ASTContext &ctx) : Decl(TranslationUnit, nullptr, SourceLocation()), - DeclContext(TranslationUnit), Ctx(ctx), AnonymousNamespace(nullptr) { - Hidden = Ctx.getLangOpts().ModulesLocalVisibility; -} + DeclContext(TranslationUnit), Ctx(ctx), AnonymousNamespace(nullptr) {} //===----------------------------------------------------------------------===// // NamedDecl Implementation diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp index 5c2c9cbd0180..f6f81692611d 100644 --- a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp +++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp @@ -75,7 +75,7 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Ctx, assert(!Parent || &Parent->getParentASTContext() == &Ctx); // With local visibility enabled, we track the owning module even for local // declarations. - if (Ctx.getLangOpts().ModulesLocalVisibility) { + if (Ctx.getLangOpts().trackLocalOwningModule()) { // Ensure required alignment of the resulting object by adding extra // padding at the start if required. size_t ExtraAlign = @@ -83,7 +83,9 @@ void *Decl::operator new(std::size_t Size, const ASTContext &Ctx, char *Buffer = reinterpret_cast( ::operator new(ExtraAlign + sizeof(Module *) + Size + Extra, Ctx)); Buffer += ExtraAlign; - return new (Buffer) Module*(nullptr) + 1; + auto *ParentModule = + Parent ? 
cast(Parent)->getOwningModule() : nullptr; + return new (Buffer) Module*(ParentModule) + 1; } return ::operator new(Size + Extra, Ctx); } @@ -94,7 +96,7 @@ Module *Decl::getOwningModuleSlow() const { } bool Decl::hasLocalOwningModuleStorage() const { - return getASTContext().getLangOpts().ModulesLocalVisibility; + return getASTContext().getLangOpts().trackLocalOwningModule(); } const char *Decl::getDeclKindName() const { @@ -273,6 +275,8 @@ void Decl::setLexicalDeclContext(DeclContext *DC) { getMultipleDC()->LexicalDC = DC; } Hidden = cast(DC)->Hidden; + if (Hidden && !isFromASTFile() && hasLocalOwningModuleStorage()) + setLocalOwningModule(cast(DC)->getOwningModule()); } void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC, diff --git a/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp b/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp index f4d314a6dd0d..24371db64d07 100644 --- a/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp +++ b/contrib/llvm/tools/clang/lib/AST/ODRHash.cpp @@ -81,7 +81,35 @@ void ODRHash::AddDeclarationName(DeclarationName Name) { } } -void ODRHash::AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {} +void ODRHash::AddNestedNameSpecifier(const NestedNameSpecifier *NNS) { + assert(NNS && "Expecting non-null pointer."); + const auto *Prefix = NNS->getPrefix(); + AddBoolean(Prefix); + if (Prefix) { + AddNestedNameSpecifier(Prefix); + } + auto Kind = NNS->getKind(); + ID.AddInteger(Kind); + switch (Kind) { + case NestedNameSpecifier::Identifier: + AddIdentifierInfo(NNS->getAsIdentifier()); + break; + case NestedNameSpecifier::Namespace: + AddDecl(NNS->getAsNamespace()); + break; + case NestedNameSpecifier::NamespaceAlias: + AddDecl(NNS->getAsNamespaceAlias()); + break; + case NestedNameSpecifier::TypeSpec: + case NestedNameSpecifier::TypeSpecWithTemplate: + AddType(NNS->getAsType()); + break; + case NestedNameSpecifier::Global: + case NestedNameSpecifier::Super: + break; + } +} + void ODRHash::AddTemplateName(TemplateName Name) {} void ODRHash::AddTemplateArgument(TemplateArgument TA) {} void ODRHash::AddTemplateParameterList(const TemplateParameterList *TPL) {} @@ -335,6 +363,20 @@ class ODRTypeVisitor : public TypeVisitor { Hash.AddQualType(T); } + void AddNestedNameSpecifier(const NestedNameSpecifier *NNS) { + Hash.AddBoolean(NNS); + if (NNS) { + Hash.AddNestedNameSpecifier(NNS); + } + } + + void AddIdentifierInfo(const IdentifierInfo *II) { + Hash.AddBoolean(II); + if (II) { + Hash.AddIdentifierInfo(II); + } + } + void VisitQualifiers(Qualifiers Quals) { ID.AddInteger(Quals.getAsOpaqueValue()); } @@ -414,6 +456,42 @@ class ODRTypeVisitor : public TypeVisitor { AddQualType(T->getDecl()->getUnderlyingType().getCanonicalType()); VisitType(T); } + + void VisitTagType(const TagType *T) { + AddDecl(T->getDecl()); + VisitType(T); + } + + void VisitRecordType(const RecordType *T) { VisitTagType(T); } + void VisitEnumType(const EnumType *T) { VisitTagType(T); } + + void VisitTypeWithKeyword(const TypeWithKeyword *T) { + ID.AddInteger(T->getKeyword()); + VisitType(T); + }; + + void VisitDependentNameType(const DependentNameType *T) { + AddNestedNameSpecifier(T->getQualifier()); + AddIdentifierInfo(T->getIdentifier()); + VisitTypeWithKeyword(T); + } + + void VisitDependentTemplateSpecializationType( + const DependentTemplateSpecializationType *T) { + AddIdentifierInfo(T->getIdentifier()); + AddNestedNameSpecifier(T->getQualifier()); + ID.AddInteger(T->getNumArgs()); + for (const auto &TA : T->template_arguments()) { + Hash.AddTemplateArgument(TA); + } + 
VisitTypeWithKeyword(T); + } + + void VisitElaboratedType(const ElaboratedType *T) { + AddNestedNameSpecifier(T->getQualifier()); + AddQualType(T->getNamedType()); + VisitTypeWithKeyword(T); + } }; void ODRHash::AddType(const Type *T) { diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp index 9d77c61bd52c..bf178dd7fd80 100644 --- a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp +++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp @@ -2860,7 +2860,7 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit, if (DebugKind >= codegenoptions::LimitedDebugInfo) { if (const NamespaceDecl *NSDecl = - dyn_cast_or_null(FD->getLexicalDeclContext())) + dyn_cast_or_null(FD->getDeclContext())) FDContext = getOrCreateNamespace(NSDecl); else if (const RecordDecl *RDecl = dyn_cast_or_null(FD->getDeclContext())) { diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains/MSVC.cpp index a09304814ca6..6f5f54165b3b 100644 --- a/contrib/llvm/tools/clang/lib/Driver/ToolChains/MSVC.cpp +++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains/MSVC.cpp @@ -125,8 +125,15 @@ static bool findVCToolChainViaEnvironment(std::string &Path, continue; // whatever/VC/bin --> old toolchain, VC dir is toolchain dir. - if (llvm::sys::path::filename(PathEntry) == "bin") { - llvm::StringRef ParentPath = llvm::sys::path::parent_path(PathEntry); + llvm::StringRef TestPath = PathEntry; + bool IsBin = llvm::sys::path::filename(TestPath).equals_lower("bin"); + if (!IsBin) { + // Strip any architecture subdir like "amd64". + TestPath = llvm::sys::path::parent_path(TestPath); + IsBin = llvm::sys::path::filename(TestPath).equals_lower("bin"); + } + if (IsBin) { + llvm::StringRef ParentPath = llvm::sys::path::parent_path(TestPath); if (llvm::sys::path::filename(ParentPath) == "VC") { Path = ParentPath; IsVS2017OrNewer = false; diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp index 32ee9d3e9961..6ee211c2de67 100644 --- a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp +++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp @@ -84,13 +84,6 @@ namespace { /// \brief The file in which the precompiled preamble is stored. std::string PreambleFile; - /// \brief Temporary files that should be removed when the ASTUnit is - /// destroyed. - SmallVector TemporaryFiles; - - /// \brief Erase temporary files. - void CleanTemporaryFiles(); - /// \brief Erase the preamble file. 
void CleanPreambleFile(); @@ -163,12 +156,6 @@ static const std::string &getPreambleFile(const ASTUnit *AU) { return getOnDiskData(AU).PreambleFile; } -void OnDiskData::CleanTemporaryFiles() { - for (StringRef File : TemporaryFiles) - llvm::sys::fs::remove(File); - TemporaryFiles.clear(); -} - void OnDiskData::CleanPreambleFile() { if (!PreambleFile.empty()) { llvm::sys::fs::remove(PreambleFile); @@ -177,7 +164,6 @@ void OnDiskData::CleanPreambleFile() { } void OnDiskData::Cleanup() { - CleanTemporaryFiles(); CleanPreambleFile(); } @@ -194,14 +180,6 @@ void ASTUnit::clearFileLevelDecls() { llvm::DeleteContainerSeconds(FileDecls); } -void ASTUnit::CleanTemporaryFiles() { - getOnDiskData(this).CleanTemporaryFiles(); -} - -void ASTUnit::addTemporaryFile(StringRef TempFile) { - getOnDiskData(this).TemporaryFiles.push_back(TempFile); -} - /// \brief After failing to build a precompiled preamble (due to /// errors in the source that occurs in the preamble), the number of /// reparses during which we'll skip even trying to precompile the @@ -1100,7 +1078,6 @@ bool ASTUnit::Parse(std::shared_ptr PCHContainerOps, // Clear out old caches and data. TopLevelDecls.clear(); clearFileLevelDecls(); - CleanTemporaryFiles(); if (!OverrideMainBuffer) { checkAndRemoveNonDriverDiags(StoredDiagnostics); diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h index 9773acb840a5..5a1c572ce614 100644 --- a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h +++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h @@ -2133,7 +2133,7 @@ void _mm_sfence(void); /// \headerfile /// /// \code -/// void _mm_extract_pi(__m64 a, int n); +/// int _mm_extract_pi16(__m64 a, int n); /// \endcode /// /// This intrinsic corresponds to the VPEXTRW / PEXTRW instruction. @@ -2157,7 +2157,7 @@ void _mm_sfence(void); /// \headerfile /// /// \code -/// void _mm_insert_pi(__m64 a, int d, int n); +/// __m64 _mm_insert_pi16(__m64 a, int d, int n); /// \endcode /// /// This intrinsic corresponds to the VPINSRW / PINSRW instruction. @@ -2680,8 +2680,7 @@ _mm_movelh_ps(__m128 __a, __m128 __b) /// /// \headerfile /// -/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE -/// instruction. +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. /// /// \param __a /// A 64-bit vector of [4 x i16]. The elements of the destination are copied @@ -2711,8 +2710,7 @@ _mm_cvtpi16_ps(__m64 __a) /// /// \headerfile /// -/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE -/// instruction. +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. /// /// \param __a /// A 64-bit vector of 16-bit unsigned integer values. The elements of the @@ -2741,8 +2739,7 @@ _mm_cvtpu16_ps(__m64 __a) /// /// \headerfile /// -/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE -/// instruction. +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. /// /// \param __a /// A 64-bit vector of [8 x i8]. The elements of the destination are copied @@ -2766,8 +2763,7 @@ _mm_cvtpi8_ps(__m64 __a) /// /// \headerfile /// -/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE -/// instruction. +/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. /// /// \param __a /// A 64-bit vector of unsigned 8-bit integer values. The elements of the @@ -2791,8 +2787,7 @@ _mm_cvtpu8_ps(__m64 __a) /// /// \headerfile /// -/// This intrinsic corresponds to the CVTPI2PS + \c COMPOSITE -/// instruction. 
+/// This intrinsic corresponds to the CVTPI2PS + COMPOSITE instruction. /// /// \param __a /// A 64-bit vector of [2 x i32]. The lower elements of the destination are @@ -2826,8 +2821,7 @@ _mm_cvtpi32x2_ps(__m64 __a, __m64 __b) /// /// \headerfile /// -/// This intrinsic corresponds to the CVTPS2PI + \c COMPOSITE -/// instruction. +/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction. /// /// \param __a /// A 128-bit floating-point vector of [4 x float]. @@ -2857,8 +2851,7 @@ _mm_cvtps_pi16(__m128 __a) /// /// \headerfile /// -/// This intrinsic corresponds to the CVTPS2PI + \c COMPOSITE -/// instruction. +/// This intrinsic corresponds to the CVTPS2PI + COMPOSITE instruction. /// /// \param __a /// 128-bit floating-point vector of [4 x float]. diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp index 3d6fe91115a9..92942fd09a0c 100644 --- a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp +++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp @@ -43,6 +43,8 @@ using namespace clang; /// isObjCAtKeyword - Return true if we have an ObjC keyword identifier. bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const { + if (isAnnotation()) + return false; if (IdentifierInfo *II = getIdentifierInfo()) return II->getObjCKeywordID() == objcKey; return false; @@ -50,6 +52,8 @@ bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const { /// getObjCKeywordID - Return the ObjC keyword kind. tok::ObjCKeywordKind Token::getObjCKeywordID() const { + if (isAnnotation()) + return tok::objc_not_keyword; IdentifierInfo *specId = getIdentifierInfo(); return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword; } diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp index ca1d27e9505f..e7b0914641ff 100644 --- a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp @@ -93,11 +93,10 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr), ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr), DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false), - TUKind(TUKind), NumSFINAEErrors(0), CachedFakeTopLevelModule(nullptr), - AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false), - NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1), - CurrentInstantiationScope(nullptr), DisableTypoCorrection(false), - TyposCorrected(0), AnalysisWarnings(*this), + TUKind(TUKind), NumSFINAEErrors(0), AccessCheckingSFINAE(false), + InNonInstantiationSFINAEContext(false), NonInstantiationEntries(0), + ArgumentPackSubstitutionIndex(-1), CurrentInstantiationScope(nullptr), + DisableTypoCorrection(false), TyposCorrected(0), AnalysisWarnings(*this), ThreadSafetyDeclCache(nullptr), VarDataSharingAttributesStack(nullptr), CurScope(nullptr), Ident_super(nullptr), Ident___float128(nullptr) { TUScope = nullptr; diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp index 2e069a9defaa..dca51b0e8c8e 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp @@ -16047,6 +16047,14 @@ void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) { ModuleScopes.back().OuterVisibleModules = std::move(VisibleModules); VisibleModules.setVisible(Mod, DirectiveLoc); + + // The enclosing context is now part of this module. 
+ // FIXME: Consider creating a child DeclContext to hold the entities + // lexically within the module. + if (getLangOpts().trackLocalOwningModule()) { + cast(CurContext)->setHidden(true); + cast(CurContext)->setLocalOwningModule(Mod); + } } void Sema::ActOnModuleEnd(SourceLocation EomLoc, Module *Mod) { @@ -16075,6 +16083,13 @@ void Sema::ActOnModuleEnd(SourceLocation EomLoc, Module *Mod) { DirectiveLoc = EomLoc; } BuildModuleInclude(DirectiveLoc, Mod); + + // Any further declarations are in whatever module we returned to. + if (getLangOpts().trackLocalOwningModule()) { + cast(CurContext)->setLocalOwningModule(getCurrentModule()); + if (!getCurrentModule()) + cast(CurContext)->setHidden(false); + } } void Sema::createImplicitModuleImportForErrorRecovery(SourceLocation Loc, diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp index c5b579a4b2e9..04350831c681 100644 --- a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp +++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp @@ -1326,62 +1326,6 @@ bool Sema::CppLookupName(LookupResult &R, Scope *S) { return !R.empty(); } -Module *Sema::getOwningModule(Decl *Entity) { - // If it's imported, grab its owning module. - Module *M = Entity->getImportedOwningModule(); - if (M || !isa(Entity) || !cast(Entity)->isHidden()) - return M; - assert(!Entity->isFromASTFile() && - "hidden entity from AST file has no owning module"); - - if (!getLangOpts().ModulesLocalVisibility) { - // If we're not tracking visibility locally, the only way a declaration - // can be hidden and local is if it's hidden because it's parent is (for - // instance, maybe this is a lazily-declared special member of an imported - // class). - auto *Parent = cast(Entity->getDeclContext()); - assert(Parent->isHidden() && "unexpectedly hidden decl"); - return getOwningModule(Parent); - } - - // It's local and hidden; grab or compute its owning module. - M = Entity->getLocalOwningModule(); - if (M) - return M; - - if (auto *Containing = - PP.getModuleContainingLocation(Entity->getLocation())) { - M = Containing; - } else if (Entity->isInvalidDecl() || Entity->getLocation().isInvalid()) { - // Don't bother tracking visibility for invalid declarations with broken - // locations. - cast(Entity)->setHidden(false); - } else { - // We need to assign a module to an entity that exists outside of any - // module, so that we can hide it from modules that we textually enter. - // Invent a fake module for all such entities. - if (!CachedFakeTopLevelModule) { - CachedFakeTopLevelModule = - PP.getHeaderSearchInfo().getModuleMap().findOrCreateModule( - "", nullptr, false, false).first; - - auto &SrcMgr = PP.getSourceManager(); - SourceLocation StartLoc = - SrcMgr.getLocForStartOfFile(SrcMgr.getMainFileID()); - auto &TopLevel = ModuleScopes.empty() - ? VisibleModules - : ModuleScopes[0].OuterVisibleModules; - TopLevel.setVisible(CachedFakeTopLevelModule, StartLoc); - } - - M = CachedFakeTopLevelModule; - } - - if (M) - Entity->setLocalOwningModule(M); - return M; -} - void Sema::makeMergedDefinitionVisible(NamedDecl *ND) { if (auto *M = getCurrentModule()) Context.mergeDefinitionIntoModule(ND, M); @@ -1520,7 +1464,6 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) { if (SemaRef.getLangOpts().ModulesLocalVisibility) { DeclModule = SemaRef.getOwningModule(D); if (!DeclModule) { - // getOwningModule() may have decided the declaration should not be hidden. 
assert(!D->isHidden() && "hidden decl not from a module"); return true; } diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp index ef8481488302..5cabd0e6740d 100644 --- a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp +++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp @@ -9348,12 +9348,6 @@ void ASTReader::diagnoseOdrViolations() { return Hash.CalculateHash(); }; - auto ComputeDeclNameODRHash = [&Hash](const DeclarationName Name) { - Hash.clear(); - Hash.AddDeclarationName(Name); - return Hash.CalculateHash(); - }; - auto ComputeQualTypeODRHash = [&Hash](QualType Ty) { Hash.clear(); Hash.AddQualType(Ty); @@ -9446,11 +9440,8 @@ void ASTReader::diagnoseOdrViolations() { QualType FirstType = FirstField->getType(); QualType SecondType = SecondField->getType(); - const TypedefType *FirstTypedef = dyn_cast(FirstType); - const TypedefType *SecondTypedef = dyn_cast(SecondType); - - if ((FirstTypedef && !SecondTypedef) || - (!FirstTypedef && SecondTypedef)) { + if (ComputeQualTypeODRHash(FirstType) != + ComputeQualTypeODRHash(SecondType)) { ODRDiagError(FirstField->getLocation(), FirstField->getSourceRange(), FieldTypeName) << FirstII << FirstType; @@ -9462,24 +9453,6 @@ void ASTReader::diagnoseOdrViolations() { break; } - if (FirstTypedef && SecondTypedef) { - unsigned FirstHash = ComputeDeclNameODRHash( - FirstTypedef->getDecl()->getDeclName()); - unsigned SecondHash = ComputeDeclNameODRHash( - SecondTypedef->getDecl()->getDeclName()); - if (FirstHash != SecondHash) { - ODRDiagError(FirstField->getLocation(), - FirstField->getSourceRange(), FieldTypeName) - << FirstII << FirstType; - ODRDiagNote(SecondField->getLocation(), - SecondField->getSourceRange(), FieldTypeName) - << SecondII << SecondType; - - Diagnosed = true; - break; - } - } - const bool IsFirstBitField = FirstField->isBitField(); const bool IsSecondBitField = SecondField->isBitField(); if (IsFirstBitField != IsSecondBitField) { diff --git a/contrib/llvm/tools/lld/ELF/Relocations.cpp b/contrib/llvm/tools/lld/ELF/Relocations.cpp index ea7477e03842..c505a14f3c64 100644 --- a/contrib/llvm/tools/lld/ELF/Relocations.cpp +++ b/contrib/llvm/tools/lld/ELF/Relocations.cpp @@ -963,9 +963,8 @@ template void elf::scanRelocations(InputSectionBase &S) { // in the Sections vector, and recalculate the InputSection output section // offsets. 
 // This may invalidate any output section offsets stored outside of InputSection
-template <class ELFT>
-void ThunkCreator<ELFT>::mergeThunks(OutputSection *OS,
-                                     std::vector<ThunkSection *> &Thunks) {
+void ThunkCreator::mergeThunks(OutputSection *OS,
+                               std::vector<ThunkSection *> &Thunks) {
   // Order Thunks in ascending OutSecOff
   auto ThunkCmp = [](const ThunkSection *A, const ThunkSection *B) {
     return A->OutSecOff < B->OutSecOff;
   };
@@ -993,9 +992,8 @@ void ThunkCreator<ELFT>::mergeThunks(OutputSection *OS,
   OS->assignOffsets();
 }
 
-template <class ELFT>
-ThunkSection *ThunkCreator<ELFT>::getOSThunkSec(ThunkSection *&TS,
-                                                OutputSection *OS) {
+ThunkSection *ThunkCreator::getOSThunkSec(ThunkSection *&TS,
+                                          OutputSection *OS) {
   if (TS == nullptr) {
     uint32_t Off = 0;
     for (auto *IS : OS->Sections) {
@@ -1009,9 +1007,7 @@ ThunkSection *ThunkCreator<ELFT>::getOSThunkSec(ThunkSection *&TS,
   return TS;
 }
 
-template <class ELFT>
-ThunkSection *ThunkCreator<ELFT>::getISThunkSec(InputSection *IS,
-                                                OutputSection *OS) {
+ThunkSection *ThunkCreator::getISThunkSec(InputSection *IS, OutputSection *OS) {
   ThunkSection *TS = ThunkedSections.lookup(IS);
   if (TS)
     return TS;
@@ -1022,12 +1018,11 @@ ThunkSection *ThunkCreator<ELFT>::getISThunkSec(InputSection *IS,
   return TS;
 }
 
-template <class ELFT>
-std::pair<Thunk *, bool> ThunkCreator<ELFT>::getThunk(SymbolBody &Body,
-                                                      uint32_t Type) {
+std::pair<Thunk *, bool> ThunkCreator::getThunk(SymbolBody &Body,
+                                                uint32_t Type) {
   auto res = ThunkedSymbols.insert({&Body, nullptr});
   if (res.second)
-    res.first->second = addThunk<ELFT>(Type, Body);
+    res.first->second = addThunk(Type, Body);
   return std::make_pair(res.first->second, res.second);
 }
 
@@ -1041,9 +1036,7 @@ std::pair<Thunk *, bool> ThunkCreator<ELFT>::getThunk(SymbolBody &Body,
 //
 // FIXME: All Thunks are assumed to be in range of the relocation. Range
 // extension Thunks are not yet supported.
-template <class ELFT>
-bool ThunkCreator<ELFT>::createThunks(
-    ArrayRef<OutputSection *> OutputSections) {
+bool ThunkCreator::createThunks(ArrayRef<OutputSection *> OutputSections) {
   // Create all the Thunks and insert them into synthetic ThunkSections. The
   // ThunkSections are later inserted back into the OutputSection.
 
@@ -1086,8 +1079,3 @@ template void elf::scanRelocations<ELF32LE>(InputSectionBase &);
 template void elf::scanRelocations<ELF32BE>(InputSectionBase &);
 template void elf::scanRelocations<ELF64LE>(InputSectionBase &);
 template void elf::scanRelocations<ELF64BE>(InputSectionBase &);
-
-template class elf::ThunkCreator<ELF32LE>;
-template class elf::ThunkCreator<ELF32BE>;
-template class elf::ThunkCreator<ELF64LE>;
-template class elf::ThunkCreator<ELF64BE>;
diff --git a/contrib/llvm/tools/lld/ELF/Relocations.h b/contrib/llvm/tools/lld/ELF/Relocations.h
index f8f0f11e14a9..f3512e0a89fc 100644
--- a/contrib/llvm/tools/lld/ELF/Relocations.h
+++ b/contrib/llvm/tools/lld/ELF/Relocations.h
@@ -119,7 +119,7 @@ template <class ELFT> void scanRelocations(InputSectionBase &);
 
 class ThunkSection;
 class Thunk;
-template <class ELFT> class ThunkCreator {
+class ThunkCreator {
 public:
   // Return true if Thunks have been added to OutputSections
   bool createThunks(ArrayRef<OutputSection *> OutputSections);
diff --git a/contrib/llvm/tools/lld/ELF/Thunks.cpp b/contrib/llvm/tools/lld/ELF/Thunks.cpp
index 80ea69663c01..da2b13677513 100644
--- a/contrib/llvm/tools/lld/ELF/Thunks.cpp
+++ b/contrib/llvm/tools/lld/ELF/Thunks.cpp
@@ -50,7 +50,7 @@ namespace {
 
 // Specific ARM Thunk implementations.
The naming convention is:
 // Source State, TargetState, Target Requirement, ABS or PI, Range
-template <class ELFT> class ARMV7ABSLongThunk final : public Thunk {
+class ARMV7ABSLongThunk final : public Thunk {
 public:
   ARMV7ABSLongThunk(const SymbolBody &Dest) : Thunk(Dest) {}

@@ -59,7 +59,7 @@ template <class ELFT> class ARMV7ABSLongThunk final : public Thunk {
   void addSymbols(ThunkSection &IS) override;
 };

-template <class ELFT> class ARMV7PILongThunk final : public Thunk {
+class ARMV7PILongThunk final : public Thunk {
 public:
   ARMV7PILongThunk(const SymbolBody &Dest) : Thunk(Dest) {}

@@ -68,7 +68,7 @@ template <class ELFT> class ARMV7PILongThunk final : public Thunk {
   void addSymbols(ThunkSection &IS) override;
 };

-template <class ELFT> class ThumbV7ABSLongThunk final : public Thunk {
+class ThumbV7ABSLongThunk final : public Thunk {
 public:
   ThumbV7ABSLongThunk(const SymbolBody &Dest) : Thunk(Dest) {
     this->alignment = 2;
@@ -79,7 +79,7 @@ template <class ELFT> class ThumbV7ABSLongThunk final : public Thunk {
   void addSymbols(ThunkSection &IS) override;
 };

-template <class ELFT> class ThumbV7PILongThunk final : public Thunk {
+class ThumbV7PILongThunk final : public Thunk {
 public:
   ThumbV7PILongThunk(const SymbolBody &Dest) : Thunk(Dest) {
     this->alignment = 2;
@@ -91,7 +91,7 @@ template <class ELFT> class ThumbV7PILongThunk final : public Thunk {
 };

 // MIPS LA25 thunk
-template <class ELFT> class MipsThunk final : public Thunk {
+class MipsThunk final : public Thunk {
 public:
   MipsThunk(const SymbolBody &Dest) : Thunk(Dest) {}

@@ -109,117 +109,105 @@ static uint64_t getARMThunkDestVA(const SymbolBody &S) {
   return SignExtend64<32>(V);
 }

-template <class ELFT>
-void ARMV7ABSLongThunk<ELFT>::writeTo(uint8_t *Buf, ThunkSection &IS) const {
+void ARMV7ABSLongThunk::writeTo(uint8_t *Buf, ThunkSection &IS) const {
   const uint8_t Data[] = {
       0x00, 0xc0, 0x00, 0xe3, // movw ip,:lower16:S
       0x00, 0xc0, 0x40, 0xe3, // movt ip,:upper16:S
       0x1c, 0xff, 0x2f, 0xe1, // bx   ip
   };
-  uint64_t S = getARMThunkDestVA(this->Destination);
+  uint64_t S = getARMThunkDestVA(Destination);
   memcpy(Buf, Data, sizeof(Data));
   Target->relocateOne(Buf, R_ARM_MOVW_ABS_NC, S);
   Target->relocateOne(Buf + 4, R_ARM_MOVT_ABS, S);
 }

-template <class ELFT>
-void ARMV7ABSLongThunk<ELFT>::addSymbols(ThunkSection &IS) {
-  this->ThunkSym = addSyntheticLocal(
-      Saver.save("__ARMv7ABSLongThunk_" + this->Destination.getName()),
-      STT_FUNC, this->Offset, size(), &IS);
-  addSyntheticLocal("$a", STT_NOTYPE, this->Offset, 0, &IS);
+void ARMV7ABSLongThunk::addSymbols(ThunkSection &IS) {
+  ThunkSym = addSyntheticLocal(
+      Saver.save("__ARMv7ABSLongThunk_" + Destination.getName()), STT_FUNC,
+      Offset, size(), &IS);
+  addSyntheticLocal("$a", STT_NOTYPE, Offset, 0, &IS);
 }

-template <class ELFT>
-void ThumbV7ABSLongThunk<ELFT>::writeTo(uint8_t *Buf, ThunkSection &IS) const {
+void ThumbV7ABSLongThunk::writeTo(uint8_t *Buf, ThunkSection &IS) const {
   const uint8_t Data[] = {
       0x40, 0xf2, 0x00, 0x0c, // movw ip, :lower16:S
       0xc0, 0xf2, 0x00, 0x0c, // movt ip, :upper16:S
       0x60, 0x47,             // bx   ip
   };
-  uint64_t S = getARMThunkDestVA(this->Destination);
+  uint64_t S = getARMThunkDestVA(Destination);
   memcpy(Buf, Data, sizeof(Data));
   Target->relocateOne(Buf, R_ARM_THM_MOVW_ABS_NC, S);
   Target->relocateOne(Buf + 4, R_ARM_THM_MOVT_ABS, S);
 }

-template <class ELFT>
-void ThumbV7ABSLongThunk<ELFT>::addSymbols(ThunkSection &IS) {
-  this->ThunkSym = addSyntheticLocal(
-      Saver.save("__Thumbv7ABSLongThunk_" + this->Destination.getName()),
-      STT_FUNC, this->Offset, size(), &IS);
-  addSyntheticLocal("$t", STT_NOTYPE, this->Offset, 0, &IS);
+void ThumbV7ABSLongThunk::addSymbols(ThunkSection &IS) {
+  ThunkSym = addSyntheticLocal(
+      Saver.save("__Thumbv7ABSLongThunk_" + Destination.getName()), STT_FUNC,
+      Offset, size(), &IS);
+  addSyntheticLocal("$t", STT_NOTYPE, Offset, 0, &IS);
 }

-template <class ELFT>
-void ARMV7PILongThunk<ELFT>::writeTo(uint8_t *Buf, ThunkSection &IS) const {
+void ARMV7PILongThunk::writeTo(uint8_t *Buf, ThunkSection &IS) const {
   const uint8_t Data[] = {
       0xf0, 0xcf, 0x0f, 0xe3, // P:  movw ip,:lower16:S - (P + (L1-P) +8)
       0x00, 0xc0, 0x40, 0xe3, //     movt ip,:upper16:S - (P + (L1-P+4) +8)
       0x0f, 0xc0, 0x8c, 0xe0, // L1: add  ip, ip, pc
       0x1c, 0xff, 0x2f, 0xe1, //     bx   r12
   };
-  uint64_t S = getARMThunkDestVA(this->Destination);
-  uint64_t P = this->ThunkSym->getVA();
+  uint64_t S = getARMThunkDestVA(Destination);
+  uint64_t P = ThunkSym->getVA();
   memcpy(Buf, Data, sizeof(Data));
   Target->relocateOne(Buf, R_ARM_MOVW_PREL_NC, S - P - 16);
   Target->relocateOne(Buf + 4, R_ARM_MOVT_PREL, S - P - 12);
 }

-template <class ELFT>
-void ARMV7PILongThunk<ELFT>::addSymbols(ThunkSection &IS) {
-  this->ThunkSym = addSyntheticLocal(
-      Saver.save("__ARMV7PILongThunk_" + this->Destination.getName()), STT_FUNC,
-      this->Offset, size(), &IS);
-  addSyntheticLocal("$a", STT_NOTYPE, this->Offset, 0, &IS);
+void ARMV7PILongThunk::addSymbols(ThunkSection &IS) {
+  ThunkSym = addSyntheticLocal(
+      Saver.save("__ARMV7PILongThunk_" + Destination.getName()), STT_FUNC,
+      Offset, size(), &IS);
+  addSyntheticLocal("$a", STT_NOTYPE, Offset, 0, &IS);
 }

-template <class ELFT>
-void ThumbV7PILongThunk<ELFT>::writeTo(uint8_t *Buf, ThunkSection &IS) const {
+void ThumbV7PILongThunk::writeTo(uint8_t *Buf, ThunkSection &IS) const {
   const uint8_t Data[] = {
       0x4f, 0xf6, 0xf4, 0x7c, // P:  movw ip,:lower16:S - (P + (L1-P) + 4)
       0xc0, 0xf2, 0x00, 0x0c, //     movt ip,:upper16:S - (P + (L1-P+4) + 4)
       0xfc, 0x44,             // L1: add  r12, pc
       0x60, 0x47,             //     bx   r12
   };
-  uint64_t S = getARMThunkDestVA(this->Destination);
-  uint64_t P = this->ThunkSym->getVA();
+  uint64_t S = getARMThunkDestVA(Destination);
+  uint64_t P = ThunkSym->getVA();
   memcpy(Buf, Data, sizeof(Data));
   Target->relocateOne(Buf, R_ARM_THM_MOVW_PREL_NC, S - P - 12);
   Target->relocateOne(Buf + 4, R_ARM_THM_MOVT_PREL, S - P - 8);
 }

-template <class ELFT>
-void ThumbV7PILongThunk<ELFT>::addSymbols(ThunkSection &IS) {
-  this->ThunkSym = addSyntheticLocal(
-      Saver.save("__ThumbV7PILongThunk_" + this->Destination.getName()),
-      STT_FUNC, this->Offset, size(), &IS);
-  addSyntheticLocal("$t", STT_NOTYPE, this->Offset, 0, &IS);
+void ThumbV7PILongThunk::addSymbols(ThunkSection &IS) {
+  ThunkSym = addSyntheticLocal(
+      Saver.save("__ThumbV7PILongThunk_" + Destination.getName()), STT_FUNC,
+      Offset, size(), &IS);
+  addSyntheticLocal("$t", STT_NOTYPE, Offset, 0, &IS);
 }

 // Write MIPS LA25 thunk code to call PIC function from the non-PIC one.
-template <class ELFT>
-void MipsThunk<ELFT>::writeTo(uint8_t *Buf, ThunkSection &) const {
-  const endianness E = ELFT::TargetEndianness;
-
+void MipsThunk::writeTo(uint8_t *Buf, ThunkSection &) const {
   uint64_t S = this->Destination.getVA();
-  write32(Buf, 0x3c190000);                // lui   $25, %hi(func)
-  write32(Buf + 4, 0x08000000 | (S >> 2)); // j     func
-  write32(Buf + 8, 0x27390000);            // addiu $25, $25, %lo(func)
-  write32(Buf + 12, 0x00000000);           // nop
+  write32(Buf, 0x3c190000, Config->Endianness);                // lui   $25, %hi(func)
+  write32(Buf + 4, 0x08000000 | (S >> 2), Config->Endianness); // j     func
+  write32(Buf + 8, 0x27390000, Config->Endianness); // addiu $25, $25, %lo(func)
+  write32(Buf + 12, 0x00000000, Config->Endianness);           // nop
   Target->relocateOne(Buf, R_MIPS_HI16, S);
   Target->relocateOne(Buf + 8, R_MIPS_LO16, S);
 }

-template <class ELFT> void MipsThunk<ELFT>::addSymbols(ThunkSection &IS) {
-  this->ThunkSym = addSyntheticLocal(
-      Saver.save("__LA25Thunk_" + this->Destination.getName()), STT_FUNC,
-      this->Offset, size(), &IS);
+void MipsThunk::addSymbols(ThunkSection &IS) {
+  ThunkSym =
+      addSyntheticLocal(Saver.save("__LA25Thunk_" + Destination.getName()),
+                        STT_FUNC, Offset, size(), &IS);
 }

-template <class ELFT>
-InputSection *MipsThunk<ELFT>::getTargetInputSection() const {
-  auto *DR = dyn_cast<DefinedRegular>(&this->Destination);
+InputSection *MipsThunk::getTargetInputSection() const {
+  auto *DR = dyn_cast<DefinedRegular>(&Destination);
   return dyn_cast<InputSection>(DR->Section);
 }

@@ -228,7 +216,7 @@ Thunk::Thunk(const SymbolBody &D) : Destination(D), Offset(0) {}
 Thunk::~Thunk() = default;

 // Creates a thunk for Thumb-ARM interworking.
-template <class ELFT> static Thunk *addThunkArm(uint32_t Reloc, SymbolBody &S) {
+static Thunk *addThunkArm(uint32_t Reloc, SymbolBody &S) {
   // ARM relocations need ARM to Thumb interworking Thunks.
   // Thumb relocations need Thumb to ARM relocations.
   // Use position independent Thunks if we require position independent code.
@@ -237,33 +225,29 @@ template <class ELFT> static Thunk *addThunkArm(uint32_t Reloc, SymbolBody &S) {
   case R_ARM_PLT32:
   case R_ARM_JUMP24:
     if (Config->Pic)
-      return make<ARMV7PILongThunk<ELFT>>(S);
-    return make<ARMV7ABSLongThunk<ELFT>>(S);
+      return make<ARMV7PILongThunk>(S);
+    return make<ARMV7ABSLongThunk>(S);
   case R_ARM_THM_JUMP19:
   case R_ARM_THM_JUMP24:
     if (Config->Pic)
-      return make<ThumbV7PILongThunk<ELFT>>(S);
-    return make<ThumbV7ABSLongThunk<ELFT>>(S);
+      return make<ThumbV7PILongThunk>(S);
+    return make<ThumbV7ABSLongThunk>(S);
   }
   fatal("unrecognized relocation type");
 }

-template <class ELFT> static Thunk *addThunkMips(SymbolBody &S) {
-  return make<MipsThunk<ELFT>>(S);
+static Thunk *addThunkMips(SymbolBody &S) {
+  return make<MipsThunk>(S);
 }

-template <class ELFT> Thunk *addThunk(uint32_t RelocType, SymbolBody &S) {
+Thunk *addThunk(uint32_t RelocType, SymbolBody &S) {
   if (Config->EMachine == EM_ARM)
-    return addThunkArm<ELFT>(RelocType, S);
+    return addThunkArm(RelocType, S);
   else if (Config->EMachine == EM_MIPS)
-    return addThunkMips<ELFT>(S);
+    return addThunkMips(S);
   llvm_unreachable("add Thunk only supported for ARM and Mips");
   return nullptr;
 }

-template Thunk *addThunk<ELF32LE>(uint32_t, SymbolBody &);
-template Thunk *addThunk<ELF32BE>(uint32_t, SymbolBody &);
-template Thunk *addThunk<ELF64LE>(uint32_t, SymbolBody &);
-template Thunk *addThunk<ELF64BE>(uint32_t, SymbolBody &);
-
 } // end namespace elf
 } // end namespace lld
diff --git a/contrib/llvm/tools/lld/ELF/Thunks.h b/contrib/llvm/tools/lld/ELF/Thunks.h
index a9f49279f3f2..38ee090e75e1 100644
--- a/contrib/llvm/tools/lld/ELF/Thunks.h
+++ b/contrib/llvm/tools/lld/ELF/Thunks.h
@@ -51,7 +51,7 @@ class Thunk {
 // For a Relocation to symbol S create a Thunk to be added to a synthetic
 // ThunkSection. At present there are implementations for ARM and Mips Thunks.
-template <class ELFT> Thunk *addThunk(uint32_t RelocType, SymbolBody &S);
+Thunk *addThunk(uint32_t RelocType, SymbolBody &S);

 } // namespace elf
 } // namespace lld
diff --git a/contrib/llvm/tools/lld/ELF/Writer.cpp b/contrib/llvm/tools/lld/ELF/Writer.cpp
index 35778f9c16c3..d077cea775e4 100644
--- a/contrib/llvm/tools/lld/ELF/Writer.cpp
+++ b/contrib/llvm/tools/lld/ELF/Writer.cpp
@@ -1223,7 +1223,7 @@ template <class ELFT> void Writer<ELFT>::finalizeSections() {
     // we need to assign addresses so that we can tell if jump instructions
     // are out of range. This will need to turn into a loop that converges
     // when no more Thunks are added
-    ThunkCreator<ELFT> TC;
+    ThunkCreator TC;
     if (TC.createThunks(OutputSections))
       applySynthetic({InX::MipsGot},
                      [](SyntheticSection *SS) { SS->updateAllocSize(); });
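A note on the immediates in the position-independent thunks above: in ARM state the PC reads as the address of the current instruction plus 8, and in Thumb state plus 4, so the movw/movt pair has to materialize S minus what the trailing "add ip, ip, pc" at L1 = P + 8 will actually observe. That is where the S - P - 16 (ARM) and S - P - 12 (Thumb) values passed to relocateOne come from; the movt value differs by 4 only because it is expressed relative to its own instruction at P + 4. A minimal standalone sketch of that arithmetic (illustrative names, not lld code):

#include <cassert>
#include <cstdint>

// PC bias of the target state: ARM reads PC as "current + 8", Thumb as
// "current + 4".
constexpr uint64_t pcBias(bool Thumb) { return Thumb ? 4 : 8; }

// The 32-bit constant the movw/movt pair must build so that
// "add ip, ip, pc" at L1 = P + 8 lands on S.
constexpr int64_t piThunkConstant(uint64_t S, uint64_t P, bool Thumb) {
  uint64_t L1 = P + 8; // two 4-byte instructions after the movw at P
  return (int64_t)(S - (L1 + pcBias(Thumb)));
}

int main() {
  uint64_t S = 0x20000, P = 0x10000;
  assert(piThunkConstant(S, P, false) == (int64_t)(S - P - 16)); // ARM case
  assert(piThunkConstant(S, P, true) == (int64_t)(S - P - 12));  // Thumb case
  return 0;
}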
diff --git a/contrib/llvm/tools/lldb/include/lldb/Symbol/SymbolContext.h b/contrib/llvm/tools/lldb/include/lldb/Symbol/SymbolContext.h
index e4dcc73bb52b..f84b7cf916fe 100644
--- a/contrib/llvm/tools/lldb/include/lldb/Symbol/SymbolContext.h
+++ b/contrib/llvm/tools/lldb/include/lldb/Symbol/SymbolContext.h
@@ -235,6 +235,29 @@ class SymbolContext {
   bool GetAddressRangeFromHereToEndLine(uint32_t end_line, AddressRange &range,
                                         Status &error);

+  //------------------------------------------------------------------
+  /// Find the best global data symbol visible from this context.
+  ///
+  /// Symbol priority is:
+  ///   - extern symbol in the current module if there is one
+  ///   - non-extern symbol in the current module if there is one
+  ///   - extern symbol in the target
+  ///   - non-extern symbol in the target
+  /// It is an error if the highest-priority result is ambiguous.
+  ///
+  /// @param[in] name
+  ///   The name of the symbol to search for.
+  ///
+  /// @param[out] error
+  ///   An error that will be populated with a message if there was an
+  ///   ambiguous result. The error will not be populated if no result
+  ///   was found.
+  ///
+  /// @return
+  ///   The symbol that was found, or \b nullptr if none was found.
+  //------------------------------------------------------------------
+  const Symbol *FindBestGlobalDataSymbol(const ConstString &name,
+                                         Status &error);
+
   void GetDescription(Stream *s, lldb::DescriptionLevel level,
                       Target *target) const;
diff --git a/contrib/llvm/tools/lldb/source/Breakpoint/Breakpoint.cpp b/contrib/llvm/tools/lldb/source/Breakpoint/Breakpoint.cpp
index 4c58f8231344..17c104ba0c60 100644
--- a/contrib/llvm/tools/lldb/source/Breakpoint/Breakpoint.cpp
+++ b/contrib/llvm/tools/lldb/source/Breakpoint/Breakpoint.cpp
@@ -837,8 +837,8 @@ bool Breakpoint::AddName(llvm::StringRef new_name, Status &error) {
   if (new_name.empty())
     return false;
   if (!BreakpointID::StringIsBreakpointName(new_name, error)) {
-    error.SetErrorStringWithFormat("input name \"%s\" not a breakpoint name.",
-                                   new_name);
+    error.SetErrorStringWithFormatv("input name \"{0}\" not a breakpoint name.",
+                                    new_name);
     return false;
   }
   if (!error.Success())
diff --git a/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp b/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
index 256d46a15420..8fde41052192 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
+++ b/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.cpp
@@ -591,103 +591,6 @@ addr_t ClangExpressionDeclMap::GetSymbolAddress(const ConstString &name,
                           symbol_type);
 }

-const Symbol *ClangExpressionDeclMap::FindGlobalDataSymbol(
-    Target &target, const ConstString &name, lldb_private::Module *module) {
-  SymbolContextList sc_list;
-
-  if (module)
-    module->FindSymbolsWithNameAndType(name, eSymbolTypeAny, sc_list);
-  else
-    target.GetImages().FindSymbolsWithNameAndType(name, eSymbolTypeAny,
-                                                  sc_list);
-
-  const uint32_t matches = sc_list.GetSize();
-  for (uint32_t i = 0; i < matches; ++i) {
-    SymbolContext sym_ctx;
-    sc_list.GetContextAtIndex(i, sym_ctx);
-    if (sym_ctx.symbol) {
-      const Symbol *symbol = sym_ctx.symbol;
-      const Address sym_address = symbol->GetAddress();
-
-      if (sym_address.IsValid()) {
-        switch (symbol->GetType()) {
-        case eSymbolTypeData:
-        case eSymbolTypeRuntime:
-        case eSymbolTypeAbsolute:
-        case eSymbolTypeObjCClass:
-        case eSymbolTypeObjCMetaClass:
-        case eSymbolTypeObjCIVar:
-          if (symbol->GetDemangledNameIsSynthesized()) {
-            // If the demangled name was synthesized, then don't use it
-            // for expressions. Only let the symbol match if the mangled
-            // named matches for these symbols.
-            if (symbol->GetMangled().GetMangledName() != name)
-              break;
-          }
-          return symbol;
-
-        case eSymbolTypeReExported: {
-          ConstString reexport_name = symbol->GetReExportedSymbolName();
-          if (reexport_name) {
-            ModuleSP reexport_module_sp;
-            ModuleSpec reexport_module_spec;
-            reexport_module_spec.GetPlatformFileSpec() =
-                symbol->GetReExportedSymbolSharedLibrary();
-            if (reexport_module_spec.GetPlatformFileSpec()) {
-              reexport_module_sp =
-                  target.GetImages().FindFirstModule(reexport_module_spec);
-              if (!reexport_module_sp) {
-                reexport_module_spec.GetPlatformFileSpec()
-                    .GetDirectory()
-                    .Clear();
-                reexport_module_sp =
-                    target.GetImages().FindFirstModule(reexport_module_spec);
-              }
-            }
-            // Don't allow us to try and resolve a re-exported symbol if it is
-            // the same
-            // as the current symbol
-            if (name == symbol->GetReExportedSymbolName() &&
-                module == reexport_module_sp.get())
-              return NULL;
-
-            return FindGlobalDataSymbol(target,
-                                        symbol->GetReExportedSymbolName(),
-                                        reexport_module_sp.get());
-          }
-        } break;
-
-        case eSymbolTypeCode: // We already lookup functions elsewhere
-        case eSymbolTypeVariable:
-        case eSymbolTypeLocal:
-        case eSymbolTypeParam:
-        case eSymbolTypeTrampoline:
-        case eSymbolTypeInvalid:
-        case eSymbolTypeException:
-        case eSymbolTypeSourceFile:
-        case eSymbolTypeHeaderFile:
-        case eSymbolTypeObjectFile:
-        case eSymbolTypeCommonBlock:
-        case eSymbolTypeBlock:
-        case eSymbolTypeVariableType:
-        case eSymbolTypeLineEntry:
-        case eSymbolTypeLineHeader:
-        case eSymbolTypeScopeBegin:
-        case eSymbolTypeScopeEnd:
-        case eSymbolTypeAdditional:
-        case eSymbolTypeCompiler:
-        case eSymbolTypeInstrumentation:
-        case eSymbolTypeUndefined:
-        case eSymbolTypeResolver:
-          break;
-        }
-      }
-    }
-  }
-
-  return NULL;
-}
-
 lldb::VariableSP ClangExpressionDeclMap::FindGlobalVariable(
     Target &target, ModuleSP &module, const ConstString &name,
     CompilerDeclContext *namespace_decl, TypeFromUser *type) {
@@ -1526,9 +1429,18 @@ void ClangExpressionDeclMap::FindExternalVisibleDecls(
       // We couldn't find a non-symbol variable for this. Now we'll hunt for
       // a generic
       // data symbol, and -- if it is found -- treat it as a variable.
-
-      const Symbol *data_symbol = FindGlobalDataSymbol(*target, name);
-
+      Status error;
+
+      const Symbol *data_symbol =
+          m_parser_vars->m_sym_ctx.FindBestGlobalDataSymbol(name, error);
+
+      if (!error.Success()) {
+        const unsigned diag_id =
+            m_ast_context->getDiagnostics().getCustomDiagID(
+                clang::DiagnosticsEngine::Level::Error, "%0");
+        m_ast_context->getDiagnostics().Report(diag_id) << error.AsCString();
+      }
+
       if (data_symbol) {
         std::string warning("got name from symbols: ");
         warning.append(name.AsCString());
diff --git a/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h b/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h
index ac88c1d6b891..e8a9ba6862db 100644
--- a/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h
+++ b/contrib/llvm/tools/lldb/source/Plugins/ExpressionParser/Clang/ClangExpressionDeclMap.h
@@ -447,24 +447,6 @@ class ClangExpressionDeclMap : public ClangASTSource {
   //----------------------------------------------------------------------
   uint64_t GetParserID() { return (uint64_t) this; }

-  //------------------------------------------------------------------
-  /// Given a target, find a data symbol that has the given name.
-  ///
-  /// @param[in] target
-  ///   The target to use as the basis for the search.
-  ///
-  /// @param[in] name
-  ///   The name as a plain C string.
-  ///
-  /// @param[in] module
-  ///   The module to limit the search to. This can be NULL
-  ///
-  /// @return
-  ///   The LLDB Symbol found, or NULL if none was found.
-  //------------------------------------------------------------------
-  const Symbol *FindGlobalDataSymbol(Target &target, const ConstString &name,
-                                     Module *module = NULL);
-
   //------------------------------------------------------------------
   /// Given a target, find a variable that matches the given name and
   /// type.
diff --git a/contrib/llvm/tools/lldb/source/Symbol/SymbolContext.cpp b/contrib/llvm/tools/lldb/source/Symbol/SymbolContext.cpp
index 5ea6f91200c1..4ac35010c74c 100644
--- a/contrib/llvm/tools/lldb/source/Symbol/SymbolContext.cpp
+++ b/contrib/llvm/tools/lldb/source/Symbol/SymbolContext.cpp
@@ -799,6 +799,163 @@ bool SymbolContext::GetAddressRangeFromHereToEndLine(uint32_t end_line,
   return true;
 }

+const Symbol *
+SymbolContext::FindBestGlobalDataSymbol(const ConstString &name, Status &error) {
+  error.Clear();
+
+  if (!target_sp) {
+    return nullptr;
+  }
+
+  Target &target = *target_sp;
+  Module *module = module_sp.get();
+
+  auto ProcessMatches = [this, &name, &target, module]
+      (SymbolContextList &sc_list, Status &error) -> const Symbol* {
+    llvm::SmallVector<const Symbol *, 1> external_symbols;
+    llvm::SmallVector<const Symbol *, 1> internal_symbols;
+    const uint32_t matches = sc_list.GetSize();
+    for (uint32_t i = 0; i < matches; ++i) {
+      SymbolContext sym_ctx;
+      sc_list.GetContextAtIndex(i, sym_ctx);
+      if (sym_ctx.symbol) {
+        const Symbol *symbol = sym_ctx.symbol;
+        const Address sym_address = symbol->GetAddress();
+
+        if (sym_address.IsValid()) {
+          switch (symbol->GetType()) {
+          case eSymbolTypeData:
+          case eSymbolTypeRuntime:
+          case eSymbolTypeAbsolute:
+          case eSymbolTypeObjCClass:
+          case eSymbolTypeObjCMetaClass:
+          case eSymbolTypeObjCIVar:
+            if (symbol->GetDemangledNameIsSynthesized()) {
+              // If the demangled name was synthesized, then don't use it
+              // for expressions. Only let the symbol match if the mangled
+              // named matches for these symbols.
+              if (symbol->GetMangled().GetMangledName() != name)
+                break;
+            }
+            if (symbol->IsExternal()) {
+              external_symbols.push_back(symbol);
+            } else {
+              internal_symbols.push_back(symbol);
+            }
+            break;
+          case eSymbolTypeReExported: {
+            ConstString reexport_name = symbol->GetReExportedSymbolName();
+            if (reexport_name) {
+              ModuleSP reexport_module_sp;
+              ModuleSpec reexport_module_spec;
+              reexport_module_spec.GetPlatformFileSpec() =
+                  symbol->GetReExportedSymbolSharedLibrary();
+              if (reexport_module_spec.GetPlatformFileSpec()) {
+                reexport_module_sp =
+                    target.GetImages().FindFirstModule(reexport_module_spec);
+                if (!reexport_module_sp) {
+                  reexport_module_spec.GetPlatformFileSpec()
+                      .GetDirectory()
+                      .Clear();
+                  reexport_module_sp =
+                      target.GetImages().FindFirstModule(reexport_module_spec);
+                }
+              }
+              // Don't allow us to try and resolve a re-exported symbol if it is
+              // the same as the current symbol
+              if (name == symbol->GetReExportedSymbolName() &&
+                  module == reexport_module_sp.get())
+                return nullptr;
+
+              return FindBestGlobalDataSymbol(
+                  symbol->GetReExportedSymbolName(), error);
+            }
+          } break;
+
+          case eSymbolTypeCode: // We already lookup functions elsewhere
+          case eSymbolTypeVariable:
+          case eSymbolTypeLocal:
+          case eSymbolTypeParam:
+          case eSymbolTypeTrampoline:
+          case eSymbolTypeInvalid:
+          case eSymbolTypeException:
+          case eSymbolTypeSourceFile:
+          case eSymbolTypeHeaderFile:
+          case eSymbolTypeObjectFile:
+          case eSymbolTypeCommonBlock:
+          case eSymbolTypeBlock:
+          case eSymbolTypeVariableType:
+          case eSymbolTypeLineEntry:
+          case eSymbolTypeLineHeader:
+          case eSymbolTypeScopeBegin:
+          case eSymbolTypeScopeEnd:
+          case eSymbolTypeAdditional:
+          case eSymbolTypeCompiler:
+          case eSymbolTypeInstrumentation:
+          case eSymbolTypeUndefined:
+          case eSymbolTypeResolver:
+            break;
+          }
+        }
+      }
+    }
+
+    if (external_symbols.size() > 1) {
+      StreamString ss;
+      ss.Printf("Multiple external symbols found for '%s'\n", name.AsCString());
+      for (const Symbol *symbol : external_symbols) {
+        symbol->GetDescription(&ss, eDescriptionLevelFull, &target);
+      }
+      ss.PutChar('\n');
+      error.SetErrorString(ss.GetData());
+      return nullptr;
+    } else if (external_symbols.size()) {
+      return external_symbols[0];
+    } else if (internal_symbols.size() > 1) {
+      StreamString ss;
+      ss.Printf("Multiple internal symbols found for '%s'\n", name.AsCString());
+      for (const Symbol *symbol : internal_symbols) {
+        symbol->GetDescription(&ss, eDescriptionLevelVerbose, &target);
+        ss.PutChar('\n');
+      }
+      error.SetErrorString(ss.GetData());
+      return nullptr;
+    } else if (internal_symbols.size()) {
+      return internal_symbols[0];
+    } else {
+      return nullptr;
+    }
+  };
+
+  if (module) {
+    SymbolContextList sc_list;
+    module->FindSymbolsWithNameAndType(name, eSymbolTypeAny, sc_list);
+    const Symbol *const module_symbol = ProcessMatches(sc_list, error);
+
+    if (!error.Success()) {
+      return nullptr;
+    } else if (module_symbol) {
+      return module_symbol;
+    }
+  }
+
+  {
+    SymbolContextList sc_list;
+    target.GetImages().FindSymbolsWithNameAndType(name, eSymbolTypeAny,
+                                                  sc_list);
+    const Symbol *const target_symbol = ProcessMatches(sc_list, error);
+
+    if (!error.Success()) {
+      return nullptr;
+    } else if (target_symbol) {
+      return target_symbol;
+    }
+  }
+
+  return nullptr; // no error; we just didn't find anything
+}
+
+
 //----------------------------------------------------------------------
 //
 //  SymbolContextSpecifier
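The selection policy documented on FindBestGlobalDataSymbol above reduces to four priority buckets, with ambiguity inside the winning bucket reported as an error. A compact, self-contained model of that ordering (plain C++ stand-ins, not LLDB's types):

#include <optional>
#include <string>
#include <vector>

struct Sym {
  std::string name;
  bool external;  // extern vs. non-extern
  bool in_module; // found in the current module vs. the whole target
};

// Buckets, highest priority first:
//   0: extern in module, 1: non-extern in module,
//   2: extern in target, 3: non-extern in target.
std::optional<Sym> pickBest(const std::vector<Sym> &matches, bool &ambiguous) {
  ambiguous = false;
  std::vector<Sym> buckets[4];
  for (const Sym &s : matches)
    buckets[(s.in_module ? 0 : 2) + (s.external ? 0 : 1)].push_back(s);
  for (const auto &b : buckets) {
    if (b.size() == 1)
      return b[0];
    if (b.size() > 1) { // like the "Multiple ... symbols found" error above
      ambiguous = true;
      return std::nullopt;
    }
  }
  return std::nullopt; // no match, and no error
}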
diff --git a/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp b/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp
index f7d6ec53b030..ab4477ed7bad 100644
--- a/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/Analyze.cpp
@@ -76,26 +76,15 @@ Error AnalysisStyle::dump() {

   TypeDatabase TypeDB(Tpi->getNumTypeRecords());
   TypeDatabaseVisitor DBV(TypeDB);
-  TypeDeserializer Deserializer;
   TypeVisitorCallbackPipeline Pipeline;
   HashLookupVisitor Hasher(*Tpi);

-  // Deserialize the types
-  Pipeline.addCallbackToPipeline(Deserializer);
   // Add them to the database
   Pipeline.addCallbackToPipeline(DBV);
   // Store their hash values
   Pipeline.addCallbackToPipeline(Hasher);

-  CVTypeVisitor Visitor(Pipeline);
-
-  bool Error = false;
-  for (auto Item : Tpi->types(&Error)) {
-    if (auto EC = Visitor.visitTypeRecord(Item))
-      return EC;
-  }
-  if (Error)
-    return make_error<RawError>(raw_error_code::corrupt_file,
-                                "TPI stream contained corrupt record");
+  if (auto EC = codeview::visitTypeStream(Tpi->typeArray(), Pipeline))
+    return EC;

   auto &Adjusters = Tpi->getHashAdjusters();
   DenseSet AdjusterSet;
diff --git a/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp b/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp
index e975a5220af6..c4fecb80ea5a 100644
--- a/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/LLVMOutputStyle.cpp
@@ -178,11 +178,10 @@ class C13RawVisitor : public C13DebugFragmentVisitor {
 private:
   Error dumpTypeRecord(StringRef Label, TypeDatabase &DB, TypeIndex Index) {
     CompactTypeDumpVisitor CTDV(DB, Index, &P);
-    CVTypeVisitor Visitor(CTDV);
     DictScope D(P, Label);
     if (DB.contains(Index)) {
       CVType &Type = DB.getTypeRecord(Index);
-      if (auto EC = Visitor.visitTypeRecord(Type))
+      if (auto EC = codeview::visitTypeRecord(Type, CTDV))
         return EC;
     } else {
       P.printString(
@@ -629,7 +628,6 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {

   std::vector<std::unique_ptr<TypeVisitorCallbacks>> Visitors;

-  Visitors.push_back(make_unique<TypeDeserializer>());
   if (!StreamDB.hasValue()) {
     StreamDB.emplace(Tpi->getNumTypeRecords());
     Visitors.push_back(make_unique<TypeDatabaseVisitor>(*StreamDB));
@@ -659,8 +657,6 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
   for (const auto &V : Visitors)
     Pipeline.addCallbackToPipeline(*V);

-  CVTypeVisitor Visitor(Pipeline);
-
   if (DumpRecords || DumpRecordBytes)
     RecordScope = llvm::make_unique<ListScope>(P, "Records");

@@ -673,9 +669,10 @@ Error LLVMOutputStyle::dumpTpiStream(uint32_t StreamIdx) {
     if ((DumpRecords || DumpRecordBytes) && !opts::raw::CompactRecords)
       OneRecordScope = llvm::make_unique<DictScope>(P, "");

-    if (auto EC = Visitor.visitTypeRecord(Type))
+    if (auto EC = codeview::visitTypeRecord(Type, Pipeline))
       return EC;
-    T.setIndex(T.getIndex() + 1);
+
+    ++T;
   }
   if (HadError)
     return make_error<RawError>(raw_error_code::corrupt_file,
@@ -730,22 +727,19 @@ Error LLVMOutputStyle::buildTypeDatabase(uint32_t SN) {

   DB.emplace(Tpi->getNumTypeRecords());

-  TypeVisitorCallbackPipeline Pipeline;
-  TypeDeserializer Deserializer;
   TypeDatabaseVisitor DBV(*DB);
-  Pipeline.addCallbackToPipeline(Deserializer);
-  Pipeline.addCallbackToPipeline(DBV);

   auto HashValues = Tpi->getHashValues();
-  std::unique_ptr<TpiHashVerifier> HashVerifier;
-  if (!HashValues.empty()) {
-    HashVerifier =
-        make_unique<TpiHashVerifier>(HashValues, Tpi->getNumHashBuckets());
-    Pipeline.addCallbackToPipeline(*HashVerifier);
-  }
+  if (HashValues.empty())
+    return codeview::visitTypeStream(Tpi->typeArray(), DBV);

-  CVTypeVisitor Visitor(Pipeline);
-  return Visitor.visitTypeStream(Tpi->types(nullptr));
+  TypeVisitorCallbackPipeline Pipeline;
+  Pipeline.addCallbackToPipeline(DBV);
+
+  TpiHashVerifier HashVerifier(HashValues, Tpi->getNumHashBuckets());
+  Pipeline.addCallbackToPipeline(HashVerifier);
+
+  return codeview::visitTypeStream(Tpi->typeArray(), Pipeline);
 }

 Error LLVMOutputStyle::dumpDbiStream() {
diff --git a/contrib/llvm/tools/llvm-pdbdump/PdbYaml.cpp b/contrib/llvm/tools/llvm-pdbdump/PdbYaml.cpp
index d6ba7d645459..6527bec31a77 100644
--- a/contrib/llvm/tools/llvm-pdbdump/PdbYaml.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/PdbYaml.cpp
@@ -371,16 +371,14 @@ void MappingContextTraits::mapping(
 void MappingContextTraits<pdb::yaml::PdbTpiRecord,
                           pdb::yaml::SerializationContext>::
     mapping(IO &IO, pdb::yaml::PdbTpiRecord &Obj,
             pdb::yaml::SerializationContext &Context) {
-  codeview::TypeVisitorCallbackPipeline Pipeline;
-  codeview::TypeDeserializer Deserializer;
-  codeview::TypeSerializer Serializer(Context.Allocator);
-  pdb::TpiHashUpdater Hasher;
-
   if (IO.outputting()) {
     // For PDB to Yaml, deserialize into a high level record type, then dump it.
-    Pipeline.addCallbackToPipeline(Deserializer);
-    Pipeline.addCallbackToPipeline(Context.Dumper);
+    consumeError(codeview::visitTypeRecord(Obj.Record, Context.Dumper));
   } else {
+    codeview::TypeVisitorCallbackPipeline Pipeline;
+    codeview::TypeSerializer Serializer(Context.Allocator);
+    pdb::TpiHashUpdater Hasher;
     // For Yaml to PDB, extract from the high level record type, then write it
     // to bytes.
@@ -391,9 +389,9 @@ void MappingContextTraits::
     Pipeline.addCallbackToPipeline(Context.Dumper);
     Pipeline.addCallbackToPipeline(Serializer);
     Pipeline.addCallbackToPipeline(Hasher);
+    consumeError(codeview::visitTypeRecord(Obj.Record, Pipeline,
+                                           codeview::VDS_BytesExternal));
   }
-  codeview::CVTypeVisitor Visitor(Pipeline);
-  consumeError(Visitor.visitTypeRecord(Obj.Record));

   Context.ActiveSerializer = nullptr;
 }
diff --git a/contrib/llvm/tools/llvm-pdbdump/YamlTypeDumper.cpp b/contrib/llvm/tools/llvm-pdbdump/YamlTypeDumper.cpp
index b4eb197e866a..3e447ca60b61 100644
--- a/contrib/llvm/tools/llvm-pdbdump/YamlTypeDumper.cpp
+++ b/contrib/llvm/tools/llvm-pdbdump/YamlTypeDumper.cpp
@@ -280,16 +280,8 @@ bool ScalarTraits::mustQuote(StringRef Scalar) { return false; }

 void MappingContextTraits<CVType, pdb::yaml::SerializationContext>::mapping(
     IO &IO, CVType &Record, pdb::yaml::SerializationContext &Context) {
-  if (IO.outputting()) {
-    codeview::TypeDeserializer Deserializer;
-
-    codeview::TypeVisitorCallbackPipeline Pipeline;
-    Pipeline.addCallbackToPipeline(Deserializer);
-    Pipeline.addCallbackToPipeline(Context.Dumper);
-
-    codeview::CVTypeVisitor Visitor(Pipeline);
-    consumeError(Visitor.visitTypeRecord(Record));
-  }
+  if (IO.outputting())
+    consumeError(codeview::visitTypeRecord(Record, Context.Dumper));
 }

 void MappingTraits<StringIdRecord>::mapping(IO &IO, StringIdRecord &String) {
@@ -556,26 +548,17 @@ void llvm::codeview::yaml::YamlTypeDumperCallbacks::visitKnownRecordImpl(
     // (top-level and member fields all have the exact same Yaml syntax so use
     // the same parser).
     FieldListRecordSplitter Splitter(FieldListRecords);
-    CVTypeVisitor V(Splitter);
-    consumeError(V.visitFieldListMemberStream(FieldList.Data));
-    YamlIO.mapRequired("FieldList", FieldListRecords, Context);
-  } else {
-    // If we are not outputting, then the array contains no data starting out,
-    // and is instead populated from the sequence represented by the yaml --
-    // again, using the same logic that we use for top-level records.
-    assert(Context.ActiveSerializer && "There is no active serializer!");
-    codeview::TypeVisitorCallbackPipeline Pipeline;
-    pdb::TpiHashUpdater Hasher;
-
-    // For Yaml to PDB, dump it (to fill out the record fields from the Yaml)
-    // then serialize those fields to bytes, then update their hashes.
-    Pipeline.addCallbackToPipeline(Context.Dumper);
-    Pipeline.addCallbackToPipeline(*Context.ActiveSerializer);
-    Pipeline.addCallbackToPipeline(Hasher);
-
-    codeview::CVTypeVisitor Visitor(Pipeline);
-    YamlIO.mapRequired("FieldList", FieldListRecords, Visitor);
+    consumeError(codeview::visitMemberRecordStream(FieldList.Data, Splitter));
   }
+  // Note that if we're not outputting (i.e. Yaml -> PDB) the result of this
+  // mapping gets lost, as the records are simply stored in this locally scoped
+  // vector. What's important though is they are all sharing a single
+  // Serializer
+  // instance (in `Context.ActiveSerializer`), and that is building up a list of
+  // all the types. The fact that we need a throwaway vector here is just to
+  // appease the YAML API to treat this as a sequence and do this mapping once
+  // for each YAML Sequence element in the input Yaml stream.
+  YamlIO.mapRequired("FieldList", FieldListRecords, Context);
 }

 namespace llvm {
@@ -585,29 +568,22 @@ struct MappingContextTraits<pdb::yaml::PdbTpiFieldListRecord,
   static void mapping(IO &IO, pdb::yaml::PdbTpiFieldListRecord &Obj,
                       pdb::yaml::SerializationContext &Context) {
-    assert(IO.outputting());
-    codeview::TypeVisitorCallbackPipeline Pipeline;
+    if (IO.outputting())
+      consumeError(codeview::visitMemberRecord(Obj.Record, Context.Dumper));
+    else {
+      // If we are not outputting, then the array contains no data starting out,
+      // and is instead populated from the sequence represented by the yaml --
+      // again, using the same logic that we use for top-level records.
+      assert(Context.ActiveSerializer && "There is no active serializer!");
+      codeview::TypeVisitorCallbackPipeline Pipeline;
+      pdb::TpiHashUpdater Hasher;

-    BinaryByteStream Data(Obj.Record.Data, llvm::support::little);
-    BinaryStreamReader FieldReader(Data);
-    codeview::FieldListDeserializer Deserializer(FieldReader);
-
-    // For PDB to Yaml, deserialize into a high level record type, then dump
-    // it.
-    Pipeline.addCallbackToPipeline(Deserializer);
-    Pipeline.addCallbackToPipeline(Context.Dumper);
-
-    codeview::CVTypeVisitor Visitor(Pipeline);
-    consumeError(Visitor.visitMemberRecord(Obj.Record));
-  }
-};
-
-template <>
-struct MappingContextTraits<pdb::yaml::PdbTpiFieldListRecord,
-                            codeview::CVTypeVisitor> {
-  static void mapping(IO &IO, pdb::yaml::PdbTpiFieldListRecord &Obj,
-                      codeview::CVTypeVisitor &Visitor) {
-    consumeError(Visitor.visitMemberRecord(Obj.Record));
+      Pipeline.addCallbackToPipeline(Context.Dumper);
+      Pipeline.addCallbackToPipeline(*Context.ActiveSerializer);
+      Pipeline.addCallbackToPipeline(Hasher);
+      consumeError(
+          codeview::visitMemberRecord(Obj.Record, Pipeline, VDS_BytesExternal));
+    }
   }
 };
 }
diff --git a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
index a5c2ea6c7aca..264175ae9677 100644
--- a/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/AsmMatcherEmitter.cpp
@@ -763,7 +763,8 @@ class AsmMatcherInfo {

 } // end anonymous namespace

-void MatchableInfo::dump() const {
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void MatchableInfo::dump() const {
   errs() << TheDef->getName() << " -- " << "flattened:\"" << AsmString <<"\"\n";

   for (unsigned i = 0, e = AsmOperands.size(); i != e; ++i) {
@@ -772,6 +773,7 @@ void MatchableInfo::dump() const {
     errs() << '\"' << Op.Token << "\"\n";
   }
 }
+#endif

 static std::pair<StringRef, StringRef>
 parseTwoOperandConstraint(StringRef S, ArrayRef<SMLoc> Loc) {
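Each llvm-pdbdump hunk above makes the same substitution: a hand-rolled CVTypeVisitor with an explicit TypeDeserializer at the head of the pipeline becomes a call to the codeview::visitTypeRecord / visitTypeStream / visitMemberRecord entry points, which deserialize internally and feed each record to every callback in order. A self-contained toy model of that control flow (not the LLVM API, just its shape):

#include <functional>
#include <vector>

using Record = int; // stand-in for a CodeView type record
using Callback = std::function<bool(const Record &)>; // false = error

struct Pipeline {
  std::vector<Callback> Callbacks;
  void addCallbackToPipeline(Callback C) { Callbacks.push_back(std::move(C)); }
};

// Plays the role of codeview::visitTypeStream(Types, Pipeline): the caller
// no longer owns the iteration or the deserialization step.
bool visitTypeStream(const std::vector<Record> &Types, Pipeline &P) {
  for (const Record &R : Types)
    for (Callback &C : P.Callbacks)
      if (!C(R))
        return false; // stop on the first failing callback
  return true;
}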
diff --git a/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp b/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 65a1ea2f0f21..dc022fe1ceb2 100644
--- a/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/contrib/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -775,6 +775,8 @@ class InstructionOperandMatcher : public OperandPredicateMatcher {
   void emitCxxCaptureStmts(raw_ostream &OS, RuleMatcher &Rule,
                            StringRef OperandExpr) const override {
     OS << "if (!" << OperandExpr + ".isReg())\n"
+       << "  return false;\n"
+       << "if (TRI.isPhysicalRegister(" << OperandExpr + ".getReg()))\n"
        << "  return false;\n";
     std::string InsnVarName = Rule.defineInsnVar(
         OS, *InsnMatcher,
@@ -1242,6 +1244,8 @@ class GlobalISelEmitter {
   Error importExplicitUseRenderer(BuildMIAction &DstMIBuilder,
                                   TreePatternNode *DstChild,
                                   const InstructionMatcher &InsnMatcher) const;
+  Error importDefaultOperandRenderers(BuildMIAction &DstMIBuilder,
+                                      DagInit *DefaultOps) const;
   Error importImplicitDefRenderers(BuildMIAction &DstMIBuilder,
                                    const std::vector<Record *> &ImplicitDefs) const;
@@ -1321,8 +1325,27 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(

   // Match the used operands (i.e. the children of the operator).
   for (unsigned i = 0, e = Src->getNumChildren(); i != e; ++i) {
-    if (auto Error = importChildMatcher(InsnMatcher, Src->getChild(i), OpIdx++,
-                                        TempOpIdx))
+    TreePatternNode *SrcChild = Src->getChild(i);
+
+    // For G_INTRINSIC, the operand immediately following the defs is an
+    // intrinsic ID.
+    if (SrcGI.TheDef->getName() == "G_INTRINSIC" && i == 0) {
+      if (!SrcChild->isLeaf())
+        return failedImport("Expected IntInit containing intrinsic ID");
+
+      if (IntInit *SrcChildIntInit =
+              dyn_cast<IntInit>(SrcChild->getLeafValue())) {
+        OperandMatcher &OM =
+            InsnMatcher.addOperand(OpIdx++, SrcChild->getName(), TempOpIdx);
+        OM.addPredicate<IntOperandMatcher>(SrcChildIntInit->getValue());
+        continue;
+      }
+
+      return failedImport("Expected IntInit containing instrinsic ID)");
+    }
+
+    if (auto Error =
+            importChildMatcher(InsnMatcher, SrcChild, OpIdx++, TempOpIdx))
       return std::move(Error);
   }

@@ -1357,7 +1380,7 @@ Error GlobalISelEmitter::importChildMatcher(InstructionMatcher &InsnMatcher,

   auto OpTyOrNone = MVTToLLT(ChildTypes.front().getConcrete());
   if (!OpTyOrNone)
-    return failedImport("Src operand has an unsupported type");
+    return failedImport("Src operand has an unsupported type (" + to_string(*SrcChild) + ")");
   OM.addPredicate<LLTOperandMatcher>(*OpTyOrNone);

   // Check for nested instructions.
@@ -1509,59 +1532,23 @@ Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
     DstMIBuilder.addRenderer<CopyRenderer>(InsnMatcher, DstIOperand.Name);
   }

-  // Figure out which operands need defaults inserted. Operands that subclass
-  // OperandWithDefaultOps are considered from left to right until we have
-  // enough operands to render the instruction.
-  SmallSet<unsigned, 8> DefaultOperands;
-  unsigned DstINumUses = DstI.Operands.size() - DstI.Operands.NumDefs;
-  unsigned NumDefaultOperands = 0;
-  for (unsigned I = 0; I < DstINumUses &&
-                       DstINumUses > Dst->getNumChildren() + NumDefaultOperands;
-       ++I) {
-    const auto &DstIOperand = DstI.Operands[DstI.Operands.NumDefs + I];
-    if (DstIOperand.Rec->isSubClassOf("OperandWithDefaultOps")) {
-      DefaultOperands.insert(I);
-      NumDefaultOperands +=
-          DstIOperand.Rec->getValueAsDag("DefaultOps")->getNumArgs();
-    }
-  }
-  if (DstINumUses > Dst->getNumChildren() + DefaultOperands.size())
-    return failedImport("Insufficient operands supplied and default ops "
-                        "couldn't make up the shortfall");
-  if (DstINumUses < Dst->getNumChildren() + DefaultOperands.size())
-    return failedImport("Too many operands supplied");
-
   // Render the explicit uses.
   unsigned Child = 0;
+  unsigned DstINumUses = DstI.Operands.size() - DstI.Operands.NumDefs;
+  unsigned NumDefaultOps = 0;
   for (unsigned I = 0; I != DstINumUses; ++I) {
-    // If we need to insert default ops here, then do so.
-    if (DefaultOperands.count(I)) {
-      const auto &DstIOperand = DstI.Operands[DstI.Operands.NumDefs + I];
+    const auto &DstIOperand = DstI.Operands[DstI.Operands.NumDefs + I];

+    // If the operand has default values, introduce them now.
+    // FIXME: Until we have a decent test case that dictates we should do
+    // otherwise, we're going to assume that operands with default values cannot
+    // be specified in the patterns. Therefore, adding them will not cause us to
+    // end up with too many rendered operands.
+    if (DstIOperand.Rec->isSubClassOf("OperandWithDefaultOps")) {
       DagInit *DefaultOps = DstIOperand.Rec->getValueAsDag("DefaultOps");
-      for (const auto *DefaultOp : DefaultOps->args()) {
-        // Look through ValueType operators.
-        if (const DagInit *DefaultDagOp = dyn_cast<DagInit>(DefaultOp)) {
-          if (const DefInit *DefaultDagOperator =
-                  dyn_cast<DefInit>(DefaultDagOp->getOperator())) {
-            if (DefaultDagOperator->getDef()->isSubClassOf("ValueType"))
-              DefaultOp = DefaultDagOp->getArg(0);
-          }
-        }
-
-        if (const DefInit *DefaultDefOp = dyn_cast<DefInit>(DefaultOp)) {
-          DstMIBuilder.addRenderer<AddRegisterRenderer>(DefaultDefOp->getDef());
-          continue;
-        }
-
-        if (const IntInit *DefaultIntOp = dyn_cast<IntInit>(DefaultOp)) {
-          DstMIBuilder.addRenderer<ImmRenderer>(DefaultIntOp->getValue());
-          continue;
-        }
-
-        return failedImport("Could not add default op");
-      }
-
+      if (auto Error = importDefaultOperandRenderers(DstMIBuilder, DefaultOps))
+        return std::move(Error);
+      ++NumDefaultOps;
       continue;
     }

@@ -1571,9 +1558,44 @@ Expected<BuildMIAction &> GlobalISelEmitter::createAndImportInstructionRenderer(
     ++Child;
   }

+  if (NumDefaultOps + Dst->getNumChildren() != DstINumUses)
+    return failedImport("Expected " + llvm::to_string(DstINumUses) +
+                        " used operands but found " +
+                        llvm::to_string(Dst->getNumChildren()) +
+                        " explicit ones and " + llvm::to_string(NumDefaultOps) +
+                        " default ones");
+
   return DstMIBuilder;
 }

+Error GlobalISelEmitter::importDefaultOperandRenderers(
+    BuildMIAction &DstMIBuilder, DagInit *DefaultOps) const {
+  for (const auto *DefaultOp : DefaultOps->args()) {
+    // Look through ValueType operators.
+    if (const DagInit *DefaultDagOp = dyn_cast<DagInit>(DefaultOp)) {
+      if (const DefInit *DefaultDagOperator =
+              dyn_cast<DefInit>(DefaultDagOp->getOperator())) {
+        if (DefaultDagOperator->getDef()->isSubClassOf("ValueType"))
+          DefaultOp = DefaultDagOp->getArg(0);
+      }
+    }
+
+    if (const DefInit *DefaultDefOp = dyn_cast<DefInit>(DefaultOp)) {
+      DstMIBuilder.addRenderer<AddRegisterRenderer>(DefaultDefOp->getDef());
+      continue;
+    }
+
+    if (const IntInit *DefaultIntOp = dyn_cast<IntInit>(DefaultOp)) {
+      DstMIBuilder.addRenderer<ImmRenderer>(DefaultIntOp->getValue());
+      continue;
+    }
+
+    return failedImport("Could not add default op");
+  }
+
+  return Error::success();
+}
+
 Error GlobalISelEmitter::importImplicitDefRenderers(
     BuildMIAction &DstMIBuilder,
     const std::vector<Record *> &ImplicitDefs) const {
diff --git a/lib/clang/include/clang/Basic/Version.inc b/lib/clang/include/clang/Basic/Version.inc
index 420bf474be44..43af43313b25 100644
--- a/lib/clang/include/clang/Basic/Version.inc
+++ b/lib/clang/include/clang/Basic/Version.inc
@@ -8,4 +8,4 @@

 #define CLANG_VENDOR "FreeBSD "

-#define SVN_REVISION "303197"
+#define SVN_REVISION "303291"
diff --git a/lib/clang/include/lld/Config/Version.inc b/lib/clang/include/lld/Config/Version.inc
index 6e6c097ad8e1..64af8589c334 100644
--- a/lib/clang/include/lld/Config/Version.inc
+++ b/lib/clang/include/lld/Config/Version.inc
@@ -4,5 +4,5 @@
 #define LLD_VERSION_STRING "5.0.0"
 #define LLD_VERSION_MAJOR 5
 #define LLD_VERSION_MINOR 0
-#define LLD_REVISION_STRING "303197"
+#define LLD_REVISION_STRING "303291"
 #define LLD_REPOSITORY_STRING "FreeBSD"
diff --git a/lib/clang/include/llvm/Support/VCSRevision.h b/lib/clang/include/llvm/Support/VCSRevision.h
index bd8abc23c17a..e914eb4b2b9d 100644
--- a/lib/clang/include/llvm/Support/VCSRevision.h
+++ b/lib/clang/include/llvm/Support/VCSRevision.h
@@ -1,2 +1,2 @@
 /* $FreeBSD$ */
-#define LLVM_REVISION "svn-r303197"
+#define LLVM_REVISION "svn-r303291"
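On the GlobalISelEmitter change above: instead of pre-computing which destination operands need defaults, the rewritten loop renders DefaultOps inline and then verifies that explicit pattern children plus defaulted operands account for every use. A toy version of that final accounting check (hypothetical types, not TableGen's):

#include <cassert>
#include <vector>

struct DstOperand {
  bool HasDefaults; // models Rec->isSubClassOf("OperandWithDefaultOps")
};

bool operandsCovered(const std::vector<DstOperand> &Uses, unsigned NumChildren) {
  unsigned NumDefaultOps = 0;
  for (const DstOperand &Op : Uses)
    if (Op.HasDefaults)
      ++NumDefaultOps; // rendered from DefaultOps, not from a pattern child
  // Mirrors the "Expected N used operands ..." failedImport condition.
  return NumDefaultOps + NumChildren == Uses.size();
}

int main() {
  // Two real inputs plus one defaulted predicate operand: two children fit.
  std::vector<DstOperand> Uses = {{false}, {false}, {true}};
  assert(operandsCovered(Uses, 2));
  assert(!operandsCovered(Uses, 3)); // an extra explicit operand is an error
  return 0;
}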