Update clang to r339409b.
Change-Id: Ied8a188bb072c40035320acedc86164b66d920af
diff --git a/linux-x64/clang/include/llvm/ADT/APInt.h b/linux-x64/clang/include/llvm/ADT/APInt.h
index 684a257..1befe7a 100644
--- a/linux-x64/clang/include/llvm/ADT/APInt.h
+++ b/linux-x64/clang/include/llvm/ADT/APInt.h
@@ -85,7 +85,7 @@
UP,
};
- static const WordType WORD_MAX = ~WordType(0);
+ static const WordType WORDTYPE_MAX = ~WordType(0);
private:
/// This union is used to store the integer value. When the
@@ -150,7 +150,7 @@
unsigned WordBits = ((BitWidth-1) % APINT_BITS_PER_WORD) + 1;
// Mask out the high bits.
- uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - WordBits);
+ uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - WordBits);
if (isSingleWord())
U.VAL &= mask;
else
@@ -395,7 +395,7 @@
/// This checks to see if the value has all bits of the APInt are set or not.
bool isAllOnesValue() const {
if (isSingleWord())
- return U.VAL == WORD_MAX >> (APINT_BITS_PER_WORD - BitWidth);
+ return U.VAL == WORDTYPE_MAX >> (APINT_BITS_PER_WORD - BitWidth);
return countTrailingOnesSlowCase() == BitWidth;
}
@@ -496,7 +496,7 @@
assert(numBits != 0 && "numBits must be non-zero");
assert(numBits <= BitWidth && "numBits out of range");
if (isSingleWord())
- return U.VAL == (WORD_MAX >> (APINT_BITS_PER_WORD - numBits));
+ return U.VAL == (WORDTYPE_MAX >> (APINT_BITS_PER_WORD - numBits));
unsigned Ones = countTrailingOnesSlowCase();
return (numBits == Ones) &&
((Ones + countLeadingZerosSlowCase()) == BitWidth);
@@ -560,7 +560,7 @@
///
/// \returns the all-ones value for an APInt of the specified bit-width.
static APInt getAllOnesValue(unsigned numBits) {
- return APInt(numBits, WORD_MAX, true);
+ return APInt(numBits, WORDTYPE_MAX, true);
}
/// Get the '0' value.
@@ -1383,7 +1383,7 @@
/// Set every bit to 1.
void setAllBits() {
if (isSingleWord())
- U.VAL = WORD_MAX;
+ U.VAL = WORDTYPE_MAX;
else
// Set all the bits in all the words.
memset(U.pVal, -1, getNumWords() * APINT_WORD_SIZE);
@@ -1416,7 +1416,7 @@
if (loBit == hiBit)
return;
if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
- uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
+ uint64_t mask = WORDTYPE_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
mask <<= loBit;
if (isSingleWord())
U.VAL |= mask;
@@ -1470,7 +1470,7 @@
/// Toggle every bit to its opposite value.
void flipAllBits() {
if (isSingleWord()) {
- U.VAL ^= WORD_MAX;
+ U.VAL ^= WORDTYPE_MAX;
clearUnusedBits();
} else {
flipAllBitsSlowCase();
@@ -1759,7 +1759,7 @@
/// referencing 2 in a space where 2 does not exist.
unsigned nearestLogBase2() const {
// Special case when we have a bitwidth of 1. If VAL is 1, then we
- // get 0. If VAL is 0, we get WORD_MAX which gets truncated to
+ // get 0. If VAL is 0, we get WORDTYPE_MAX which gets truncated to
// UINT32_MAX.
if (BitWidth == 1)
return U.VAL - 1;
diff --git a/linux-x64/clang/include/llvm/ADT/Any.h b/linux-x64/clang/include/llvm/ADT/Any.h
index c64c399..7faa4c9 100644
--- a/linux-x64/clang/include/llvm/ADT/Any.h
+++ b/linux-x64/clang/include/llvm/ADT/Any.h
@@ -65,6 +65,16 @@
typename std::enable_if<
llvm::conjunction<
llvm::negation<std::is_same<typename std::decay<T>::type, Any>>,
+ // We also disable this overload when an `Any` object can be
+ // converted to the parameter type because in that case, this
+ // constructor may combine with that conversion during overload
+ // resolution for determining copy constructibility, and then
+ // when we try to determine copy constructibility below we may
+ // infinitely recurse. This is being evaluated by the standards
+ // committee as a potential DR in `std::any` as well, but we're
+  // going ahead and adopting it to work around usage of `Any` with
+ // types that need to be implicitly convertible from an `Any`.
+ llvm::negation<std::is_convertible<Any, typename std::decay<T>::type>>,
std::is_copy_constructible<typename std::decay<T>::type>>::value,
int>::type = 0>
Any(T &&Value) {
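
The new `is_convertible` guard exists because a user type that is implicitly constructible from `Any` can otherwise pull this converting constructor into the copy-constructibility check and recurse forever. A minimal sketch of the kind of type the guard targets; `FromAny` is hypothetical and the snippet is illustration only, not part of the patch:

    #include "llvm/ADT/Any.h"
    #include <type_traits>

    // Hypothetical type that is implicitly convertible *from* llvm::Any.
    struct FromAny {
      FromAny() = default;
      FromAny(const llvm::Any &) {}
    };

    // With the added negation<is_convertible<Any, T>> clause, Any's templated
    // converting constructor drops out of overload resolution for FromAny, so
    // evaluating this trait no longer recurses back through that constructor's
    // own is_copy_constructible constraint.
    static_assert(std::is_copy_constructible<FromAny>::value,
                  "no infinite recursion while checking copy constructibility");

    int main() {
      llvm::Any A = 42; // unrelated payloads are unaffected by the new guard
      return llvm::any_isa<int>(A) ? 0 : 1;
    }
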
diff --git a/linux-x64/clang/include/llvm/ADT/BitVector.h b/linux-x64/clang/include/llvm/ADT/BitVector.h
index 438c7d8..9ab1da7 100644
--- a/linux-x64/clang/include/llvm/ADT/BitVector.h
+++ b/linux-x64/clang/include/llvm/ADT/BitVector.h
@@ -503,6 +503,23 @@
return (*this)[Idx];
}
+ // Push single bit to end of vector.
+ void push_back(bool Val) {
+ unsigned OldSize = Size;
+ unsigned NewSize = Size + 1;
+
+ // Resize, which will insert zeros.
+ // If we already fit then the unused bits will be already zero.
+ if (NewSize > getBitCapacity())
+ resize(NewSize, false);
+ else
+ Size = NewSize;
+
+ // If true, set single bit.
+ if (Val)
+ set(OldSize);
+ }
+
/// Test if any common bits are set.
bool anyCommon(const BitVector &RHS) const {
unsigned ThisWords = NumBitWords(size());
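
A quick usage sketch of the new BitVector::push_back (SmallBitVector gains the same method further down in this change); standalone example, not part of the patch:

    #include "llvm/ADT/BitVector.h"

    int main() {
      llvm::BitVector BV;
      // Append bits one at a time. Capacity only grows when exhausted, and the
      // freshly exposed storage is already zero, so only 'true' bits get set.
      BV.push_back(true);
      BV.push_back(false);
      BV.push_back(true);
      return (BV.size() == 3 && BV.count() == 2) ? 0 : 1;
    }
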
diff --git a/linux-x64/clang/include/llvm/ADT/DenseSet.h b/linux-x64/clang/include/llvm/ADT/DenseSet.h
index b495e25..52fe4ad 100644
--- a/linux-x64/clang/include/llvm/ADT/DenseSet.h
+++ b/linux-x64/clang/include/llvm/ADT/DenseSet.h
@@ -136,8 +136,8 @@
public:
using difference_type = typename MapTy::const_iterator::difference_type;
using value_type = ValueT;
- using pointer = value_type *;
- using reference = value_type &;
+ using pointer = const value_type *;
+ using reference = const value_type &;
using iterator_category = std::forward_iterator_tag;
ConstIterator() = default;
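
The pointer/reference typedef fix matters to code that consults iterator_traits: DenseSet's ConstIterator now advertises const access instead of claiming it can hand out mutable references. A small illustration, not part of the patch:

    #include "llvm/ADT/DenseSet.h"
    #include <iterator>
    #include <type_traits>

    int main() {
      llvm::DenseSet<int> S = {1, 2, 3};
      const llvm::DenseSet<int> &CS = S;

      // ConstIterator's reference typedef is now 'const int &', matching what
      // dereferencing it actually yields.
      using Ref = std::iterator_traits<decltype(CS.begin())>::reference;
      static_assert(std::is_same<Ref, const int &>::value,
                    "const_iterator reports const access");

      int Sum = 0;
      for (int V : CS)
        Sum += V;
      return Sum == 6 ? 0 : 1;
    }
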
diff --git a/linux-x64/clang/include/llvm/ADT/Hashing.h b/linux-x64/clang/include/llvm/ADT/Hashing.h
index 9f830ba..9175c54 100644
--- a/linux-x64/clang/include/llvm/ADT/Hashing.h
+++ b/linux-x64/clang/include/llvm/ADT/Hashing.h
@@ -133,7 +133,7 @@
/// undone. This makes it thread-hostile and very hard to use outside of
/// immediately on start of a simple program designed for reproducible
/// behavior.
-void set_fixed_execution_hash_seed(size_t fixed_value);
+void set_fixed_execution_hash_seed(uint64_t fixed_value);
// All of the implementation details of actually computing the various hash
@@ -316,9 +316,9 @@
/// This variable can be set using the \see llvm::set_fixed_execution_seed
/// function. See that function for details. Do not, under any circumstances,
/// set or read this variable.
-extern size_t fixed_seed_override;
+extern uint64_t fixed_seed_override;
-inline size_t get_execution_seed() {
+inline uint64_t get_execution_seed() {
// FIXME: This needs to be a per-execution seed. This is just a placeholder
// implementation. Switching to a per-execution seed is likely to flush out
// instability bugs and so will happen as its own commit.
@@ -326,8 +326,7 @@
// However, if there is a fixed seed override set the first time this is
// called, return that instead of the per-execution seed.
const uint64_t seed_prime = 0xff51afd7ed558ccdULL;
- static size_t seed = fixed_seed_override ? fixed_seed_override
- : (size_t)seed_prime;
+ static uint64_t seed = fixed_seed_override ? fixed_seed_override : seed_prime;
return seed;
}
@@ -402,7 +401,7 @@
/// combining them, this (as an optimization) directly combines the integers.
template <typename InputIteratorT>
hash_code hash_combine_range_impl(InputIteratorT first, InputIteratorT last) {
- const size_t seed = get_execution_seed();
+ const uint64_t seed = get_execution_seed();
char buffer[64], *buffer_ptr = buffer;
char *const buffer_end = std::end(buffer);
while (first != last && store_and_advance(buffer_ptr, buffer_end,
@@ -446,7 +445,7 @@
template <typename ValueT>
typename std::enable_if<is_hashable_data<ValueT>::value, hash_code>::type
hash_combine_range_impl(ValueT *first, ValueT *last) {
- const size_t seed = get_execution_seed();
+ const uint64_t seed = get_execution_seed();
const char *s_begin = reinterpret_cast<const char *>(first);
const char *s_end = reinterpret_cast<const char *>(last);
const size_t length = std::distance(s_begin, s_end);
@@ -496,7 +495,7 @@
struct hash_combine_recursive_helper {
char buffer[64];
hash_state state;
- const size_t seed;
+ const uint64_t seed;
public:
/// Construct a recursive hash combining helper.
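
The seed plumbing switches from size_t to uint64_t so the (testing-only) fixed-seed override behaves identically on 32-bit and 64-bit hosts. A minimal sketch of that hook together with hash_combine; illustration only:

    #include "llvm/ADT/Hashing.h"

    int main() {
      // Testing-only escape hatch: must be called before any hashing happens
      // in the process, and now carries a full 64-bit value on every host.
      llvm::set_fixed_execution_hash_seed(0x9e3779b97f4a7c15ULL);

      llvm::hash_code H = llvm::hash_combine(42, 3.14, 'x');
      size_t Bucket = static_cast<size_t>(H); // hash_code converts to size_t
      return Bucket != 0 ? 0 : 1;
    }
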
diff --git a/linux-x64/clang/include/llvm/ADT/ImmutableList.h b/linux-x64/clang/include/llvm/ADT/ImmutableList.h
index 1f5e981..0541dc2 100644
--- a/linux-x64/clang/include/llvm/ADT/ImmutableList.h
+++ b/linux-x64/clang/include/llvm/ADT/ImmutableList.h
@@ -31,8 +31,9 @@
T Head;
const ImmutableListImpl* Tail;
- ImmutableListImpl(const T& head, const ImmutableListImpl* tail = nullptr)
- : Head(head), Tail(tail) {}
+ template <typename ElemT>
+ ImmutableListImpl(ElemT &&head, const ImmutableListImpl *tail = nullptr)
+ : Head(std::forward<ElemT>(head)), Tail(tail) {}
public:
ImmutableListImpl(const ImmutableListImpl &) = delete;
@@ -66,6 +67,9 @@
using value_type = T;
using Factory = ImmutableListFactory<T>;
+ static_assert(std::is_trivially_destructible<T>::value,
+ "T must be trivially destructible!");
+
private:
const ImmutableListImpl<T>* X;
@@ -90,6 +94,9 @@
bool operator==(const iterator& I) const { return L == I.L; }
bool operator!=(const iterator& I) const { return L != I.L; }
const value_type& operator*() const { return L->getHead(); }
+ const typename std::remove_reference<value_type>::type* operator->() const {
+ return &L->getHead();
+ }
ImmutableList getList() const { return L; }
};
@@ -123,14 +130,14 @@
bool operator==(const ImmutableList& L) const { return isEqual(L); }
/// getHead - Returns the head of the list.
- const T& getHead() {
+ const T& getHead() const {
assert(!isEmpty() && "Cannot get the head of an empty list.");
return X->getHead();
}
/// getTail - Returns the tail of the list, which is another (possibly empty)
/// ImmutableList.
- ImmutableList getTail() {
+ ImmutableList getTail() const {
return X ? X->getTail() : nullptr;
}
@@ -166,7 +173,8 @@
if (ownsAllocator()) delete &getAllocator();
}
- LLVM_NODISCARD ImmutableList<T> concat(const T &Head, ImmutableList<T> Tail) {
+ template <typename ElemT>
+ LLVM_NODISCARD ImmutableList<T> concat(ElemT &&Head, ImmutableList<T> Tail) {
// Profile the new list to see if it already exists in our cache.
FoldingSetNodeID ID;
void* InsertPos;
@@ -179,7 +187,7 @@
// The list does not exist in our cache. Create it.
BumpPtrAllocator& A = getAllocator();
L = (ListTy*) A.Allocate<ListTy>();
- new (L) ListTy(Head, TailImpl);
+ new (L) ListTy(std::forward<ElemT>(Head), TailImpl);
// Insert the new list into the cache.
Cache.InsertNode(L, InsertPos);
@@ -188,16 +196,24 @@
return L;
}
- LLVM_NODISCARD ImmutableList<T> add(const T& D, ImmutableList<T> L) {
- return concat(D, L);
+ template <typename ElemT>
+ LLVM_NODISCARD ImmutableList<T> add(ElemT &&Data, ImmutableList<T> L) {
+ return concat(std::forward<ElemT>(Data), L);
+ }
+
+ template <typename ...CtorArgs>
+ LLVM_NODISCARD ImmutableList<T> emplace(ImmutableList<T> Tail,
+ CtorArgs &&...Args) {
+ return concat(T(std::forward<CtorArgs>(Args)...), Tail);
}
ImmutableList<T> getEmptyList() const {
return ImmutableList<T>(nullptr);
}
- ImmutableList<T> create(const T& X) {
- return Concat(X, getEmptyList());
+ template <typename ElemT>
+ ImmutableList<T> create(ElemT &&Data) {
+ return concat(std::forward<ElemT>(Data), getEmptyList());
}
};
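
The factory now perfect-forwards elements and gains emplace(), getHead()/getTail() are const-qualified, and the iterator exposes operator->. A usage sketch with a hypothetical element type (elements must be trivially destructible per the new static_assert, and profiled so the factory can unique lists):

    #include "llvm/ADT/FoldingSet.h"
    #include "llvm/ADT/ImmutableList.h"

    // Hypothetical element type: trivially destructible, with a Profile hook.
    struct Point {
      int X, Y;
      Point(int X, int Y) : X(X), Y(Y) {}
      void Profile(llvm::FoldingSetNodeID &ID) const {
        ID.AddInteger(X);
        ID.AddInteger(Y);
      }
    };

    int main() {
      llvm::ImmutableList<Point>::Factory F;

      // emplace() constructs the new head in place instead of copying a
      // temporary; add()/concat() now perfect-forward their element as well.
      auto L = F.emplace(F.getEmptyList(), 3, 4);
      L = F.emplace(L, 1, 2); // list is now (1,2) -> (3,4)

      // getHead()/getTail() are const now, and iterators support operator->.
      return (L.begin()->X == 1 && L.getTail().getHead().Y == 4) ? 0 : 1;
    }
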
diff --git a/linux-x64/clang/include/llvm/ADT/IntervalMap.h b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
index f713668..b75c492 100644
--- a/linux-x64/clang/include/llvm/ADT/IntervalMap.h
+++ b/linux-x64/clang/include/llvm/ADT/IntervalMap.h
@@ -101,6 +101,7 @@
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/bit.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/RecyclingAllocator.h"
@@ -963,6 +964,7 @@
private:
// The root data is either a RootLeaf or a RootBranchData instance.
+ LLVM_ALIGNAS(RootLeaf) LLVM_ALIGNAS(RootBranchData)
AlignedCharArrayUnion<RootLeaf, RootBranchData> data;
// Tree height.
@@ -977,15 +979,10 @@
// Allocator used for creating external nodes.
Allocator &allocator;
- /// dataAs - Represent data as a node type without breaking aliasing rules.
+ /// Represent data as a node type without breaking aliasing rules.
template <typename T>
T &dataAs() const {
- union {
- const char *d;
- T *t;
- } u;
- u.d = data.buffer;
- return *u.t;
+ return *bit_cast<T *>(const_cast<char *>(data.buffer));
}
const RootLeaf &rootLeaf() const {
diff --git a/linux-x64/clang/include/llvm/ADT/PointerSumType.h b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
index e379571..a19e45a 100644
--- a/linux-x64/clang/include/llvm/ADT/PointerSumType.h
+++ b/linux-x64/clang/include/llvm/ADT/PointerSumType.h
@@ -10,6 +10,7 @@
#ifndef LLVM_ADT_POINTERSUMTYPE_H
#define LLVM_ADT_POINTERSUMTYPE_H
+#include "llvm/ADT/bit.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include <cassert>
@@ -58,56 +59,142 @@
/// and may be desirable to set to a state that is particularly desirable to
/// default construct.
///
+/// Having a supported zero-valued tag also enables getting the address of a
+/// pointer stored with that tag provided it is stored in its natural bit
+/// representation. This works because in the case of a zero-valued tag, the
+/// pointer's value is directly stored into this object and we can expose the
+/// address of that internal storage. This is especially useful when building an
+/// `ArrayRef` of a single pointer stored in a sum type.
+///
/// There is no support for constructing or accessing with a dynamic tag as
/// that would fundamentally violate the type safety provided by the sum type.
template <typename TagT, typename... MemberTs> class PointerSumType {
- uintptr_t Value = 0;
-
using HelperT = detail::PointerSumTypeHelper<TagT, MemberTs...>;
+ // We keep both the raw value and the min tag value's pointer in a union. When
+ // the minimum tag value is zero, this allows code below to cleanly expose the
+ // address of the zero-tag pointer instead of just the zero-tag pointer
+ // itself. This is especially useful when building `ArrayRef`s out of a single
+ // pointer. However, we have to carefully access the union due to the active
+ // member potentially changing. When we *store* a new value, we directly
+ // access the union to allow us to store using the obvious types. However,
+ // when we *read* a value, we copy the underlying storage out to avoid relying
+ // on one member or the other being active.
+ union StorageT {
+ // Ensure we get a null default constructed value. We don't use a member
+ // initializer because some compilers seem to not implement those correctly
+ // for a union.
+ StorageT() : Value(0) {}
+
+ uintptr_t Value;
+
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT MinTagPointer;
+ };
+
+ StorageT Storage;
+
public:
constexpr PointerSumType() = default;
+ /// A typed setter to a given tagged member of the sum type.
+ template <TagT N>
+ void set(typename HelperT::template Lookup<N>::PointerT Pointer) {
+ void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
+ assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
+ "Pointer is insufficiently aligned to store the discriminant!");
+ Storage.Value = reinterpret_cast<uintptr_t>(V) | N;
+ }
+
/// A typed constructor for a specific tagged member of the sum type.
template <TagT N>
static PointerSumType
create(typename HelperT::template Lookup<N>::PointerT Pointer) {
PointerSumType Result;
- void *V = HelperT::template Lookup<N>::TraitsT::getAsVoidPointer(Pointer);
- assert((reinterpret_cast<uintptr_t>(V) & HelperT::TagMask) == 0 &&
- "Pointer is insufficiently aligned to store the discriminant!");
- Result.Value = reinterpret_cast<uintptr_t>(V) | N;
+ Result.set<N>(Pointer);
return Result;
}
- TagT getTag() const { return static_cast<TagT>(Value & HelperT::TagMask); }
+ /// Clear the value to null with the min tag type.
+ void clear() { set<HelperT::MinTag>(nullptr); }
+
+ TagT getTag() const {
+ return static_cast<TagT>(getOpaqueValue() & HelperT::TagMask);
+ }
template <TagT N> bool is() const { return N == getTag(); }
template <TagT N> typename HelperT::template Lookup<N>::PointerT get() const {
- void *P = is<N>() ? getImpl() : nullptr;
+ void *P = is<N>() ? getVoidPtr() : nullptr;
return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(P);
}
template <TagT N>
typename HelperT::template Lookup<N>::PointerT cast() const {
assert(is<N>() && "This instance has a different active member.");
- return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(getImpl());
+ return HelperT::template Lookup<N>::TraitsT::getFromVoidPointer(
+ getVoidPtr());
}
- explicit operator bool() const { return Value & HelperT::PointerMask; }
- bool operator==(const PointerSumType &R) const { return Value == R.Value; }
- bool operator!=(const PointerSumType &R) const { return Value != R.Value; }
- bool operator<(const PointerSumType &R) const { return Value < R.Value; }
- bool operator>(const PointerSumType &R) const { return Value > R.Value; }
- bool operator<=(const PointerSumType &R) const { return Value <= R.Value; }
- bool operator>=(const PointerSumType &R) const { return Value >= R.Value; }
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT const *
+ getAddrOfZeroTagPointer() const {
+ return const_cast<PointerSumType *>(this)->getAddrOfZeroTagPointer();
+ }
- uintptr_t getOpaqueValue() const { return Value; }
+ /// If the tag is zero and the pointer's value isn't changed when being
+ /// stored, get the address of the stored value type-punned to the zero-tag's
+ /// pointer type.
+ typename HelperT::template Lookup<HelperT::MinTag>::PointerT *
+ getAddrOfZeroTagPointer() {
+ static_assert(HelperT::MinTag == 0, "Non-zero minimum tag value!");
+ assert(is<HelperT::MinTag>() && "The active tag is not zero!");
+ // Store the initial value of the pointer when read out of our storage.
+ auto InitialPtr = get<HelperT::MinTag>();
+ // Now update the active member of the union to be the actual pointer-typed
+ // member so that accessing it indirectly through the returned address is
+ // valid.
+ Storage.MinTagPointer = InitialPtr;
+ // Finally, validate that this was a no-op as expected by reading it back
+ // out using the same underlying-storage read as above.
+ assert(InitialPtr == get<HelperT::MinTag>() &&
+ "Switching to typed storage changed the pointer returned!");
+ // Now we can correctly return an address to typed storage.
+ return &Storage.MinTagPointer;
+ }
+
+ explicit operator bool() const {
+ return getOpaqueValue() & HelperT::PointerMask;
+ }
+ bool operator==(const PointerSumType &R) const {
+ return getOpaqueValue() == R.getOpaqueValue();
+ }
+ bool operator!=(const PointerSumType &R) const {
+ return getOpaqueValue() != R.getOpaqueValue();
+ }
+ bool operator<(const PointerSumType &R) const {
+ return getOpaqueValue() < R.getOpaqueValue();
+ }
+ bool operator>(const PointerSumType &R) const {
+ return getOpaqueValue() > R.getOpaqueValue();
+ }
+ bool operator<=(const PointerSumType &R) const {
+ return getOpaqueValue() <= R.getOpaqueValue();
+ }
+ bool operator>=(const PointerSumType &R) const {
+ return getOpaqueValue() >= R.getOpaqueValue();
+ }
+
+ uintptr_t getOpaqueValue() const {
+ // Read the underlying storage of the union, regardless of the active
+ // member.
+ return bit_cast<uintptr_t>(Storage);
+ }
protected:
- void *getImpl() const {
- return reinterpret_cast<void *>(Value & HelperT::PointerMask);
+ void *getVoidPtr() const {
+ return reinterpret_cast<void *>(getOpaqueValue() & HelperT::PointerMask);
}
};
@@ -151,8 +238,9 @@
enum { NumTagBits = Min<MemberTs::TraitsT::NumLowBitsAvailable...>::value };
// Also compute the smallest discriminant and various masks for convenience.
+ constexpr static TagT MinTag =
+ static_cast<TagT>(Min<MemberTs::Tag...>::value);
enum : uint64_t {
- MinTag = Min<MemberTs::Tag...>::value,
PointerMask = static_cast<uint64_t>(-1) << NumTagBits,
TagMask = ~PointerMask
};
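
The storage is now a union of the raw value and the minimum tag's pointer type, which is what makes getAddrOfZeroTagPointer() legal, and set()/clear() mutate an existing object instead of rebuilding it with create(). A sketch with a hypothetical tag enum and pointer members; illustration only, not part of the patch:

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/ADT/PointerSumType.h"

    enum Kind { IntKind = 0, FloatKind = 1 };

    using Sum = llvm::PointerSumType<
        Kind, llvm::PointerSumTypeMember<IntKind, int *>,
              llvm::PointerSumTypeMember<FloatKind, float *>>;

    int main() {
      int I = 42;
      float F = 2.5f;

      // New in-place setter instead of rebuilding via create<>().
      Sum S;
      S.set<IntKind>(&I);

      // Because IntKind is the zero-valued (minimum) tag, the stored pointer's
      // address can be exposed directly, e.g. to build an ArrayRef of a single
      // element without copying it out first.
      llvm::ArrayRef<int *> Ptrs(S.getAddrOfZeroTagPointer(), 1);
      bool OK = Ptrs[0] == &I && *S.get<IntKind>() == 42;

      S.set<FloatKind>(&F);
      OK &= S.is<FloatKind>() && *S.cast<FloatKind>() == 2.5f;

      S.clear(); // back to a null zero-tag value
      return (OK && !S) ? 0 : 1;
    }
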
diff --git a/linux-x64/clang/include/llvm/ADT/STLExtras.h b/linux-x64/clang/include/llvm/ADT/STLExtras.h
index 03109a0..c209c4a 100644
--- a/linux-x64/clang/include/llvm/ADT/STLExtras.h
+++ b/linux-x64/clang/include/llvm/ADT/STLExtras.h
@@ -677,18 +677,20 @@
/// Note that something like iterator_range seems nice at first here, but the
/// range properties are of little benefit and end up getting in the way
/// because we need to do mutation on the current iterators.
- std::tuple<std::pair<IterTs, IterTs>...> IterPairs;
+ std::tuple<IterTs...> Begins;
+ std::tuple<IterTs...> Ends;
/// Attempts to increment a specific iterator.
///
/// Returns true if it was able to increment the iterator. Returns false if
/// the iterator is already at the end iterator.
template <size_t Index> bool incrementHelper() {
- auto &IterPair = std::get<Index>(IterPairs);
- if (IterPair.first == IterPair.second)
+ auto &Begin = std::get<Index>(Begins);
+ auto &End = std::get<Index>(Ends);
+ if (Begin == End)
return false;
- ++IterPair.first;
+ ++Begin;
return true;
}
@@ -712,11 +714,12 @@
/// dereferences the iterator and returns the address of the resulting
/// reference.
template <size_t Index> ValueT *getHelper() const {
- auto &IterPair = std::get<Index>(IterPairs);
- if (IterPair.first == IterPair.second)
+ auto &Begin = std::get<Index>(Begins);
+ auto &End = std::get<Index>(Ends);
+ if (Begin == End)
return nullptr;
- return &*IterPair.first;
+ return &*Begin;
}
/// Finds the first non-end iterator, dereferences, and returns the resulting
@@ -743,7 +746,7 @@
/// iterators.
template <typename... RangeTs>
explicit concat_iterator(RangeTs &&... Ranges)
- : IterPairs({std::begin(Ranges), std::end(Ranges)}...) {}
+ : Begins(std::begin(Ranges)...), Ends(std::end(Ranges)...) {}
using BaseT::operator++;
@@ -755,7 +758,7 @@
ValueT &operator*() const { return get(index_sequence_for<IterTs...>()); }
bool operator==(const concat_iterator &RHS) const {
- return IterPairs == RHS.IterPairs;
+ return Begins == RHS.Begins && Ends == RHS.Ends;
}
};
@@ -974,6 +977,10 @@
std::sort(Start, End);
}
+template <typename Container> inline void sort(Container &&C) {
+ llvm::sort(adl_begin(C), adl_end(C));
+}
+
template <typename IteratorTy, typename Compare>
inline void sort(IteratorTy Start, IteratorTy End, Compare Comp) {
#ifdef EXPENSIVE_CHECKS
@@ -983,6 +990,11 @@
std::sort(Start, End, Comp);
}
+template <typename Container, typename Compare>
+inline void sort(Container &&C, Compare Comp) {
+ llvm::sort(adl_begin(C), adl_end(C), Comp);
+}
+
//===----------------------------------------------------------------------===//
// Extra additions to <algorithm>
//===----------------------------------------------------------------------===//
@@ -1005,6 +1017,18 @@
C.clear();
}
+/// Get the size of a range. This is a wrapper function around std::distance
+/// which is only enabled when the operation is O(1).
+template <typename R>
+auto size(R &&Range, typename std::enable_if<
+ std::is_same<typename std::iterator_traits<decltype(
+ Range.begin())>::iterator_category,
+ std::random_access_iterator_tag>::value,
+ void>::type * = nullptr)
+ -> decltype(std::distance(Range.begin(), Range.end())) {
+ return std::distance(Range.begin(), Range.end());
+}
+
/// Provide wrappers to std::for_each which take ranges instead of having to
/// pass begin/end explicitly.
template <typename R, typename UnaryPredicate>
@@ -1115,6 +1139,33 @@
return std::lower_bound(adl_begin(Range), adl_end(Range), I);
}
+template <typename R, typename ForwardIt, typename Compare>
+auto lower_bound(R &&Range, ForwardIt I, Compare C)
+ -> decltype(adl_begin(Range)) {
+ return std::lower_bound(adl_begin(Range), adl_end(Range), I, C);
+}
+
+/// Provide wrappers to std::upper_bound which take ranges instead of having to
+/// pass begin/end explicitly.
+template <typename R, typename ForwardIt>
+auto upper_bound(R &&Range, ForwardIt I) -> decltype(adl_begin(Range)) {
+ return std::upper_bound(adl_begin(Range), adl_end(Range), I);
+}
+
+template <typename R, typename ForwardIt, typename Compare>
+auto upper_bound(R &&Range, ForwardIt I, Compare C)
+ -> decltype(adl_begin(Range)) {
+ return std::upper_bound(adl_begin(Range), adl_end(Range), I, C);
+}
+/// Wrapper function around std::equal to detect if all elements
+/// in a container are same.
+template <typename R>
+bool is_splat(R &&Range) {
+ size_t range_size = size(Range);
+ return range_size != 0 && (range_size == 1 ||
+ std::equal(adl_begin(Range) + 1, adl_end(Range), adl_begin(Range)));
+}
+
/// Given a range of type R, iterate the entire range and return a
/// SmallVector with elements of the vector. This is useful, for example,
/// when you want to iterate a range and then sort the results.
@@ -1136,18 +1187,6 @@
C.erase(remove_if(C, P), C.end());
}
-/// Get the size of a range. This is a wrapper function around std::distance
-/// which is only enabled when the operation is O(1).
-template <typename R>
-auto size(R &&Range, typename std::enable_if<
- std::is_same<typename std::iterator_traits<decltype(
- Range.begin())>::iterator_category,
- std::random_access_iterator_tag>::value,
- void>::type * = nullptr)
- -> decltype(std::distance(Range.begin(), Range.end())) {
- return std::distance(Range.begin(), Range.end());
-}
-
//===----------------------------------------------------------------------===//
// Extra additions to <memory>
//===----------------------------------------------------------------------===//
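
A sketch exercising the new container overloads of llvm::sort, the relocated llvm::size, the range forms of lower_bound/upper_bound, and is_splat; standalone example, not part of the patch:

    #include "llvm/ADT/STLExtras.h"
    #include "llvm/ADT/SmallVector.h"

    int main() {
      llvm::SmallVector<int, 8> V = {3, 1, 4, 1, 5};

      // Container overloads: no more explicit begin()/end().
      llvm::sort(V);                                     // 1 1 3 4 5
      llvm::sort(V, [](int A, int B) { return A > B; }); // 5 4 3 1 1
      llvm::sort(V);                                     // ascending again

      bool OK = llvm::size(V) == 5; // O(1), random-access ranges only

      // Range forms of the binary-search wrappers.
      auto LB = llvm::lower_bound(V, 3);
      auto UB = llvm::upper_bound(V, 3);
      OK &= (UB - LB) == 1; // exactly one occurrence of 3

      // is_splat: true iff the range is non-empty and all elements are equal.
      llvm::SmallVector<int, 4> Same = {7, 7, 7};
      OK &= llvm::is_splat(Same) && !llvm::is_splat(V);

      return OK ? 0 : 1;
    }
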
diff --git a/linux-x64/clang/include/llvm/ADT/SmallBitVector.h b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
index b639174..f86bebd 100644
--- a/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
+++ b/linux-x64/clang/include/llvm/ADT/SmallBitVector.h
@@ -465,6 +465,11 @@
return (*this)[Idx];
}
+ // Push single bit to end of vector.
+ void push_back(bool Val) {
+ resize(size() + 1, Val);
+ }
+
/// Test if any common bits are set.
bool anyCommon(const SmallBitVector &RHS) const {
if (isSmall() && RHS.isSmall())
diff --git a/linux-x64/clang/include/llvm/ADT/StringExtras.h b/linux-x64/clang/include/llvm/ADT/StringExtras.h
index 71b0e75..60a0363 100644
--- a/linux-x64/clang/include/llvm/ADT/StringExtras.h
+++ b/linux-x64/clang/include/llvm/ADT/StringExtras.h
@@ -139,22 +139,23 @@
/// Convert buffer \p Input to its hexadecimal representation.
/// The returned string is double the size of \p Input.
-inline std::string toHex(StringRef Input) {
+inline std::string toHex(StringRef Input, bool LowerCase = false) {
static const char *const LUT = "0123456789ABCDEF";
+ const uint8_t Offset = LowerCase ? 32 : 0;
size_t Length = Input.size();
std::string Output;
Output.reserve(2 * Length);
for (size_t i = 0; i < Length; ++i) {
const unsigned char c = Input[i];
- Output.push_back(LUT[c >> 4]);
- Output.push_back(LUT[c & 15]);
+ Output.push_back(LUT[c >> 4] | Offset);
+ Output.push_back(LUT[c & 15] | Offset);
}
return Output;
}
-inline std::string toHex(ArrayRef<uint8_t> Input) {
- return toHex(toStringRef(Input));
+inline std::string toHex(ArrayRef<uint8_t> Input, bool LowerCase = false) {
+ return toHex(toStringRef(Input), LowerCase);
}
inline uint8_t hexFromNibbles(char MSB, char LSB) {
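
A quick check of the new LowerCase flag; OR-ing in the 0x20 offset lowercases 'A'..'F' while leaving '0'..'9' untouched, since the digits already have that bit set in ASCII. Standalone example:

    #include "llvm/ADT/StringExtras.h"
    #include <string>

    int main() {
      std::string Upper = llvm::toHex("\xde\xad\xbe\xef");
      std::string Lower = llvm::toHex("\xde\xad\xbe\xef", /*LowerCase=*/true);
      return (Upper == "DEADBEEF" && Lower == "deadbeef") ? 0 : 1;
    }
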
diff --git a/linux-x64/clang/include/llvm/ADT/Triple.h b/linux-x64/clang/include/llvm/ADT/Triple.h
index c95b16d..fe78f78 100644
--- a/linux-x64/clang/include/llvm/ADT/Triple.h
+++ b/linux-x64/clang/include/llvm/ADT/Triple.h
@@ -55,10 +55,10 @@
bpfel, // eBPF or extended BPF or 64-bit BPF (little endian)
bpfeb, // eBPF or extended BPF or 64-bit BPF (big endian)
hexagon, // Hexagon: hexagon
- mips, // MIPS: mips, mipsallegrex
- mipsel, // MIPSEL: mipsel, mipsallegrexel
- mips64, // MIPS64: mips64
- mips64el, // MIPS64EL: mips64el
+ mips, // MIPS: mips, mipsallegrex, mipsr6
+ mipsel, // MIPSEL: mipsel, mipsallegrexe, mipsr6el
+ mips64, // MIPS64: mips64, mips64r6, mipsn32, mipsn32r6
+ mips64el, // MIPS64EL: mips64el, mips64r6el, mipsn32el, mipsn32r6el
msp430, // MSP430: msp430
nios2, // NIOSII: nios2
ppc, // PPC: powerpc
@@ -101,6 +101,7 @@
enum SubArchType {
NoSubArch,
+ ARMSubArch_v8_5a,
ARMSubArch_v8_4a,
ARMSubArch_v8_3a,
ARMSubArch_v8_2a,
@@ -125,7 +126,9 @@
KalimbaSubArch_v3,
KalimbaSubArch_v4,
- KalimbaSubArch_v5
+ KalimbaSubArch_v5,
+
+ MipsSubArch_r6
};
enum VendorType {
UnknownVendor,
@@ -182,7 +185,8 @@
Mesa3D,
Contiki,
AMDPAL, // AMD PAL Runtime
- LastOSType = AMDPAL
+ HermitCore, // HermitCore Unikernel/Multikernel
+ LastOSType = HermitCore
};
enum EnvironmentType {
UnknownEnvironment,
diff --git a/linux-x64/clang/include/llvm/ADT/bit.h b/linux-x64/clang/include/llvm/ADT/bit.h
new file mode 100644
index 0000000..a4aba7b
--- /dev/null
+++ b/linux-x64/clang/include/llvm/ADT/bit.h
@@ -0,0 +1,59 @@
+//===-- llvm/ADT/bit.h - C++20 <bit> ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++20 <bit> header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_ADT_BIT_H
+#define LLVM_ADT_BIT_H
+
+#include "llvm/Support/Compiler.h"
+#include <cstring>
+#include <type_traits>
+
+namespace llvm {
+
+// This implementation of bit_cast is different from the C++17 one in two ways:
+// - It isn't constexpr because that requires compiler support.
+// - It requires trivially-constructible To, to avoid UB in the implementation.
+template <typename To, typename From
+ , typename = typename std::enable_if<sizeof(To) == sizeof(From)>::type
+#if (__has_feature(is_trivially_constructible) && defined(_LIBCPP_VERSION)) || \
+ (defined(__GNUC__) && __GNUC__ >= 5)
+ , typename = typename std::is_trivially_constructible<To>::type
+#elif __has_feature(is_trivially_constructible)
+ , typename = typename std::enable_if<__is_trivially_constructible(To)>::type
+#else
+ // See comment below.
+#endif
+#if (__has_feature(is_trivially_copyable) && defined(_LIBCPP_VERSION)) || \
+ (defined(__GNUC__) && __GNUC__ >= 5)
+ , typename = typename std::enable_if<std::is_trivially_copyable<To>::value>::type
+ , typename = typename std::enable_if<std::is_trivially_copyable<From>::value>::type
+#elif __has_feature(is_trivially_copyable)
+ , typename = typename std::enable_if<__is_trivially_copyable(To)>::type
+ , typename = typename std::enable_if<__is_trivially_copyable(From)>::type
+#else
+ // This case is GCC 4.x. clang with libc++ or libstdc++ never get here. Unlike
+ // llvm/Support/type_traits.h's isPodLike we don't want to provide a
+ // good-enough answer here: developers in that configuration will hit
+ // compilation failures on the bots instead of locally. That's acceptable
+ // because it's very few developers, and only until we move past C++11.
+#endif
+>
+inline To bit_cast(const From &from) noexcept {
+ To to;
+ std::memcpy(&to, &from, sizeof(To));
+ return to;
+}
+
+} // namespace llvm
+
+#endif
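
llvm::bit_cast mirrors C++20's std::bit_cast minus constexpr: a memcpy between equally sized, trivially copyable types. The IntervalMap dataAs<> and PointerSumType::getOpaqueValue changes above use it to replace union-based type punning. A minimal standalone sketch:

    #include "llvm/ADT/bit.h"
    #include <cstdint>

    int main() {
      // Reinterpret a double's object representation as an integer without
      // undefined behavior; both types are 8 bytes and trivially copyable.
      double D = 1.0;
      uint64_t Bits = llvm::bit_cast<uint64_t>(D);

      // IEEE-754 double 1.0 is 0x3FF0000000000000.
      return Bits == 0x3FF0000000000000ULL ? 0 : 1;
    }
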
diff --git a/linux-x64/clang/include/llvm/ADT/iterator.h b/linux-x64/clang/include/llvm/ADT/iterator.h
index 549c522..cb40fc1 100644
--- a/linux-x64/clang/include/llvm/ADT/iterator.h
+++ b/linux-x64/clang/include/llvm/ADT/iterator.h
@@ -334,6 +334,34 @@
PointerIteratorT(std::end(std::forward<RangeT>(Range))));
}
+// Wrapper iterator over iterator ItType, adding DataRef to the type of ItType,
+// to create NodeRef = std::pair<InnerTypeOfItType, DataRef>.
+template <typename ItType, typename NodeRef, typename DataRef>
+class WrappedPairNodeDataIterator
+ : public iterator_adaptor_base<
+ WrappedPairNodeDataIterator<ItType, NodeRef, DataRef>, ItType,
+ typename std::iterator_traits<ItType>::iterator_category, NodeRef,
+ std::ptrdiff_t, NodeRef *, NodeRef &> {
+ using BaseT = iterator_adaptor_base<
+ WrappedPairNodeDataIterator, ItType,
+ typename std::iterator_traits<ItType>::iterator_category, NodeRef,
+ std::ptrdiff_t, NodeRef *, NodeRef &>;
+
+ const DataRef DR;
+ mutable NodeRef NR;
+
+public:
+ WrappedPairNodeDataIterator(ItType Begin, const DataRef DR)
+ : BaseT(Begin), DR(DR) {
+ NR.first = DR;
+ }
+
+ NodeRef &operator*() const {
+ NR.second = *this->I;
+ return NR;
+ }
+};
+
} // end namespace llvm
#endif // LLVM_ADT_ITERATOR_H
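
WrappedPairNodeDataIterator pairs a fixed DataRef with every element the underlying iterator visits, which is useful for graph traits that want (data, node) NodeRefs. A small sketch with purely hypothetical element and tag types; illustration only, not part of the patch:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/iterator.h"
    #include <utility>

    int main() {
      // Walk a list of ints, presenting each as a (tag, value) pair where the
      // tag is fixed for the whole traversal. Note that 'first' holds the
      // wrapped DataRef and 'second' holds the underlying element.
      llvm::SmallVector<int, 4> Values = {10, 20, 30};
      using BaseIt = llvm::SmallVector<int, 4>::const_iterator;
      using PairRef = std::pair<const char *, int>;
      using It = llvm::WrappedPairNodeDataIterator<BaseIt, PairRef, const char *>;

      int Sum = 0;
      for (It I(Values.begin(), "tag"), E(Values.end(), "tag"); I != E; ++I)
        Sum += I->second;

      return Sum == 60 ? 0 : 1;
    }
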